# File: huggingface_hub-main/src/huggingface_hub/__init__.py import importlib import os import sys from typing import TYPE_CHECKING __version__ = '0.25.0.dev0' _SUBMOD_ATTRS = {'_commit_scheduler': ['CommitScheduler'], '_inference_endpoints': ['InferenceEndpoint', 'InferenceEndpointError', 'InferenceEndpointStatus', 'InferenceEndpointTimeoutError', 'InferenceEndpointType'], '_login': ['interpreter_login', 'login', 'logout', 'notebook_login'], '_multi_commits': ['MultiCommitException', 'plan_multi_commits'], '_snapshot_download': ['snapshot_download'], '_space_api': ['SpaceHardware', 'SpaceRuntime', 'SpaceStage', 'SpaceStorage', 'SpaceVariable'], '_tensorboard_logger': ['HFSummaryWriter'], '_webhooks_payload': ['WebhookPayload', 'WebhookPayloadComment', 'WebhookPayloadDiscussion', 'WebhookPayloadDiscussionChanges', 'WebhookPayloadEvent', 'WebhookPayloadMovedTo', 'WebhookPayloadRepo', 'WebhookPayloadUrl', 'WebhookPayloadWebhook'], '_webhooks_server': ['WebhooksServer', 'webhook_endpoint'], 'community': ['Discussion', 'DiscussionComment', 'DiscussionCommit', 'DiscussionEvent', 'DiscussionStatusChange', 'DiscussionTitleChange', 'DiscussionWithDetails'], 'constants': ['CONFIG_NAME', 'FLAX_WEIGHTS_NAME', 'HUGGINGFACE_CO_URL_HOME', 'HUGGINGFACE_CO_URL_TEMPLATE', 'PYTORCH_WEIGHTS_NAME', 'REPO_TYPE_DATASET', 'REPO_TYPE_MODEL', 'REPO_TYPE_SPACE', 'TF2_WEIGHTS_NAME', 'TF_WEIGHTS_NAME'], 'fastai_utils': ['_save_pretrained_fastai', 'from_pretrained_fastai', 'push_to_hub_fastai'], 'file_download': ['HfFileMetadata', '_CACHED_NO_EXIST', 'cached_download', 'get_hf_file_metadata', 'hf_hub_download', 'hf_hub_url', 'try_to_load_from_cache'], 'hf_api': ['Collection', 'CollectionItem', 'CommitInfo', 'CommitOperation', 'CommitOperationAdd', 'CommitOperationCopy', 'CommitOperationDelete', 'DatasetInfo', 'GitCommitInfo', 'GitRefInfo', 'GitRefs', 'HfApi', 'ModelInfo', 'RepoUrl', 'SpaceInfo', 'User', 'UserLikes', 'WebhookInfo', 'WebhookWatchedItem', 'accept_access_request', 'add_collection_item', 'add_space_secret', 'add_space_variable', 'auth_check', 'cancel_access_request', 'change_discussion_status', 'comment_discussion', 'create_branch', 'create_collection', 'create_commit', 'create_commits_on_pr', 'create_discussion', 'create_inference_endpoint', 'create_pull_request', 'create_repo', 'create_tag', 'create_webhook', 'dataset_info', 'delete_branch', 'delete_collection', 'delete_collection_item', 'delete_file', 'delete_folder', 'delete_inference_endpoint', 'delete_repo', 'delete_space_secret', 'delete_space_storage', 'delete_space_variable', 'delete_tag', 'delete_webhook', 'disable_webhook', 'duplicate_space', 'edit_discussion_comment', 'enable_webhook', 'file_exists', 'get_collection', 'get_dataset_tags', 'get_discussion_details', 'get_full_repo_name', 'get_inference_endpoint', 'get_model_tags', 'get_paths_info', 'get_repo_discussions', 'get_safetensors_metadata', 'get_space_runtime', 'get_space_variables', 'get_token_permission', 'get_user_overview', 'get_webhook', 'grant_access', 'like', 'list_accepted_access_requests', 'list_collections', 'list_datasets', 'list_inference_endpoints', 'list_liked_repos', 'list_metrics', 'list_models', 'list_organization_members', 'list_pending_access_requests', 'list_rejected_access_requests', 'list_repo_commits', 'list_repo_files', 'list_repo_likers', 'list_repo_refs', 'list_repo_tree', 'list_spaces', 'list_user_followers', 'list_user_following', 'list_webhooks', 'merge_pull_request', 'model_info', 'move_repo', 'parse_safetensors_file_metadata', 'pause_inference_endpoint', 
'pause_space', 'preupload_lfs_files', 'reject_access_request', 'rename_discussion', 'repo_exists', 'repo_info', 'repo_type_and_id_from_hf_id', 'request_space_hardware', 'request_space_storage', 'restart_space', 'resume_inference_endpoint', 'revision_exists', 'run_as_future', 'scale_to_zero_inference_endpoint', 'set_space_sleep_time', 'space_info', 'super_squash_history', 'unlike', 'update_collection_item', 'update_collection_metadata', 'update_inference_endpoint', 'update_repo_settings', 'update_repo_visibility', 'update_webhook', 'upload_file', 'upload_folder', 'upload_large_folder', 'whoami'], 'hf_file_system': ['HfFileSystem', 'HfFileSystemFile', 'HfFileSystemResolvedPath', 'HfFileSystemStreamFile'], 'hub_mixin': ['ModelHubMixin', 'PyTorchModelHubMixin'], 'inference._client': ['InferenceClient', 'InferenceTimeoutError'], 'inference._generated._async_client': ['AsyncInferenceClient'], 'inference._generated.types': ['AudioClassificationInput', 'AudioClassificationOutputElement', 'AudioClassificationParameters', 'AudioToAudioInput', 'AudioToAudioOutputElement', 'AutomaticSpeechRecognitionGenerationParameters', 'AutomaticSpeechRecognitionInput', 'AutomaticSpeechRecognitionOutput', 'AutomaticSpeechRecognitionOutputChunk', 'AutomaticSpeechRecognitionParameters', 'ChatCompletionInput', 'ChatCompletionInputFunctionDefinition', 'ChatCompletionInputFunctionName', 'ChatCompletionInputGrammarType', 'ChatCompletionInputMessage', 'ChatCompletionInputMessageChunk', 'ChatCompletionInputTool', 'ChatCompletionInputToolTypeClass', 'ChatCompletionInputURL', 'ChatCompletionOutput', 'ChatCompletionOutputComplete', 'ChatCompletionOutputFunctionDefinition', 'ChatCompletionOutputLogprob', 'ChatCompletionOutputLogprobs', 'ChatCompletionOutputMessage', 'ChatCompletionOutputToolCall', 'ChatCompletionOutputTopLogprob', 'ChatCompletionOutputUsage', 'ChatCompletionStreamOutput', 'ChatCompletionStreamOutputChoice', 'ChatCompletionStreamOutputDelta', 'ChatCompletionStreamOutputDeltaToolCall', 'ChatCompletionStreamOutputFunction', 'ChatCompletionStreamOutputLogprob', 'ChatCompletionStreamOutputLogprobs', 'ChatCompletionStreamOutputTopLogprob', 'DepthEstimationInput', 'DepthEstimationOutput', 'DocumentQuestionAnsweringInput', 'DocumentQuestionAnsweringInputData', 'DocumentQuestionAnsweringOutputElement', 'DocumentQuestionAnsweringParameters', 'FeatureExtractionInput', 'FillMaskInput', 'FillMaskOutputElement', 'FillMaskParameters', 'ImageClassificationInput', 'ImageClassificationOutputElement', 'ImageClassificationParameters', 'ImageSegmentationInput', 'ImageSegmentationOutputElement', 'ImageSegmentationParameters', 'ImageToImageInput', 'ImageToImageOutput', 'ImageToImageParameters', 'ImageToImageTargetSize', 'ImageToTextGenerationParameters', 'ImageToTextInput', 'ImageToTextOutput', 'ImageToTextParameters', 'ObjectDetectionBoundingBox', 'ObjectDetectionInput', 'ObjectDetectionOutputElement', 'ObjectDetectionParameters', 'QuestionAnsweringInput', 'QuestionAnsweringInputData', 'QuestionAnsweringOutputElement', 'QuestionAnsweringParameters', 'SentenceSimilarityInput', 'SentenceSimilarityInputData', 'SummarizationGenerationParameters', 'SummarizationInput', 'SummarizationOutput', 'TableQuestionAnsweringInput', 'TableQuestionAnsweringInputData', 'TableQuestionAnsweringOutputElement', 'Text2TextGenerationInput', 'Text2TextGenerationOutput', 'Text2TextGenerationParameters', 'TextClassificationInput', 'TextClassificationOutputElement', 'TextClassificationParameters', 'TextGenerationInput', 
'TextGenerationInputGenerateParameters', 'TextGenerationInputGrammarType', 'TextGenerationOutput', 'TextGenerationOutputBestOfSequence', 'TextGenerationOutputDetails', 'TextGenerationOutputPrefillToken', 'TextGenerationOutputToken', 'TextGenerationStreamOutput', 'TextGenerationStreamOutputStreamDetails', 'TextGenerationStreamOutputToken', 'TextToAudioGenerationParameters', 'TextToAudioInput', 'TextToAudioOutput', 'TextToAudioParameters', 'TextToImageInput', 'TextToImageOutput', 'TextToImageParameters', 'TextToImageTargetSize', 'TokenClassificationInput', 'TokenClassificationOutputElement', 'TokenClassificationParameters', 'TranslationGenerationParameters', 'TranslationInput', 'TranslationOutput', 'VideoClassificationInput', 'VideoClassificationOutputElement', 'VideoClassificationParameters', 'VisualQuestionAnsweringInput', 'VisualQuestionAnsweringInputData', 'VisualQuestionAnsweringOutputElement', 'VisualQuestionAnsweringParameters', 'ZeroShotClassificationInput', 'ZeroShotClassificationInputData', 'ZeroShotClassificationOutputElement', 'ZeroShotClassificationParameters', 'ZeroShotImageClassificationInput', 'ZeroShotImageClassificationInputData', 'ZeroShotImageClassificationOutputElement', 'ZeroShotImageClassificationParameters', 'ZeroShotObjectDetectionBoundingBox', 'ZeroShotObjectDetectionInput', 'ZeroShotObjectDetectionInputData', 'ZeroShotObjectDetectionOutputElement'], 'inference_api': ['InferenceApi'], 'keras_mixin': ['KerasModelHubMixin', 'from_pretrained_keras', 'push_to_hub_keras', 'save_pretrained_keras'], 'repocard': ['DatasetCard', 'ModelCard', 'RepoCard', 'SpaceCard', 'metadata_eval_result', 'metadata_load', 'metadata_save', 'metadata_update'], 'repocard_data': ['CardData', 'DatasetCardData', 'EvalResult', 'ModelCardData', 'SpaceCardData'], 'repository': ['Repository'], 'serialization': ['StateDictSplit', 'get_tf_storage_size', 'get_torch_storage_id', 'get_torch_storage_size', 'save_torch_model', 'save_torch_state_dict', 'split_state_dict_into_shards_factory', 'split_tf_state_dict_into_shards', 'split_torch_state_dict_into_shards'], 'utils': ['CacheNotFound', 'CachedFileInfo', 'CachedRepoInfo', 'CachedRevisionInfo', 'CorruptedCacheException', 'DeleteCacheStrategy', 'HFCacheInfo', 'HfFolder', 'cached_assets_path', 'configure_http_backend', 'dump_environment_info', 'get_session', 'get_token', 'logging', 'scan_cache_dir']} def _attach(package_name, submodules=None, submod_attrs=None): if submod_attrs is None: submod_attrs = {} if submodules is None: submodules = set() else: submodules = set(submodules) attr_to_modules = {attr: mod for (mod, attrs) in submod_attrs.items() for attr in attrs} __all__ = list(submodules | attr_to_modules.keys()) def __getattr__(name): if name in submodules: try: return importlib.import_module(f'{package_name}.{name}') except Exception as e: print(f'Error importing {package_name}.{name}: {e}') raise elif name in attr_to_modules: submod_path = f'{package_name}.{attr_to_modules[name]}' try: submod = importlib.import_module(submod_path) except Exception as e: print(f'Error importing {submod_path}: {e}') raise attr = getattr(submod, name) if name == attr_to_modules[name]: pkg = sys.modules[package_name] pkg.__dict__[name] = attr return attr else: raise AttributeError(f'No {package_name} attribute {name}') def __dir__(): return __all__ return (__getattr__, __dir__, list(__all__)) (__getattr__, __dir__, __all__) = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS) if os.environ.get('EAGER_IMPORT', ''): for attr in __all__: __getattr__(attr) if 
TYPE_CHECKING: from ._commit_scheduler import CommitScheduler from ._inference_endpoints import InferenceEndpoint, InferenceEndpointError, InferenceEndpointStatus, InferenceEndpointTimeoutError, InferenceEndpointType from ._login import interpreter_login, login, logout, notebook_login from ._multi_commits import MultiCommitException, plan_multi_commits from ._snapshot_download import snapshot_download from ._space_api import SpaceHardware, SpaceRuntime, SpaceStage, SpaceStorage, SpaceVariable from ._tensorboard_logger import HFSummaryWriter from ._webhooks_payload import WebhookPayload, WebhookPayloadComment, WebhookPayloadDiscussion, WebhookPayloadDiscussionChanges, WebhookPayloadEvent, WebhookPayloadMovedTo, WebhookPayloadRepo, WebhookPayloadUrl, WebhookPayloadWebhook from ._webhooks_server import WebhooksServer, webhook_endpoint from .community import Discussion, DiscussionComment, DiscussionCommit, DiscussionEvent, DiscussionStatusChange, DiscussionTitleChange, DiscussionWithDetails from .constants import CONFIG_NAME, FLAX_WEIGHTS_NAME, HUGGINGFACE_CO_URL_HOME, HUGGINGFACE_CO_URL_TEMPLATE, PYTORCH_WEIGHTS_NAME, REPO_TYPE_DATASET, REPO_TYPE_MODEL, REPO_TYPE_SPACE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME from .fastai_utils import _save_pretrained_fastai, from_pretrained_fastai, push_to_hub_fastai from .file_download import _CACHED_NO_EXIST, HfFileMetadata, cached_download, get_hf_file_metadata, hf_hub_download, hf_hub_url, try_to_load_from_cache from .hf_api import Collection, CollectionItem, CommitInfo, CommitOperation, CommitOperationAdd, CommitOperationCopy, CommitOperationDelete, DatasetInfo, GitCommitInfo, GitRefInfo, GitRefs, HfApi, ModelInfo, RepoUrl, SpaceInfo, User, UserLikes, WebhookInfo, WebhookWatchedItem, accept_access_request, add_collection_item, add_space_secret, add_space_variable, auth_check, cancel_access_request, change_discussion_status, comment_discussion, create_branch, create_collection, create_commit, create_commits_on_pr, create_discussion, create_inference_endpoint, create_pull_request, create_repo, create_tag, create_webhook, dataset_info, delete_branch, delete_collection, delete_collection_item, delete_file, delete_folder, delete_inference_endpoint, delete_repo, delete_space_secret, delete_space_storage, delete_space_variable, delete_tag, delete_webhook, disable_webhook, duplicate_space, edit_discussion_comment, enable_webhook, file_exists, get_collection, get_dataset_tags, get_discussion_details, get_full_repo_name, get_inference_endpoint, get_model_tags, get_paths_info, get_repo_discussions, get_safetensors_metadata, get_space_runtime, get_space_variables, get_token_permission, get_user_overview, get_webhook, grant_access, like, list_accepted_access_requests, list_collections, list_datasets, list_inference_endpoints, list_liked_repos, list_metrics, list_models, list_organization_members, list_pending_access_requests, list_rejected_access_requests, list_repo_commits, list_repo_files, list_repo_likers, list_repo_refs, list_repo_tree, list_spaces, list_user_followers, list_user_following, list_webhooks, merge_pull_request, model_info, move_repo, parse_safetensors_file_metadata, pause_inference_endpoint, pause_space, preupload_lfs_files, reject_access_request, rename_discussion, repo_exists, repo_info, repo_type_and_id_from_hf_id, request_space_hardware, request_space_storage, restart_space, resume_inference_endpoint, revision_exists, run_as_future, scale_to_zero_inference_endpoint, set_space_sleep_time, space_info, super_squash_history, unlike, 
update_collection_item, update_collection_metadata, update_inference_endpoint, update_repo_settings, update_repo_visibility, update_webhook, upload_file, upload_folder, upload_large_folder, whoami from .hf_file_system import HfFileSystem, HfFileSystemFile, HfFileSystemResolvedPath, HfFileSystemStreamFile from .hub_mixin import ModelHubMixin, PyTorchModelHubMixin from .inference._client import InferenceClient, InferenceTimeoutError from .inference._generated._async_client import AsyncInferenceClient from .inference._generated.types import AudioClassificationInput, AudioClassificationOutputElement, AudioClassificationParameters, AudioToAudioInput, AudioToAudioOutputElement, AutomaticSpeechRecognitionGenerationParameters, AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput, AutomaticSpeechRecognitionOutputChunk, AutomaticSpeechRecognitionParameters, ChatCompletionInput, ChatCompletionInputFunctionDefinition, ChatCompletionInputFunctionName, ChatCompletionInputGrammarType, ChatCompletionInputMessage, ChatCompletionInputMessageChunk, ChatCompletionInputTool, ChatCompletionInputToolTypeClass, ChatCompletionInputURL, ChatCompletionOutput, ChatCompletionOutputComplete, ChatCompletionOutputFunctionDefinition, ChatCompletionOutputLogprob, ChatCompletionOutputLogprobs, ChatCompletionOutputMessage, ChatCompletionOutputToolCall, ChatCompletionOutputTopLogprob, ChatCompletionOutputUsage, ChatCompletionStreamOutput, ChatCompletionStreamOutputChoice, ChatCompletionStreamOutputDelta, ChatCompletionStreamOutputDeltaToolCall, ChatCompletionStreamOutputFunction, ChatCompletionStreamOutputLogprob, ChatCompletionStreamOutputLogprobs, ChatCompletionStreamOutputTopLogprob, DepthEstimationInput, DepthEstimationOutput, DocumentQuestionAnsweringInput, DocumentQuestionAnsweringInputData, DocumentQuestionAnsweringOutputElement, DocumentQuestionAnsweringParameters, FeatureExtractionInput, FillMaskInput, FillMaskOutputElement, FillMaskParameters, ImageClassificationInput, ImageClassificationOutputElement, ImageClassificationParameters, ImageSegmentationInput, ImageSegmentationOutputElement, ImageSegmentationParameters, ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize, ImageToTextGenerationParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters, ObjectDetectionBoundingBox, ObjectDetectionInput, ObjectDetectionOutputElement, ObjectDetectionParameters, QuestionAnsweringInput, QuestionAnsweringInputData, QuestionAnsweringOutputElement, QuestionAnsweringParameters, SentenceSimilarityInput, SentenceSimilarityInputData, SummarizationGenerationParameters, SummarizationInput, SummarizationOutput, TableQuestionAnsweringInput, TableQuestionAnsweringInputData, TableQuestionAnsweringOutputElement, Text2TextGenerationInput, Text2TextGenerationOutput, Text2TextGenerationParameters, TextClassificationInput, TextClassificationOutputElement, TextClassificationParameters, TextGenerationInput, TextGenerationInputGenerateParameters, TextGenerationInputGrammarType, TextGenerationOutput, TextGenerationOutputBestOfSequence, TextGenerationOutputDetails, TextGenerationOutputPrefillToken, TextGenerationOutputToken, TextGenerationStreamOutput, TextGenerationStreamOutputStreamDetails, TextGenerationStreamOutputToken, TextToAudioGenerationParameters, TextToAudioInput, TextToAudioOutput, TextToAudioParameters, TextToImageInput, TextToImageOutput, TextToImageParameters, TextToImageTargetSize, TokenClassificationInput, TokenClassificationOutputElement, TokenClassificationParameters, 
TranslationGenerationParameters, TranslationInput, TranslationOutput, VideoClassificationInput, VideoClassificationOutputElement, VideoClassificationParameters, VisualQuestionAnsweringInput, VisualQuestionAnsweringInputData, VisualQuestionAnsweringOutputElement, VisualQuestionAnsweringParameters, ZeroShotClassificationInput, ZeroShotClassificationInputData, ZeroShotClassificationOutputElement, ZeroShotClassificationParameters, ZeroShotImageClassificationInput, ZeroShotImageClassificationInputData, ZeroShotImageClassificationOutputElement, ZeroShotImageClassificationParameters, ZeroShotObjectDetectionBoundingBox, ZeroShotObjectDetectionInput, ZeroShotObjectDetectionInputData, ZeroShotObjectDetectionOutputElement from .inference_api import InferenceApi from .keras_mixin import KerasModelHubMixin, from_pretrained_keras, push_to_hub_keras, save_pretrained_keras from .repocard import DatasetCard, ModelCard, RepoCard, SpaceCard, metadata_eval_result, metadata_load, metadata_save, metadata_update from .repocard_data import CardData, DatasetCardData, EvalResult, ModelCardData, SpaceCardData from .repository import Repository from .serialization import StateDictSplit, get_tf_storage_size, get_torch_storage_id, get_torch_storage_size, save_torch_model, save_torch_state_dict, split_state_dict_into_shards_factory, split_tf_state_dict_into_shards, split_torch_state_dict_into_shards from .utils import CachedFileInfo, CachedRepoInfo, CachedRevisionInfo, CacheNotFound, CorruptedCacheException, DeleteCacheStrategy, HFCacheInfo, HfFolder, cached_assets_path, configure_http_backend, dump_environment_info, get_session, get_token, logging, scan_cache_dir
# File: huggingface_hub-main/src/huggingface_hub/_commit_api.py
import base64 import io import os import warnings from collections import defaultdict from contextlib import contextmanager from dataclasses import dataclass, field from itertools import groupby from pathlib import Path, PurePosixPath from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, Iterator, List, Literal, Optional, Tuple, Union from tqdm.contrib.concurrent import thread_map from . import constants from .errors import EntryNotFoundError from .file_download import hf_hub_url from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info from .utils import FORBIDDEN_FOLDERS, chunk_iterable, get_session, hf_raise_for_status, logging, sha, tqdm_stream_file, validate_hf_hub_args from .utils import tqdm as hf_tqdm if TYPE_CHECKING: from .hf_api import RepoFile logger = logging.get_logger(__name__) UploadMode = Literal['lfs', 'regular'] FETCH_LFS_BATCH_SIZE = 500
@dataclass class CommitOperationDelete: path_in_repo: str is_folder: Union[bool, Literal['auto']] = 'auto' def __post_init__(self): self.path_in_repo = _validate_path_in_repo(self.path_in_repo) if self.is_folder == 'auto': self.is_folder = self.path_in_repo.endswith('/') if not isinstance(self.is_folder, bool): raise ValueError(f"Wrong value for `is_folder`. Must be one of [`True`, `False`, `'auto'`]. Got '{self.is_folder}'.")
@dataclass class CommitOperationCopy: src_path_in_repo: str path_in_repo: str src_revision: Optional[str] = None def __post_init__(self): self.src_path_in_repo = _validate_path_in_repo(self.src_path_in_repo) self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
@dataclass class CommitOperationAdd: path_in_repo: str path_or_fileobj: Union[str, Path, bytes, BinaryIO] upload_info: UploadInfo = field(init=False, repr=False) _upload_mode: Optional[UploadMode] = field(init=False, repr=False, default=None) _should_ignore: Optional[bool] = field(init=False, repr=False, default=None) _remote_oid: Optional[str] = field(init=False, repr=False, default=None) _is_uploaded: bool = field(init=False, repr=False, default=False) _is_committed: bool = field(init=False, repr=False, default=False) def __post_init__(self) -> None: self.path_in_repo = _validate_path_in_repo(self.path_in_repo) if isinstance(self.path_or_fileobj, Path): self.path_or_fileobj = str(self.path_or_fileobj) if isinstance(self.path_or_fileobj, str): path_or_fileobj = os.path.normpath(os.path.expanduser(self.path_or_fileobj)) if not os.path.isfile(path_or_fileobj): raise ValueError(f"Provided path: '{path_or_fileobj}' is not a file on the local file system") elif not isinstance(self.path_or_fileobj, (io.BufferedIOBase, bytes)): raise ValueError('path_or_fileobj must be either an instance of str, bytes or io.BufferedIOBase. If you passed a file-like object, make sure it is in binary mode.') if isinstance(self.path_or_fileobj, io.BufferedIOBase): try: self.path_or_fileobj.tell() self.path_or_fileobj.seek(0, os.SEEK_CUR) except (OSError, AttributeError) as exc: raise ValueError('path_or_fileobj is a file-like object but does not implement seek() and tell()') from exc if isinstance(self.path_or_fileobj, str): self.upload_info = UploadInfo.from_path(self.path_or_fileobj) elif isinstance(self.path_or_fileobj, bytes): self.upload_info = UploadInfo.from_bytes(self.path_or_fileobj) else: self.upload_info = UploadInfo.from_fileobj(self.path_or_fileobj) @contextmanager def as_file(self, with_tqdm: bool=False) -> Iterator[BinaryIO]: if isinstance(self.path_or_fileobj, str) or isinstance(self.path_or_fileobj, Path): if with_tqdm: with tqdm_stream_file(self.path_or_fileobj) as file: yield file else: with open(self.path_or_fileobj, 'rb') as file: yield file elif isinstance(self.path_or_fileobj, bytes): yield io.BytesIO(self.path_or_fileobj) elif isinstance(self.path_or_fileobj, io.BufferedIOBase): prev_pos = self.path_or_fileobj.tell() yield self.path_or_fileobj self.path_or_fileobj.seek(prev_pos, io.SEEK_SET) def b64content(self) -> bytes: with self.as_file() as file: return base64.b64encode(file.read()) @property def _local_oid(self) -> Optional[str]: if self._upload_mode is None: return None elif self._upload_mode == 'lfs': return self.upload_info.sha256.hex() else: with self.as_file() as file: return sha.git_hash(file.read())
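# Editor's note: a minimal, hypothetical usage sketch (not part of the original module) showing how the
# three CommitOperation dataclasses above are typically combined through HfApi.create_commit. The repo
# id and file paths are placeholders.
def _example_commit_operations():
    from huggingface_hub import CommitOperationAdd, CommitOperationCopy, CommitOperationDelete, HfApi
    api = HfApi()
    operations = [
        CommitOperationAdd(path_in_repo='data/train.csv', path_or_fileobj='local/train.csv'),
        CommitOperationCopy(src_path_in_repo='weights.bin', path_in_repo='backup/weights.bin'),
        CommitOperationDelete(path_in_repo='old_folder/', is_folder=True),
    ]
    # All operations are validated in __post_init__ and applied together in a single commit.
    return api.create_commit(repo_id='user/repo', operations=operations, commit_message='Batch update')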
def _validate_path_in_repo(path_in_repo: str) -> str: if path_in_repo.startswith('/'): path_in_repo = path_in_repo[1:] if path_in_repo == '.' or path_in_repo == '..' or path_in_repo.startswith('../'): raise ValueError(f"Invalid `path_in_repo` in CommitOperation: '{path_in_repo}'") if path_in_repo.startswith('./'): path_in_repo = path_in_repo[2:] for forbidden in FORBIDDEN_FOLDERS: if any((part == forbidden for part in path_in_repo.split('/'))): raise ValueError(f"Invalid `path_in_repo` in CommitOperation: cannot update files under a '{forbidden}/' folder (path: '{path_in_repo}').") return path_in_repo
CommitOperation = Union[CommitOperationAdd, CommitOperationCopy, CommitOperationDelete]
def _warn_on_overwriting_operations(operations: List[CommitOperation]) -> None: nb_additions_per_path: Dict[str, int] = defaultdict(int) for operation in operations: path_in_repo = operation.path_in_repo if isinstance(operation, CommitOperationAdd): if nb_additions_per_path[path_in_repo] > 0: warnings.warn(f"About to update multiple times the same file in the same commit: '{path_in_repo}'. This can cause undesired inconsistencies in your repo.") nb_additions_per_path[path_in_repo] += 1 for parent in PurePosixPath(path_in_repo).parents: nb_additions_per_path[str(parent)] += 1 if isinstance(operation, CommitOperationDelete): if nb_additions_per_path[str(PurePosixPath(path_in_repo))] > 0: if operation.is_folder: warnings.warn(f"About to delete a folder containing files that have just been updated within the same commit: '{path_in_repo}'. This can cause undesired inconsistencies in your repo.") else: warnings.warn(f"About to delete a file that has just been updated within the same commit: '{path_in_repo}'. This can cause undesired inconsistencies in your repo.")
@validate_hf_hub_args def _upload_lfs_files(*, additions: List[CommitOperationAdd], repo_type: str, repo_id: str, headers: Dict[str, str], endpoint: Optional[str]=None, num_threads: int=5, revision: Optional[str]=None): batch_actions: List[Dict] = [] for chunk in chunk_iterable(additions, chunk_size=256): (batch_actions_chunk, batch_errors_chunk) = post_lfs_batch_info(upload_infos=[op.upload_info for op in chunk], repo_id=repo_id, repo_type=repo_type, revision=revision, endpoint=endpoint, headers=headers, token=None) if batch_errors_chunk: message = '\n'.join([f"Encountered error for file with OID {err.get('oid')}: `{err.get('error', {}).get('message')}`" for err in batch_errors_chunk]) raise ValueError(f'LFS batch endpoint returned errors:\n{message}') batch_actions += batch_actions_chunk oid2addop = {add_op.upload_info.sha256.hex(): add_op for add_op in additions} filtered_actions = [] for action in batch_actions: if action.get('actions') is None: logger.debug(f"Content of file {oid2addop[action['oid']].path_in_repo} is already present upstream - skipping upload.") else: filtered_actions.append(action) if len(filtered_actions) == 0: logger.debug('No LFS files to upload.') return def _wrapped_lfs_upload(batch_action) -> None: try: operation = oid2addop[batch_action['oid']] lfs_upload(operation=operation, lfs_batch_action=batch_action, headers=headers, endpoint=endpoint) except Exception as exc: raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc if constants.HF_HUB_ENABLE_HF_TRANSFER: logger.debug(f'Uploading {len(filtered_actions)} LFS files to the Hub using `hf_transfer`.') for action in hf_tqdm(filtered_actions, name='huggingface_hub.lfs_upload'): _wrapped_lfs_upload(action) elif len(filtered_actions) == 1: logger.debug('Uploading 1 LFS file to the Hub') _wrapped_lfs_upload(filtered_actions[0]) else: logger.debug(f'Uploading {len(filtered_actions)} LFS files to the Hub using up to {num_threads} threads concurrently') thread_map(_wrapped_lfs_upload, filtered_actions, desc=f'Upload {len(filtered_actions)} LFS files', max_workers=num_threads, tqdm_class=hf_tqdm)
def _validate_preupload_info(preupload_info: dict): files = preupload_info.get('files') if not isinstance(files, list): raise ValueError('preupload_info is improperly formatted') for file_info in files: if not (isinstance(file_info, dict) and isinstance(file_info.get('path'), str) and isinstance(file_info.get('uploadMode'), str) and (file_info['uploadMode'] in ('lfs', 'regular'))): raise ValueError('preupload_info is improperly formatted') return preupload_info
@validate_hf_hub_args def _fetch_upload_modes(additions: Iterable[CommitOperationAdd], repo_type: str, repo_id: str, headers: Dict[str, str], revision: str, endpoint: Optional[str]=None, create_pr: bool=False, gitignore_content: Optional[str]=None) -> None: endpoint = endpoint if endpoint is not None else constants.ENDPOINT upload_modes: Dict[str, UploadMode] = {} should_ignore_info: Dict[str, bool] = {} oid_info: Dict[str, Optional[str]] = {} for chunk in chunk_iterable(additions, 256): payload: Dict = {'files': [{'path': op.path_in_repo, 'sample': base64.b64encode(op.upload_info.sample).decode('ascii'), 'size': op.upload_info.size} for op in chunk]} if gitignore_content is not None: payload['gitIgnore'] = gitignore_content resp = get_session().post(f'{endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}', json=payload, headers=headers, params={'create_pr': '1'} if create_pr else None) hf_raise_for_status(resp) preupload_info = _validate_preupload_info(resp.json()) upload_modes.update(**{file['path']: file['uploadMode'] for file in preupload_info['files']}) should_ignore_info.update(**{file['path']: file['shouldIgnore'] for file in preupload_info['files']}) oid_info.update(**{file['path']: file.get('oid') for file in preupload_info['files']}) for addition in additions: addition._upload_mode = upload_modes[addition.path_in_repo] addition._should_ignore = should_ignore_info[addition.path_in_repo] addition._remote_oid = oid_info[addition.path_in_repo] for addition in additions: if addition.upload_info.size == 0: addition._upload_mode = 'regular'
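# Editor's note: an illustrative sketch (not in the original source) of the request/response shape used
# by _fetch_upload_modes above, reconstructed from the code: each file is announced with a base64-encoded
# sample and its size, and the server answers with an upload mode per path. Values are placeholders.
def _example_preupload_payload():
    import base64
    sample = b'first bytes of the file'  # placeholder content
    payload = {'files': [{'path': 'data/train.csv', 'sample': base64.b64encode(sample).decode('ascii'), 'size': 1024}]}
    # Response shape expected by _validate_preupload_info:
    # {'files': [{'path': 'data/train.csv', 'uploadMode': 'lfs', 'shouldIgnore': False, 'oid': '...'}]}
    return payload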
@validate_hf_hub_args def _fetch_files_to_copy(copies: Iterable[CommitOperationCopy], repo_type: str, repo_id: str, headers: Dict[str, str], revision: str, endpoint: Optional[str]=None) -> Dict[Tuple[str, Optional[str]], Union['RepoFile', bytes]]: from .hf_api import HfApi, RepoFolder hf_api = HfApi(endpoint=endpoint, headers=headers) files_to_copy: Dict[Tuple[str, Optional[str]], Union['RepoFile', bytes]] = {} for (src_revision, operations) in groupby(copies, key=lambda op: op.src_revision): operations = list(operations) paths = [op.src_path_in_repo for op in operations] for offset in range(0, len(paths), FETCH_LFS_BATCH_SIZE): src_repo_files = hf_api.get_paths_info(repo_id=repo_id, paths=paths[offset:offset + FETCH_LFS_BATCH_SIZE], revision=src_revision or revision, repo_type=repo_type) for src_repo_file in src_repo_files: if isinstance(src_repo_file, RepoFolder): raise NotImplementedError('Copying a folder is not implemented.') if src_repo_file.lfs: files_to_copy[src_repo_file.path, src_revision] = src_repo_file else: url = hf_hub_url(endpoint=endpoint, repo_type=repo_type, repo_id=repo_id, revision=src_revision or revision, filename=src_repo_file.path) response = get_session().get(url, headers=headers) hf_raise_for_status(response) files_to_copy[src_repo_file.path, src_revision] = response.content for operation in operations: if (operation.src_path_in_repo, src_revision) not in files_to_copy: raise EntryNotFoundError(f'Cannot copy {operation.src_path_in_repo} at revision {src_revision or revision}: file is missing on repo.') return files_to_copy
def _prepare_commit_payload(operations: Iterable[CommitOperation], files_to_copy: Dict[Tuple[str, Optional[str]], Union['RepoFile', bytes]], commit_message: str, commit_description: Optional[str]=None, parent_commit: Optional[str]=None) -> Iterable[Dict[str, Any]]: commit_description = commit_description if commit_description is not None else '' header_value = {'summary': commit_message, 'description': commit_description} if parent_commit is not None: header_value['parentCommit'] = parent_commit yield {'key': 'header', 'value': header_value} nb_ignored_files = 0 for operation in operations: if isinstance(operation, CommitOperationAdd) and operation._should_ignore: logger.debug(f"Skipping file '{operation.path_in_repo}' in commit (ignored by gitignore file).") nb_ignored_files += 1 continue if isinstance(operation, CommitOperationAdd) and operation._upload_mode == 'regular': yield {'key': 'file', 'value': {'content': operation.b64content().decode(), 'path': operation.path_in_repo, 'encoding': 'base64'}} elif isinstance(operation, CommitOperationAdd) and operation._upload_mode == 'lfs': yield {'key': 'lfsFile', 'value': {'path': operation.path_in_repo, 'algo': 'sha256', 'oid': operation.upload_info.sha256.hex(), 'size': operation.upload_info.size}} elif isinstance(operation, CommitOperationDelete): yield {'key': 'deletedFolder' if operation.is_folder else 'deletedFile', 'value': {'path': operation.path_in_repo}} elif isinstance(operation, CommitOperationCopy): file_to_copy = files_to_copy[operation.src_path_in_repo, operation.src_revision] if isinstance(file_to_copy, bytes): yield {'key': 'file', 'value': {'content': base64.b64encode(file_to_copy).decode(), 'path': operation.path_in_repo, 'encoding': 'base64'}} elif file_to_copy.lfs: yield {'key': 'lfsFile', 'value': {'path': operation.path_in_repo, 'algo': 'sha256', 'oid': file_to_copy.lfs.sha256}} else: raise ValueError('Malformed files_to_copy (should be raw file content as bytes or RepoFile objects with LFS info).') else: raise ValueError(f"Unknown operation to commit. Operation: {operation}. Upload mode: {getattr(operation, '_upload_mode', None)}") if nb_ignored_files > 0: logger.info(f'Skipped {nb_ignored_files} file(s) in commit (ignored by gitignore file).')
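# Editor's note: an illustrative sketch (not in the original source) of the streamed commit payload that
# _prepare_commit_payload above yields: a 'header' entry first, then one entry per operation ('file',
# 'lfsFile', 'deletedFile' or 'deletedFolder'). All values below are placeholders.
def _example_commit_payload():
    return [
        {'key': 'header', 'value': {'summary': 'Batch update', 'description': ''}},
        {'key': 'file', 'value': {'content': 'aGVsbG8=', 'path': 'README.md', 'encoding': 'base64'}},
        {'key': 'lfsFile', 'value': {'path': 'weights.bin', 'algo': 'sha256', 'oid': '0' * 64, 'size': 1024}},
        {'key': 'deletedFolder', 'value': {'path': 'old_folder'}},
    ]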
# File: huggingface_hub-main/src/huggingface_hub/_commit_scheduler.py
import atexit import logging import os import time from concurrent.futures import Future from dataclasses import dataclass from io import SEEK_END, SEEK_SET, BytesIO from pathlib import Path from threading import Lock, Thread from typing import Dict, List, Optional, Union from .hf_api import DEFAULT_IGNORE_PATTERNS, CommitInfo, CommitOperationAdd, HfApi from .utils import filter_repo_objects logger = logging.getLogger(__name__)
@dataclass(frozen=True) class _FileToUpload: local_path: Path path_in_repo: str size_limit: int last_modified: float
class CommitScheduler: def __init__(self, *, repo_id: str, folder_path: Union[str, Path], every: Union[int, float]=5, path_in_repo: Optional[str]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, private: bool=False, token: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, squash_history: bool=False, hf_api: Optional['HfApi']=None) -> None: self.api = hf_api or HfApi(token=token) self.folder_path = Path(folder_path).expanduser().resolve() self.path_in_repo = path_in_repo or '' self.allow_patterns = allow_patterns if ignore_patterns is None: ignore_patterns = [] elif isinstance(ignore_patterns, str): ignore_patterns = [ignore_patterns] self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS if self.folder_path.is_file(): raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.") self.folder_path.mkdir(parents=True, exist_ok=True) repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True) self.repo_id = repo_url.repo_id self.repo_type = repo_type self.revision = revision self.token = token self.last_uploaded: Dict[Path, float] = {} if not every > 0: raise ValueError(f"'every' must be a positive number of minutes, not '{every}'.") self.lock = Lock() self.every = every self.squash_history = squash_history logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.") self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True) self._scheduler_thread.start() atexit.register(self._push_to_hub) self.__stopped = False def stop(self) -> None: self.__stopped = True def _run_scheduler(self) -> None: while True: self.last_future = self.trigger() time.sleep(self.every * 60) if self.__stopped: break def trigger(self) -> Future: return self.api.run_as_future(self._push_to_hub) def _push_to_hub(self) -> Optional[CommitInfo]: if self.__stopped: return None logger.info('(Background) scheduled commit triggered.') try: value = self.push_to_hub() if self.squash_history: logger.info('(Background) squashing repo history.') self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision) return value except Exception as e: logger.error(f'Error while pushing to Hub: {e}') raise def push_to_hub(self) -> Optional[CommitInfo]: with self.lock: logger.debug('Listing files to upload for scheduled commit.') relpath_to_abspath = {path.relative_to(self.folder_path).as_posix(): path for path in sorted(self.folder_path.glob('**/*')) if path.is_file()} prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else '' files_to_upload: List[_FileToUpload] = [] for relpath in filter_repo_objects(relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns): local_path = relpath_to_abspath[relpath] stat = local_path.stat() if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime: files_to_upload.append(_FileToUpload(local_path=local_path, path_in_repo=prefix + relpath, size_limit=stat.st_size, last_modified=stat.st_mtime)) if len(files_to_upload) == 0: logger.debug('Dropping scheduled commit: no changed file to upload.') return None logger.debug('Removing unchanged files since previous scheduled commit.') add_operations = [CommitOperationAdd(path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit), path_in_repo=file_to_upload.path_in_repo) for file_to_upload in files_to_upload] logger.debug('Uploading files for scheduled commit.') commit_info = self.api.create_commit(repo_id=self.repo_id, repo_type=self.repo_type, operations=add_operations, commit_message='Scheduled Commit', revision=self.revision) for file in files_to_upload: self.last_uploaded[file.local_path] = file.last_modified return commit_info
class PartialFileIO(BytesIO): def __init__(self, file_path: Union[str, Path], size_limit: int) -> None: self._file_path = Path(file_path) self._file = self._file_path.open('rb') self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size) def __del__(self) -> None: self._file.close() return super().__del__() def __repr__(self) -> str: return f'<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>' def __len__(self) -> int: return self._size_limit def __getattribute__(self, name: str): if name.startswith('_') or name in ('read', 'tell', 'seek'): return super().__getattribute__(name) raise NotImplementedError(f"PartialFileIO does not support '{name}'.") def tell(self) -> int: return self._file.tell() def seek(self, __offset: int, __whence: int=SEEK_SET) -> int: if __whence == SEEK_END: __offset = len(self) + __offset __whence = SEEK_SET pos = self._file.seek(__offset, __whence) if pos > self._size_limit: return self._file.seek(self._size_limit) return pos def read(self, __size: Optional[int]=-1) -> bytes: current = self._file.tell() if __size is None or __size < 0: truncated_size = self._size_limit - current else: truncated_size = min(__size, self._size_limit - current) return self._file.read(truncated_size)
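# Editor's note: a minimal usage sketch (not part of the original module) for CommitScheduler. The repo
# id and folder are placeholders; every=10 pushes changed files from the watched folder every 10 minutes
# in a background thread, and an extra push is registered at interpreter exit via atexit.
def _example_commit_scheduler():
    from huggingface_hub import CommitScheduler
    scheduler = CommitScheduler(repo_id='my-username/my-dataset', repo_type='dataset', folder_path='watched_folder', every=10)
    # ... application code writes files under 'watched_folder' ...
    scheduler.stop()  # stop the background loop; already-triggered pushes may still complete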
# File: huggingface_hub-main/src/huggingface_hub/_inference_endpoints.py
import time from dataclasses import dataclass, field from datetime import datetime from enum import Enum from typing import TYPE_CHECKING, Dict, Optional, Union from huggingface_hub.errors import InferenceEndpointError, InferenceEndpointTimeoutError from .inference._client import InferenceClient from .inference._generated._async_client import AsyncInferenceClient from .utils import get_session, logging, parse_datetime if TYPE_CHECKING: from .hf_api import HfApi logger = logging.get_logger(__name__)
class InferenceEndpointStatus(str, Enum): PENDING = 'pending' INITIALIZING = 'initializing' UPDATING = 'updating' UPDATE_FAILED = 'updateFailed' RUNNING = 'running' PAUSED = 'paused' FAILED = 'failed' SCALED_TO_ZERO = 'scaledToZero'
class InferenceEndpointType(str, Enum): PUBLIC = 'public' PROTECTED = 'protected' PRIVATE = 'private'
@dataclass class InferenceEndpoint: name: str = field(init=False) namespace: str repository: str = field(init=False) status: InferenceEndpointStatus = field(init=False) url: Optional[str] = field(init=False) framework: str = field(repr=False, init=False) revision: str = field(repr=False, init=False) task: str = field(repr=False, init=False) created_at: datetime = field(repr=False, init=False) updated_at: datetime = field(repr=False, init=False) type: InferenceEndpointType = field(repr=False, init=False) raw: Dict = field(repr=False) _token: Union[str, bool, None] = field(repr=False, compare=False) _api: 'HfApi' = field(repr=False, compare=False) @classmethod def from_raw(cls, raw: Dict, namespace: str, token: Union[str, bool, None]=None, api: Optional['HfApi']=None) -> 'InferenceEndpoint': if api is None: from .hf_api import HfApi api = HfApi() if token is None: token = api.token return cls(raw=raw, namespace=namespace, _token=token, _api=api) def __post_init__(self) -> None: self._populate_from_raw() @property def client(self) -> InferenceClient: if self.url is None: raise InferenceEndpointError('Cannot create a client for this Inference Endpoint as it is not yet deployed. Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again.') return InferenceClient(model=self.url, token=self._token) @property def async_client(self) -> AsyncInferenceClient: if self.url is None: raise InferenceEndpointError('Cannot create a client for this Inference Endpoint as it is not yet deployed. Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again.') return AsyncInferenceClient(model=self.url, token=self._token) def wait(self, timeout: Optional[int]=None, refresh_every: int=5) -> 'InferenceEndpoint': if timeout is not None and timeout < 0: raise ValueError('`timeout` cannot be negative.') if refresh_every <= 0: raise ValueError('`refresh_every` must be positive.') start = time.time() while True: if self.url is not None: response = get_session().get(self.url, headers=self._api._build_hf_headers(token=self._token)) if response.status_code == 200: logger.info('Inference Endpoint is ready to be used.') return self if self.status == InferenceEndpointStatus.FAILED: raise InferenceEndpointError(f'Inference Endpoint {self.name} failed to deploy. Please check the logs for more information.') if timeout is not None: if time.time() - start > timeout: raise InferenceEndpointTimeoutError('Timeout while waiting for Inference Endpoint to be deployed.') logger.info(f'Inference Endpoint is not deployed yet ({self.status}). Waiting {refresh_every}s...') time.sleep(refresh_every) self.fetch() def fetch(self) -> 'InferenceEndpoint': obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) self.raw = obj.raw self._populate_from_raw() return self def update(self, *, accelerator: Optional[str]=None, instance_size: Optional[str]=None, instance_type: Optional[str]=None, min_replica: Optional[int]=None, max_replica: Optional[int]=None, scale_to_zero_timeout: Optional[int]=None, repository: Optional[str]=None, framework: Optional[str]=None, revision: Optional[str]=None, task: Optional[str]=None, custom_image: Optional[Dict]=None, secrets: Optional[Dict[str, str]]=None) -> 'InferenceEndpoint': obj = self._api.update_inference_endpoint(name=self.name, namespace=self.namespace, accelerator=accelerator, instance_size=instance_size, instance_type=instance_type, min_replica=min_replica, max_replica=max_replica, scale_to_zero_timeout=scale_to_zero_timeout, repository=repository, framework=framework, revision=revision, task=task, custom_image=custom_image, secrets=secrets, token=self._token) self.raw = obj.raw self._populate_from_raw() return self def pause(self) -> 'InferenceEndpoint': obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) self.raw = obj.raw self._populate_from_raw() return self def resume(self, running_ok: bool=True) -> 'InferenceEndpoint': obj = self._api.resume_inference_endpoint(name=self.name, namespace=self.namespace, running_ok=running_ok, token=self._token) self.raw = obj.raw self._populate_from_raw() return self def scale_to_zero(self) -> 'InferenceEndpoint': obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) self.raw = obj.raw self._populate_from_raw() return self def delete(self) -> None: self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) def _populate_from_raw(self) -> None: self.name = self.raw['name'] self.repository = self.raw['model']['repository'] self.status = self.raw['status']['state'] self.url = self.raw['status'].get('url') self.framework = self.raw['model']['framework'] self.revision = self.raw['model']['revision'] self.task = self.raw['model']['task'] self.created_at = parse_datetime(self.raw['status']['createdAt']) self.updated_at = parse_datetime(self.raw['status']['updatedAt']) self.type = self.raw['type']
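# Editor's note: a hedged usage sketch (not in the original source) of the InferenceEndpoint lifecycle
# defined above. The endpoint name is a placeholder and the call assumes a deployed text-generation model.
def _example_inference_endpoint():
    from huggingface_hub import get_inference_endpoint
    endpoint = get_inference_endpoint('my-endpoint-name')  # placeholder name
    endpoint.wait(timeout=300)  # poll until deployed, or raise InferenceEndpointTimeoutError
    output = endpoint.client.text_generation('The huggingface_hub library is ')
    endpoint.pause()  # pause the endpoint until it is explicitly resumed
    return output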
# File: huggingface_hub-main/src/huggingface_hub/_local_folder.py
import logging import os import time from dataclasses import dataclass from functools import lru_cache from pathlib import Path from typing import Optional from .utils import WeakFileLock logger = logging.getLogger(__name__)
@dataclass class LocalDownloadFilePaths: file_path: Path lock_path: Path metadata_path: Path def incomplete_path(self, etag: str) -> Path: return self.metadata_path.with_suffix(f'.{etag}.incomplete')
@dataclass(frozen=True) class LocalUploadFilePaths: path_in_repo: str file_path: Path lock_path: Path metadata_path: Path
@dataclass class LocalDownloadFileMetadata: filename: str commit_hash: str etag: str timestamp: float
@dataclass class LocalUploadFileMetadata: size: int timestamp: Optional[float] = None should_ignore: Optional[bool] = None sha256: Optional[str] = None upload_mode: Optional[str] = None is_uploaded: bool = False is_committed: bool = False def save(self, paths: LocalUploadFilePaths) -> None: with WeakFileLock(paths.lock_path): with paths.metadata_path.open('w') as f: new_timestamp = time.time() f.write(str(new_timestamp) + '\n') f.write(str(self.size)) f.write('\n') if self.should_ignore is not None: f.write(str(int(self.should_ignore))) f.write('\n') if self.sha256 is not None: f.write(self.sha256) f.write('\n') if self.upload_mode is not None: f.write(self.upload_mode) f.write('\n') f.write(str(int(self.is_uploaded)) + '\n') f.write(str(int(self.is_committed)) + '\n') self.timestamp = new_timestamp
@lru_cache(maxsize=128) def get_local_download_paths(local_dir: Path, filename: str) -> LocalDownloadFilePaths: sanitized_filename = os.path.join(*filename.split('/')) if os.name == 'nt': if sanitized_filename.startswith('..\\') or '\\..\\' in sanitized_filename: raise ValueError(f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository owner to rename this file.") file_path = local_dir / sanitized_filename metadata_path = _huggingface_dir(local_dir) / 'download' / f'{sanitized_filename}.metadata' lock_path = metadata_path.with_suffix('.lock') if os.name == 'nt': if not str(local_dir).startswith('\\\\?\\') and len(os.path.abspath(lock_path)) > 255: file_path = Path('\\\\?\\' + os.path.abspath(file_path)) lock_path = Path('\\\\?\\' + os.path.abspath(lock_path)) metadata_path = Path('\\\\?\\' + os.path.abspath(metadata_path)) file_path.parent.mkdir(parents=True, exist_ok=True) metadata_path.parent.mkdir(parents=True, exist_ok=True) return LocalDownloadFilePaths(file_path=file_path, lock_path=lock_path, metadata_path=metadata_path)
@lru_cache(maxsize=128) def get_local_upload_paths(local_dir: Path, filename: str) -> LocalUploadFilePaths: sanitized_filename = os.path.join(*filename.split('/')) if os.name == 'nt': if sanitized_filename.startswith('..\\') or '\\..\\' in sanitized_filename: raise ValueError(f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository owner to rename this file.") file_path = local_dir / sanitized_filename metadata_path = _huggingface_dir(local_dir) / 'upload' / f'{sanitized_filename}.metadata' lock_path = metadata_path.with_suffix('.lock') if os.name == 'nt': if not str(local_dir).startswith('\\\\?\\') and len(os.path.abspath(lock_path)) > 255: file_path = Path('\\\\?\\' + os.path.abspath(file_path)) lock_path = Path('\\\\?\\' + os.path.abspath(lock_path)) metadata_path = Path('\\\\?\\' + os.path.abspath(metadata_path)) file_path.parent.mkdir(parents=True, exist_ok=True) metadata_path.parent.mkdir(parents=True, exist_ok=True) return LocalUploadFilePaths(path_in_repo=filename, file_path=file_path, lock_path=lock_path, metadata_path=metadata_path)
def read_download_metadata(local_dir: Path, filename: str) -> Optional[LocalDownloadFileMetadata]: paths = get_local_download_paths(local_dir, filename) with WeakFileLock(paths.lock_path): if paths.metadata_path.exists(): try: with paths.metadata_path.open() as f: commit_hash = f.readline().strip() etag = f.readline().strip() timestamp = float(f.readline().strip()) metadata = LocalDownloadFileMetadata(filename=filename, commit_hash=commit_hash, etag=etag, timestamp=timestamp) except Exception as e: logger.warning(f'Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing.') try: paths.metadata_path.unlink() except Exception as e: logger.warning(f'Could not remove corrupted metadata file {paths.metadata_path}: {e}') try: stat = paths.file_path.stat() if stat.st_mtime - 1 <= metadata.timestamp: return metadata logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.") except FileNotFoundError: return None return None
def read_upload_metadata(local_dir: Path, filename: str) -> LocalUploadFileMetadata: paths = get_local_upload_paths(local_dir, filename) with WeakFileLock(paths.lock_path): if paths.metadata_path.exists(): try: with paths.metadata_path.open() as f: timestamp = float(f.readline().strip()) size = int(f.readline().strip()) _should_ignore = f.readline().strip() should_ignore = None if _should_ignore == '' else bool(int(_should_ignore)) _sha256 = f.readline().strip() sha256 = None if _sha256 == '' else _sha256 _upload_mode = f.readline().strip() upload_mode = None if _upload_mode == '' else _upload_mode if upload_mode not in (None, 'regular', 'lfs'): raise ValueError(f'Invalid upload mode in metadata {paths.path_in_repo}: {upload_mode}') is_uploaded = bool(int(f.readline().strip())) is_committed = bool(int(f.readline().strip())) metadata = LocalUploadFileMetadata(timestamp=timestamp, size=size, should_ignore=should_ignore, sha256=sha256, upload_mode=upload_mode, is_uploaded=is_uploaded, is_committed=is_committed) except Exception as e: logger.warning(f'Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing.') try: paths.metadata_path.unlink() except Exception as e: logger.warning(f'Could not remove corrupted metadata file {paths.metadata_path}: {e}') if metadata.timestamp is not None and metadata.is_uploaded and (not metadata.is_committed) and (time.time() - metadata.timestamp > 20 * 3600): metadata.is_uploaded = False try: if metadata.timestamp is not None and paths.file_path.stat().st_mtime <= metadata.timestamp: return metadata logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.") except FileNotFoundError: pass return LocalUploadFileMetadata(size=paths.file_path.stat().st_size)
def write_download_metadata(local_dir: Path, filename: str, commit_hash: str, etag: str) -> None: paths = get_local_download_paths(local_dir, filename) with WeakFileLock(paths.lock_path): with paths.metadata_path.open('w') as f: f.write(f'{commit_hash}\n{etag}\n{time.time()}\n')
@lru_cache() def _huggingface_dir(local_dir: Path) -> Path: path = local_dir / '.cache' / 'huggingface' path.mkdir(exist_ok=True, parents=True) gitignore = path / '.gitignore' gitignore_lock = path / '.gitignore.lock' if not gitignore.exists(): try: with WeakFileLock(gitignore_lock): gitignore.write_text('*') gitignore_lock.unlink() except OSError: pass return path
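# Editor's note: an illustrative sketch (not in the original source) of the three-line file layout
# written by write_download_metadata above and parsed back by read_download_metadata: commit hash,
# etag, then the write timestamp. Values are placeholders.
def _example_download_metadata_layout():
    import time
    commit_hash = 'abc123'  # placeholder values
    etag = 'd41d8cd98f00b204e9800998ecf8427e'
    return f'{commit_hash}\n{etag}\n{time.time()}\n'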
# File: huggingface_hub-main/src/huggingface_hub/_login.py
import os import subprocess from functools import partial from getpass import getpass from pathlib import Path from typing import Optional from . import constants from .commands._cli_utils import ANSI from .utils import capture_output, get_token, is_google_colab, is_notebook, list_credential_helpers, logging, run_subprocess, set_git_credential, unset_git_credential from .utils._token import _get_token_from_environment, _get_token_from_google_colab logger = logging.get_logger(__name__)
_HF_LOGO_ASCII = '\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n'
def login(token: Optional[str]=None, add_to_git_credential: bool=False, new_session: bool=True, write_permission: bool=False) -> None: if token is not None: if not add_to_git_credential: print('The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.') _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) elif is_notebook(): notebook_login(new_session=new_session, write_permission=write_permission) else: interpreter_login(new_session=new_session, write_permission=write_permission)
def logout() -> None: if get_token() is None: print('Not logged in!') return unset_git_credential() try: Path(constants.HF_TOKEN_PATH).unlink() except FileNotFoundError: pass if _get_token_from_google_colab() is not None: raise EnvironmentError('You are automatically logged in using a Google Colab secret.\nTo log out, you must unset the `HF_TOKEN` secret in your Colab settings.') if _get_token_from_environment() is not None: raise EnvironmentError('Token has been deleted from your machine but you are still logged in.\nTo log out, you must clear out both `HF_TOKEN` and `HUGGING_FACE_HUB_TOKEN` environment variables.') print('Successfully logged out.')
def interpreter_login(new_session: bool=True, write_permission: bool=False) -> None: if not new_session and _current_token_okay(write_permission=write_permission): print('User is already logged in.') return from .commands.delete_cache import _ask_for_confirmation_no_tui print(_HF_LOGO_ASCII) if get_token() is not None: print(' A token is already saved on your machine. Run `huggingface-cli whoami` to get more information or `huggingface-cli logout` if you want to log out.') print(' Setting a new token will erase the existing one.') print(' To log in, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens .') if os.name == 'nt': print("Token can be pasted using 'Right-Click'.") token = getpass('Enter your token (input will not be visible): ') add_to_git_credential = _ask_for_confirmation_no_tui('Add token as git credential?') _login(token=token, add_to_git_credential=add_to_git_credential, write_permission=write_permission)
NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Immediately click login after typing your password or it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg alt='Hugging Face'> <br> Copy a token from <a href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face tokens page</a> and paste it below. <br> Immediately click login after copying your token or it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_END = """<b>Pro Tip:</b> If you don't already have one, you can create a dedicated 'notebooks' token with 'write' access, that you can then easily reuse for all notebooks."""
def notebook_login(new_session: bool=True, write_permission: bool=False) -> None: try: import ipywidgets.widgets as widgets from IPython.display import display except ImportError: raise ImportError('The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the `ipywidgets` module: `pip install ipywidgets`.') if not new_session and _current_token_okay(write_permission=write_permission): print('User is already logged in.') return box_layout = widgets.Layout(display='flex', flex_flow='column', align_items='center', width='50%') token_widget = widgets.Password(description='Token:') git_checkbox_widget = widgets.Checkbox(value=True, description='Add token as git credential?') token_finish_button = widgets.Button(description='Login') login_token_widget = widgets.VBox([widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START), token_widget, git_checkbox_widget, token_finish_button, widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END)], layout=box_layout) display(login_token_widget) def login_token_event(t, write_permission: bool=False): token = token_widget.value add_to_git_credential = git_checkbox_widget.value token_widget.value = '' login_token_widget.children = [widgets.Label('Connecting...')] try: with capture_output() as captured: _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission) message = captured.getvalue() except Exception as error: message = str(error) login_token_widget.children = [widgets.Label(line) for line in message.split('\n') if line.strip()] token_finish_button.on_click(partial(login_token_event, write_permission=write_permission))
def _login(token: str, add_to_git_credential: bool, write_permission: bool=False) -> None: from .hf_api import get_token_permission if token.startswith('api_org'): raise ValueError('You must use your personal account token, not an organization token.') permission = get_token_permission(token) if permission is None: raise ValueError('Invalid token passed!') elif write_permission and permission != 'write': raise ValueError("Token is valid but is 'read-only' and a 'write' token is required.\nPlease provide a new token with correct permission.") print(f'Token is valid (permission: {permission}).') if add_to_git_credential: if _is_git_credential_helper_configured(): set_git_credential(token) print('Your token has been saved in your configured git credential helpers' + f" ({','.join(list_credential_helpers())}).") else: print('Token has not been saved to git credential helper.') path = Path(constants.HF_TOKEN_PATH) path.parent.mkdir(parents=True, exist_ok=True) path.write_text(token) print(f'Your token has been saved to {constants.HF_TOKEN_PATH}') print('Login successful')
def _current_token_okay(write_permission: bool=False): from .hf_api import get_token_permission permission = get_token_permission() if permission is None or (write_permission and permission != 'write'): return False return True
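# Editor's note: a minimal sketch (not part of the original module) of programmatic login with the
# functions above. The token value is a placeholder; never hard-code a real token.
def _example_login():
    from huggingface_hub import login, logout
    login(token='hf_xxx', add_to_git_credential=False)  # placeholder token
    logout()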
terminal in case you want to set the 'store' credential helper as default.\n\ngit config --global credential.helper store\n\nRead https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more details.")) return False def _set_store_as_git_credential_helper_globally() -> None: try: run_subprocess('git config --global credential.helper store') except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) # File: huggingface_hub-main/src/huggingface_hub/_multi_commits.py """""" import re from dataclasses import dataclass, field from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Union from ._commit_api import CommitOperationAdd, CommitOperationDelete from .community import DiscussionWithDetails from .utils import experimental from .utils._cache_manager import _format_size from .utils.insecure_hashlib import sha256 if TYPE_CHECKING: from .hf_api import HfApi class MultiCommitException(Exception): MULTI_COMMIT_PR_DESCRIPTION_TEMPLATE = '\n## {commit_message}\n\n{commit_description}\n\n**Multi commit ID:** {multi_commit_id}\n\nScheduled commits:\n\n{multi_commit_strategy}\n\n_This is a PR opened using the `huggingface_hub` library in the context of a multi-commit. PR can be commented as a usual PR. However, please be aware that manually updating the PR description, changing the PR status, or pushing new commits, is not recommended as it might corrupt the commit process. Learn more about multi-commits [in this guide](https://huggingface.co/docs/huggingface_hub/main/guides/upload)._\n' MULTI_COMMIT_PR_COMPLETION_COMMENT_TEMPLATE = '\nMulti-commit is now completed! You can ping the repo owner to review the changes. This PR can now be commented or modified without risking to corrupt it.\n\n_This is a comment posted using the `huggingface_hub` library in the context of a multi-commit. Learn more about multi-commits [in this guide](https://huggingface.co/docs/huggingface_hub/main/guides/upload)._\n' MULTI_COMMIT_PR_CLOSING_COMMENT_TEMPLATE = '\n`create_pr=False` has been passed so PR is automatically merged.\n\n_This is a comment posted using the `huggingface_hub` library in the context of a multi-commit. Learn more about multi-commits [in this guide](https://huggingface.co/docs/huggingface_hub/main/guides/upload)._\n' MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_NO_CHANGES_TEMPLATE = '\nCannot merge Pull Requests as no changes are associated. This PR will be closed automatically.\n\n_This is a comment posted using the `huggingface_hub` library in the context of a multi-commit. Learn more about multi-commits [in this guide](https://huggingface.co/docs/huggingface_hub/main/guides/upload)._\n' MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_BAD_REQUEST_TEMPLATE = '\nAn error occurred while trying to merge the Pull Request: `{error_message}`.\n\n_This is a comment posted using the `huggingface_hub` library in the context of a multi-commit. 
Learn more about multi-commits [in this guide](https://huggingface.co/docs/huggingface_hub/main/guides/upload)._\n' STEP_ID_REGEX = re.compile('- \\[(?P<completed>[ |x])\\].*(?P<step_id>[a-fA-F0-9]{64})', flags=re.MULTILINE) @experimental def plan_multi_commits(operations: Iterable[Union[CommitOperationAdd, CommitOperationDelete]], max_operations_per_commit: int=50, max_upload_size_per_commit: int=2 * 1024 * 1024 * 1024) -> Tuple[List[List[CommitOperationAdd]], List[List[CommitOperationDelete]]]: addition_commits: List[List[CommitOperationAdd]] = [] deletion_commits: List[List[CommitOperationDelete]] = [] additions: List[CommitOperationAdd] = [] additions_size = 0 deletions: List[CommitOperationDelete] = [] for op in operations: if isinstance(op, CommitOperationDelete): deletions.append(op) if len(deletions) >= max_operations_per_commit: deletion_commits.append(deletions) deletions = [] elif op.upload_info.size >= max_upload_size_per_commit: addition_commits.append([op]) elif additions_size + op.upload_info.size < max_upload_size_per_commit: additions.append(op) additions_size += op.upload_info.size else: addition_commits.append(additions) additions = [op] additions_size = op.upload_info.size if len(additions) >= max_operations_per_commit: addition_commits.append(additions) additions = [] additions_size = 0 if len(additions) > 0: addition_commits.append(additions) if len(deletions) > 0: deletion_commits.append(deletions) return (addition_commits, deletion_commits) @dataclass class MultiCommitStep: operations: List[Union[CommitOperationAdd, CommitOperationDelete]] id: str = field(init=False) completed: bool = False def __post_init__(self) -> None: if len(self.operations) == 0: raise ValueError('A MultiCommitStep must have at least 1 commit operation, got 0.') sha = sha256() for op in self.operations: if isinstance(op, CommitOperationAdd): sha.update(b'ADD') sha.update(op.path_in_repo.encode()) sha.update(op.upload_info.sha256) elif isinstance(op, CommitOperationDelete): sha.update(b'DELETE') sha.update(op.path_in_repo.encode()) sha.update(str(op.is_folder).encode()) else: raise NotImplementedError() self.id = sha.hexdigest() def __str__(self) -> str: additions = [op for op in self.operations if isinstance(op, CommitOperationAdd)] file_deletions = [op for op in self.operations if isinstance(op, CommitOperationDelete) and (not op.is_folder)] folder_deletions = [op for op in self.operations if isinstance(op, CommitOperationDelete) and op.is_folder] if len(additions) > 0: return f"- [{('x' if self.completed else ' ')}] Upload {len(additions)} file(s) totalling {_format_size(sum((add.upload_info.size for add in additions)))} ({self.id})" else: return f"- [{('x' if self.completed else ' ')}] Delete {len(file_deletions)} file(s) and {len(folder_deletions)} folder(s) ({self.id})" @dataclass class MultiCommitStrategy: addition_commits: List[MultiCommitStep] deletion_commits: List[MultiCommitStep] id: str = field(init=False) all_steps: Set[str] = field(init=False) def __post_init__(self) -> None: self.all_steps = {step.id for step in self.addition_commits + self.deletion_commits} if len(self.all_steps) < len(self.addition_commits) + len(self.deletion_commits): raise ValueError('Got duplicate commits in MultiCommitStrategy. All commits must be unique.') if len(self.all_steps) == 0: raise ValueError('A MultiCommitStrategy must have at least 1 commit, got 0.') sha = sha256() for step in self.addition_commits + self.deletion_commits: sha.update('new step'.encode()) sha.update(step.id.encode()) self.id = sha.hexdigest()
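# A minimal usage sketch for `plan_multi_commits` above (hedged: file names, sizes and the
# resulting split are illustrative; `CommitOperationAdd` reads each local file on construction,
# so the paths must exist locally):
#
#     from huggingface_hub import CommitOperationAdd, plan_multi_commits
#     ops = [CommitOperationAdd(path_in_repo=f'shard-{i}.bin', path_or_fileobj=f'local/shard-{i}.bin') for i in range(120)]
#     addition_commits, deletion_commits = plan_multi_commits(ops)
#     # with the default limits (50 operations / 2 GiB per commit), 120 small
#     # additions yield 3 addition batches and no deletion batches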
def multi_commit_create_pull_request(api: 'HfApi', repo_id: str, commit_message: str, commit_description: Optional[str], strategy: MultiCommitStrategy, repo_type: Optional[str], token: Union[str, bool, None]=None) -> DiscussionWithDetails: return api.create_pull_request(repo_id=repo_id, title=f'[WIP] {commit_message} (multi-commit {strategy.id})', description=multi_commit_generate_comment(commit_message=commit_message, commit_description=commit_description, strategy=strategy), token=token, repo_type=repo_type) def multi_commit_generate_comment(commit_message: str, commit_description: Optional[str], strategy: MultiCommitStrategy) -> str: return MULTI_COMMIT_PR_DESCRIPTION_TEMPLATE.format(commit_message=commit_message, commit_description=commit_description or '', multi_commit_id=strategy.id, multi_commit_strategy='\n'.join((str(commit) for commit in strategy.deletion_commits + strategy.addition_commits))) def multi_commit_parse_pr_description(description: str) -> Set[str]: return {match[1] for match in STEP_ID_REGEX.findall(description)} # File: huggingface_hub-main/src/huggingface_hub/_snapshot_download.py import os from pathlib import Path from typing import Dict, List, Literal, Optional, Union import requests from tqdm.auto import tqdm as base_tqdm from tqdm.contrib.concurrent import thread_map from . import constants from .errors import GatedRepoError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from .file_download import REGEX_COMMIT_HASH, hf_hub_download, repo_folder_name from .hf_api import DatasetInfo, HfApi, ModelInfo, SpaceInfo from .utils import OfflineModeIsEnabled, filter_repo_objects, logging, validate_hf_hub_args from .utils import tqdm as hf_tqdm logger = logging.get_logger(__name__) @validate_hf_hub_args def snapshot_download(repo_id: str, *, repo_type: Optional[str]=None, revision: Optional[str]=None, cache_dir: Union[str, Path, None]=None, local_dir: Union[str, Path, None]=None, library_name: Optional[str]=None, library_version: Optional[str]=None, user_agent: Optional[Union[Dict, str]]=None, proxies: Optional[Dict]=None, etag_timeout: float=constants.DEFAULT_ETAG_TIMEOUT, force_download: bool=False, token: Optional[Union[bool, str]]=None, local_files_only: bool=False, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, max_workers: int=8, tqdm_class: Optional[base_tqdm]=None, headers: Optional[Dict[str, str]]=None, endpoint: Optional[str]=None, local_dir_use_symlinks: Union[bool, Literal['auto']]='auto', resume_download: Optional[bool]=None) -> str: if cache_dir is None: cache_dir = constants.HF_HUB_CACHE if revision is None: revision = constants.DEFAULT_REVISION if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if repo_type is None: repo_type = 'model' if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type: {repo_type}.
Accepted repo types are: {str(constants.REPO_TYPES)}') storage_folder = os.path.join(cache_dir, repo_folder_name(repo_id=repo_id, repo_type=repo_type)) repo_info: Union[ModelInfo, DatasetInfo, SpaceInfo, None] = None api_call_error: Optional[Exception] = None if not local_files_only: try: api = HfApi(library_name=library_name, library_version=library_version, user_agent=user_agent, endpoint=endpoint, headers=headers) repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision, token=token) except (requests.exceptions.SSLError, requests.exceptions.ProxyError): raise except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, OfflineModeIsEnabled) as error: api_call_error = error pass except RevisionNotFoundError: raise except requests.HTTPError as error: api_call_error = error pass if repo_info is None: commit_hash = None if REGEX_COMMIT_HASH.match(revision): commit_hash = revision else: ref_path = os.path.join(storage_folder, 'refs', revision) if os.path.exists(ref_path): with open(ref_path) as f: commit_hash = f.read() if commit_hash is not None: snapshot_folder = os.path.join(storage_folder, 'snapshots', commit_hash) if os.path.exists(snapshot_folder): return snapshot_folder if local_files_only: raise LocalEntryNotFoundError("Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and outgoing traffic has been disabled. To enable repo look-ups and downloads online, pass 'local_files_only=False' as input.") elif isinstance(api_call_error, OfflineModeIsEnabled): raise LocalEntryNotFoundError("Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and outgoing traffic has been disabled. To enable repo look-ups and downloads online, set 'HF_HUB_OFFLINE=0' as environment variable.") from api_call_error elif isinstance(api_call_error, RepositoryNotFoundError) or isinstance(api_call_error, GatedRepoError): raise api_call_error else: raise LocalEntryNotFoundError('An error happened while trying to locate the files on the Hub and we cannot find the appropriate snapshot folder for the specified revision on the local disk. Please check your internet connection and try again.') from api_call_error assert repo_info.sha is not None, 'Repo info returned from server must have a revision sha.' assert repo_info.siblings is not None, 'Repo info returned from server must have a siblings list.' 
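# A quick sketch of the filtering step below (hedged: file names are illustrative, and the
# matching is fnmatch-style globbing). `filter_repo_objects` keeps items matching any
# `allow_patterns` glob, then drops items matching `ignore_patterns`:
#
#     from huggingface_hub.utils import filter_repo_objects
#     files = ['config.json', 'model.safetensors', 'logs/run.txt']
#     kept = list(filter_repo_objects(items=files, allow_patterns=['*.json', '*.safetensors'], ignore_patterns=['logs/*']))
#     # kept == ['config.json', 'model.safetensors']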
filtered_repo_files = list(filter_repo_objects(items=[f.rfilename for f in repo_info.siblings], allow_patterns=allow_patterns, ignore_patterns=ignore_patterns)) commit_hash = repo_info.sha snapshot_folder = os.path.join(storage_folder, 'snapshots', commit_hash) if revision != commit_hash: ref_path = os.path.join(storage_folder, 'refs', revision) os.makedirs(os.path.dirname(ref_path), exist_ok=True) with open(ref_path, 'w') as f: f.write(commit_hash) def _inner_hf_hub_download(repo_file: str): return hf_hub_download(repo_id, filename=repo_file, repo_type=repo_type, revision=commit_hash, endpoint=endpoint, cache_dir=cache_dir, local_dir=local_dir, local_dir_use_symlinks=local_dir_use_symlinks, library_name=library_name, library_version=library_version, user_agent=user_agent, proxies=proxies, etag_timeout=etag_timeout, resume_download=resume_download, force_download=force_download, token=token, headers=headers) if constants.HF_HUB_ENABLE_HF_TRANSFER: for file in filtered_repo_files: _inner_hf_hub_download(file) else: thread_map(_inner_hf_hub_download, filtered_repo_files, desc=f'Fetching {len(filtered_repo_files)} files', max_workers=max_workers, tqdm_class=tqdm_class or hf_tqdm) if local_dir is not None: return str(os.path.realpath(local_dir)) return snapshot_folder # File: huggingface_hub-main/src/huggingface_hub/_space_api.py from dataclasses import dataclass from datetime import datetime from enum import Enum from typing import Dict, Optional from huggingface_hub.utils import parse_datetime class SpaceStage(str, Enum): NO_APP_FILE = 'NO_APP_FILE' CONFIG_ERROR = 'CONFIG_ERROR' BUILDING = 'BUILDING' BUILD_ERROR = 'BUILD_ERROR' RUNNING = 'RUNNING' RUNNING_BUILDING = 'RUNNING_BUILDING' RUNTIME_ERROR = 'RUNTIME_ERROR' DELETING = 'DELETING' STOPPED = 'STOPPED' PAUSED = 'PAUSED' class SpaceHardware(str, Enum): CPU_BASIC = 'cpu-basic' CPU_UPGRADE = 'cpu-upgrade' T4_SMALL = 't4-small' T4_MEDIUM = 't4-medium' L4X1 = 'l4x1' L4X4 = 'l4x4' ZERO_A10G = 'zero-a10g' A10G_SMALL = 'a10g-small' A10G_LARGE = 'a10g-large' A10G_LARGEX2 = 'a10g-largex2' A10G_LARGEX4 = 'a10g-largex4' A100_LARGE = 'a100-large' V5E_1X1 = 'v5e-1x1' V5E_2X2 = 'v5e-2x2' V5E_2X4 = 'v5e-2x4' class SpaceStorage(str, Enum): SMALL = 'small' MEDIUM = 'medium' LARGE = 'large' @dataclass class SpaceRuntime: stage: SpaceStage hardware: Optional[SpaceHardware] requested_hardware: Optional[SpaceHardware] sleep_time: Optional[int] storage: Optional[SpaceStorage] raw: Dict def __init__(self, data: Dict) -> None: self.stage = data['stage'] self.hardware = data.get('hardware', {}).get('current') self.requested_hardware = data.get('hardware', {}).get('requested') self.sleep_time = data.get('gcTimeout') self.storage = data.get('storage') self.raw = data @dataclass class SpaceVariable: key: str value: str description: Optional[str] updated_at: Optional[datetime] def __init__(self, key: str, values: Dict) -> None: self.key = key self.value = values['value'] self.description = values.get('description') updated_at = values.get('updatedAt') self.updated_at = parse_datetime(updated_at) if updated_at is not None else None # File: huggingface_hub-main/src/huggingface_hub/_tensorboard_logger.py """""" from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Union from ._commit_scheduler import CommitScheduler from .errors import EntryNotFoundError from .repocard import ModelCard from .utils import experimental try: from tensorboardX import SummaryWriter is_summary_writer_available = True except ImportError: try: from torch.utils.tensorboard 
import SummaryWriter is_summary_writer_available = True except ImportError: SummaryWriter = object is_summary_writer_available = False if TYPE_CHECKING: from tensorboardX import SummaryWriter class HFSummaryWriter(SummaryWriter): @experimental def __new__(cls, *args, **kwargs) -> 'HFSummaryWriter': if not is_summary_writer_available: raise ImportError('You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade tensorboardX` first.') return super().__new__(cls) def __init__(self, repo_id: str, *, logdir: Optional[str]=None, commit_every: Union[int, float]=5, squash_history: bool=False, repo_type: Optional[str]=None, repo_revision: Optional[str]=None, repo_private: bool=False, path_in_repo: Optional[str]='tensorboard', repo_allow_patterns: Optional[Union[List[str], str]]='*.tfevents.*', repo_ignore_patterns: Optional[Union[List[str], str]]=None, token: Optional[str]=None, **kwargs): super().__init__(logdir=logdir, **kwargs) if not isinstance(self.logdir, str): raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.") if path_in_repo is None or path_in_repo == '': path_in_repo = Path(self.logdir).name else: path_in_repo = path_in_repo.strip('/') + '/' + Path(self.logdir).name self.scheduler = CommitScheduler(folder_path=self.logdir, path_in_repo=path_in_repo, repo_id=repo_id, repo_type=repo_type, revision=repo_revision, private=repo_private, token=token, allow_patterns=repo_allow_patterns, ignore_patterns=repo_ignore_patterns, every=commit_every, squash_history=squash_history) self.repo_id = self.scheduler.repo_id self.repo_type = self.scheduler.repo_type self.repo_revision = self.scheduler.revision try: card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type) except EntryNotFoundError: card = ModelCard('') tags = card.data.get('tags', []) if 'hf-summary-writer' not in tags: tags.append('hf-summary-writer') card.data['tags'] = tags card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type) def __exit__(self, exc_type, exc_val, exc_tb): super().__exit__(exc_type, exc_val, exc_tb) future = self.scheduler.trigger() future.result() # File: huggingface_hub-main/src/huggingface_hub/_upload_large_folder.py import enum import logging import os import queue import shutil import sys import threading import time import traceback from datetime import datetime from pathlib import Path from threading import Lock from typing import TYPE_CHECKING, List, Optional, Tuple, Union from . import constants from ._commit_api import CommitOperationAdd, UploadInfo, _fetch_upload_modes from ._local_folder import LocalUploadFileMetadata, LocalUploadFilePaths, get_local_upload_paths, read_upload_metadata from .constants import DEFAULT_REVISION, REPO_TYPES from .utils import DEFAULT_IGNORE_PATTERNS, filter_repo_objects, tqdm from .utils._cache_manager import _format_size from .utils.sha import sha_fileobj if TYPE_CHECKING: from .hf_api import HfApi logger = logging.getLogger(__name__) WAITING_TIME_IF_NO_TASKS = 10 def upload_large_folder_internal(api: 'HfApi', repo_id: str, folder_path: Union[str, Path], *, repo_type: str, revision: Optional[str]=None, private: bool=False, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, num_workers: Optional[int]=None, print_report: bool=True, print_report_every: int=60): if repo_type is None: raise ValueError('For large uploads, `repo_type` is explicitly required. Please set it to `model`, `dataset` or `space`.
If you are using the CLI, pass it as `--repo-type=model`.') if repo_type not in REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {REPO_TYPES}') if revision is None: revision = DEFAULT_REVISION folder_path = Path(folder_path).expanduser().resolve() if not folder_path.is_dir(): raise ValueError(f"Provided path: '{folder_path}' is not a directory") if ignore_patterns is None: ignore_patterns = [] elif isinstance(ignore_patterns, str): ignore_patterns = [ignore_patterns] ignore_patterns += DEFAULT_IGNORE_PATTERNS if num_workers is None: nb_cores = os.cpu_count() or 1 num_workers = max(nb_cores - 2, 2) repo_url = api.create_repo(repo_id=repo_id, repo_type=repo_type, private=private, exist_ok=True) logger.info(f'Repo created: {repo_url}') repo_id = repo_url.repo_id filtered_paths_list = filter_repo_objects((path.relative_to(folder_path).as_posix() for path in folder_path.glob('**/*') if path.is_file()), allow_patterns=allow_patterns, ignore_patterns=ignore_patterns) paths_list = [get_local_upload_paths(folder_path, relpath) for relpath in filtered_paths_list] logger.info(f'Found {len(paths_list)} candidate files to upload') items = [(paths, read_upload_metadata(folder_path, paths.path_in_repo)) for paths in tqdm(paths_list, desc='Recovering from metadata files')] status = LargeUploadStatus(items) threads = [threading.Thread(target=_worker_job, kwargs={'status': status, 'api': api, 'repo_id': repo_id, 'repo_type': repo_type, 'revision': revision}) for _ in range(num_workers)] for thread in threads: thread.start() if print_report: print('\n\n' + status.current_report()) last_report_ts = time.time() while True: time.sleep(1) if time.time() - last_report_ts >= print_report_every: if print_report: _print_overwrite(status.current_report()) last_report_ts = time.time() if status.is_done(): logging.info('Is done: exiting main loop') break for thread in threads: thread.join() logger.info(status.current_report()) logging.info('Upload is complete!') class WorkerJob(enum.Enum): SHA256 = enum.auto() GET_UPLOAD_MODE = enum.auto() PREUPLOAD_LFS = enum.auto() COMMIT = enum.auto() WAIT = enum.auto() JOB_ITEM_T = Tuple[LocalUploadFilePaths, LocalUploadFileMetadata] class LargeUploadStatus: def __init__(self, items: List[JOB_ITEM_T]): self.items = items self.queue_sha256: 'queue.Queue[JOB_ITEM_T]' = queue.Queue() self.queue_get_upload_mode: 'queue.Queue[JOB_ITEM_T]' = queue.Queue() self.queue_preupload_lfs: 'queue.Queue[JOB_ITEM_T]' = queue.Queue() self.queue_commit: 'queue.Queue[JOB_ITEM_T]' = queue.Queue() self.lock = Lock() self.nb_workers_sha256: int = 0 self.nb_workers_get_upload_mode: int = 0 self.nb_workers_preupload_lfs: int = 0 self.nb_workers_commit: int = 0 self.nb_workers_waiting: int = 0 self.last_commit_attempt: Optional[float] = None self._started_at = datetime.now() for item in self.items: (paths, metadata) = item if metadata.sha256 is None: self.queue_sha256.put(item) elif metadata.upload_mode is None: self.queue_get_upload_mode.put(item) elif metadata.upload_mode == 'lfs' and (not metadata.is_uploaded): self.queue_preupload_lfs.put(item) elif not metadata.is_committed: self.queue_commit.put(item) else: logger.debug(f'Skipping file {paths.path_in_repo} (already uploaded and committed)') def current_report(self) -> str: nb_hashed = 0 size_hashed = 0 nb_preuploaded = 0 nb_lfs = 0 nb_lfs_unsure = 0 size_preuploaded = 0 nb_committed = 0 size_committed = 0 total_size = 0 ignored_files = 0 total_files = 0 with self.lock: for (_, metadata) in self.items: if metadata.should_ignore: 
ignored_files += 1 continue total_size += metadata.size total_files += 1 if metadata.sha256 is not None: nb_hashed += 1 size_hashed += metadata.size if metadata.upload_mode == 'lfs': nb_lfs += 1 if metadata.upload_mode is None: nb_lfs_unsure += 1 if metadata.is_uploaded: nb_preuploaded += 1 size_preuploaded += metadata.size if metadata.is_committed: nb_committed += 1 size_committed += metadata.size total_size_str = _format_size(total_size) now = datetime.now() now_str = now.strftime('%Y-%m-%d %H:%M:%S') elapsed = now - self._started_at elapsed_str = str(elapsed).split('.')[0] message = '\n' + '-' * 10 message += f' {now_str} ({elapsed_str}) ' message += '-' * 10 + '\n' message += 'Files: ' message += f'hashed {nb_hashed}/{total_files} ({_format_size(size_hashed)}/{total_size_str}) | ' message += f'pre-uploaded: {nb_preuploaded}/{nb_lfs} ({_format_size(size_preuploaded)}/{total_size_str})' if nb_lfs_unsure > 0: message += f' (+{nb_lfs_unsure} unsure)' message += f' | committed: {nb_committed}/{total_files} ({_format_size(size_committed)}/{total_size_str})' message += f' | ignored: {ignored_files}\n' message += 'Workers: ' message += f'hashing: {self.nb_workers_sha256} | ' message += f'get upload mode: {self.nb_workers_get_upload_mode} | ' message += f'pre-uploading: {self.nb_workers_preupload_lfs} | ' message += f'committing: {self.nb_workers_commit} | ' message += f'waiting: {self.nb_workers_waiting}\n' message += '-' * 51 return message def is_done(self) -> bool: with self.lock: return all((metadata.is_committed or metadata.should_ignore for (_, metadata) in self.items)) def _worker_job(status: LargeUploadStatus, api: 'HfApi', repo_id: str, repo_type: str, revision: str): while True: next_job: Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]] = None next_job = _determine_next_job(status) if next_job is None: return (job, items) = next_job if job == WorkerJob.SHA256: item = items[0] try: _compute_sha256(item) status.queue_get_upload_mode.put(item) except KeyboardInterrupt: raise except Exception as e: logger.error(f'Failed to compute sha256: {e}') logger.error(traceback.format_exc()) status.queue_sha256.put(item) with status.lock: status.nb_workers_sha256 -= 1 elif job == WorkerJob.GET_UPLOAD_MODE: try: _get_upload_mode(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision) except KeyboardInterrupt: raise except Exception as e: logger.error(f'Failed to get upload mode: {e}') logger.error(traceback.format_exc()) for item in items: (_, metadata) = item if metadata.should_ignore: continue if metadata.upload_mode == 'lfs': status.queue_preupload_lfs.put(item) elif metadata.upload_mode == 'regular': status.queue_commit.put(item) else: status.queue_get_upload_mode.put(item) with status.lock: status.nb_workers_get_upload_mode -= 1 elif job == WorkerJob.PREUPLOAD_LFS: item = items[0] try: _preupload_lfs(item, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision) status.queue_commit.put(item) except KeyboardInterrupt: raise except Exception as e: logger.error(f'Failed to preupload LFS: {e}') logger.error(traceback.format_exc()) status.queue_preupload_lfs.put(item) with status.lock: status.nb_workers_preupload_lfs -= 1 elif job == WorkerJob.COMMIT: try: _commit(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision) except KeyboardInterrupt: raise except Exception as e: logger.error(f'Failed to commit: {e}') logger.error(traceback.format_exc()) for item in items: status.queue_commit.put(item) with status.lock: status.last_commit_attempt = time.time() status.nb_workers_commit -= 1 elif job == WorkerJob.WAIT:
time.sleep(WAITING_TIME_IF_NO_TASKS) with status.lock: status.nb_workers_waiting -= 1 def _determine_next_job(status: LargeUploadStatus) -> Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]]: with status.lock: if status.nb_workers_commit == 0 and status.queue_commit.qsize() > 0 and (status.last_commit_attempt is None or time.time() - status.last_commit_attempt > 5 * 60): status.nb_workers_commit += 1 logger.debug('Job: commit (more than 5 minutes since last commit attempt)') return (WorkerJob.COMMIT, _get_n(status.queue_commit, 25)) elif status.nb_workers_commit == 0 and status.queue_commit.qsize() >= 25: status.nb_workers_commit += 1 logger.debug('Job: commit (>25 files ready)') return (WorkerJob.COMMIT, _get_n(status.queue_commit, 25)) elif status.queue_get_upload_mode.qsize() >= 10: status.nb_workers_get_upload_mode += 1 logger.debug('Job: get upload mode (>10 files ready)') return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, 50)) elif status.queue_preupload_lfs.qsize() > 0 and status.nb_workers_preupload_lfs == 0: status.nb_workers_preupload_lfs += 1 logger.debug('Job: preupload LFS (no other worker preuploading LFS)') return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs)) elif status.queue_sha256.qsize() > 0 and status.nb_workers_sha256 == 0: status.nb_workers_sha256 += 1 logger.debug('Job: sha256 (no other worker computing sha256)') return (WorkerJob.SHA256, _get_one(status.queue_sha256)) elif status.queue_get_upload_mode.qsize() > 0 and status.nb_workers_get_upload_mode == 0: status.nb_workers_get_upload_mode += 1 logger.debug('Job: get upload mode (no other worker getting upload mode)') return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, 50)) elif status.queue_preupload_lfs.qsize() > 0 and (status.nb_workers_preupload_lfs == 0 or not constants.HF_HUB_ENABLE_HF_TRANSFER): status.nb_workers_preupload_lfs += 1 logger.debug('Job: preupload LFS') return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs)) elif status.queue_sha256.qsize() > 0: status.nb_workers_sha256 += 1 logger.debug('Job: sha256') return (WorkerJob.SHA256, _get_one(status.queue_sha256)) elif status.queue_get_upload_mode.qsize() > 0: status.nb_workers_get_upload_mode += 1 logger.debug('Job: get upload mode') return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, 50)) elif status.nb_workers_commit == 0 and status.queue_commit.qsize() > 0: status.nb_workers_commit += 1 logger.debug('Job: commit') return (WorkerJob.COMMIT, _get_n(status.queue_commit, 25)) elif all((metadata.is_committed or metadata.should_ignore for (_, metadata) in status.items)): logger.info('All files have been processed! Exiting worker.') return None else: status.nb_workers_waiting += 1 logger.debug(f'No task available, waiting... 
({WAITING_TIME_IF_NO_TASKS}s)') return (WorkerJob.WAIT, []) def _compute_sha256(item: JOB_ITEM_T) -> None: (paths, metadata) = item if metadata.sha256 is None: with paths.file_path.open('rb') as f: metadata.sha256 = sha_fileobj(f).hex() metadata.save(paths) def _get_upload_mode(items: List[JOB_ITEM_T], api: 'HfApi', repo_id: str, repo_type: str, revision: str) -> None: additions = [_build_hacky_operation(item) for item in items] _fetch_upload_modes(additions=additions, repo_type=repo_type, repo_id=repo_id, headers=api._build_hf_headers(), revision=revision) for (item, addition) in zip(items, additions): (paths, metadata) = item metadata.upload_mode = addition._upload_mode metadata.should_ignore = addition._should_ignore metadata.save(paths) def _preupload_lfs(item: JOB_ITEM_T, api: 'HfApi', repo_id: str, repo_type: str, revision: str) -> None: (paths, metadata) = item addition = _build_hacky_operation(item) api.preupload_lfs_files(repo_id=repo_id, repo_type=repo_type, revision=revision, additions=[addition]) metadata.is_uploaded = True metadata.save(paths) def _commit(items: List[JOB_ITEM_T], api: 'HfApi', repo_id: str, repo_type: str, revision: str) -> None: additions = [_build_hacky_operation(item) for item in items] api.create_commit(repo_id=repo_id, repo_type=repo_type, revision=revision, operations=additions, commit_message='Add files using upload-large-folder tool') for (paths, metadata) in items: metadata.is_committed = True metadata.save(paths) class HackyCommitOperationAdd(CommitOperationAdd): def __post_init__(self) -> None: if isinstance(self.path_or_fileobj, Path): self.path_or_fileobj = str(self.path_or_fileobj) def _build_hacky_operation(item: JOB_ITEM_T) -> HackyCommitOperationAdd: (paths, metadata) = item operation = HackyCommitOperationAdd(path_in_repo=paths.path_in_repo, path_or_fileobj=paths.file_path) with paths.file_path.open('rb') as file: sample = file.peek(512)[:512] if metadata.sha256 is None: raise ValueError('sha256 must have been computed by now!') operation.upload_info = UploadInfo(sha256=bytes.fromhex(metadata.sha256), size=metadata.size, sample=sample) return operation def _get_one(queue: 'queue.Queue[JOB_ITEM_T]') -> List[JOB_ITEM_T]: return [queue.get()] def _get_n(queue: 'queue.Queue[JOB_ITEM_T]', n: int) -> List[JOB_ITEM_T]: return [queue.get() for _ in range(min(queue.qsize(), n))] def _print_overwrite(report: str) -> None: report += '\n' terminal_width = shutil.get_terminal_size().columns nb_lines = sum((len(line) // terminal_width + 1 for line in report.splitlines())) for _ in range(nb_lines): sys.stdout.write('\r\x1b[K') sys.stdout.write('\x1b[F') sys.stdout.write(report) sys.stdout.write(' ' * (terminal_width - len(report.splitlines()[-1]))) sys.stdout.flush() # File: huggingface_hub-main/src/huggingface_hub/_webhooks_payload.py """""" from typing import List, Literal, Optional from .utils import is_pydantic_available if is_pydantic_available(): from pydantic import BaseModel else: class BaseModel: def __init__(self, *args, **kwargs) -> None: raise ImportError('You must have `pydantic` installed to use `WebhookPayload`. This is an optional dependency that should be installed separately. 
Please run `pip install --upgrade pydantic` and retry.') WebhookEvent_T = Literal['create', 'delete', 'move', 'update'] RepoChangeEvent_T = Literal['add', 'move', 'remove', 'update'] RepoType_T = Literal['dataset', 'model', 'space'] DiscussionStatus_T = Literal['closed', 'draft', 'open', 'merged'] SupportedWebhookVersion = Literal[3] class ObjectId(BaseModel): id: str class WebhookPayloadUrl(BaseModel): web: str api: Optional[str] = None class WebhookPayloadMovedTo(BaseModel): name: str owner: ObjectId class WebhookPayloadWebhook(ObjectId): version: SupportedWebhookVersion class WebhookPayloadEvent(BaseModel): action: WebhookEvent_T scope: str class WebhookPayloadDiscussionChanges(BaseModel): base: str mergeCommitId: Optional[str] = None class WebhookPayloadComment(ObjectId): author: ObjectId hidden: bool content: Optional[str] = None url: WebhookPayloadUrl class WebhookPayloadDiscussion(ObjectId): num: int author: ObjectId url: WebhookPayloadUrl title: str isPullRequest: bool status: DiscussionStatus_T changes: Optional[WebhookPayloadDiscussionChanges] = None pinned: Optional[bool] = None class WebhookPayloadRepo(ObjectId): owner: ObjectId head_sha: Optional[str] = None name: str private: bool subdomain: Optional[str] = None tags: Optional[List[str]] = None type: Literal['dataset', 'model', 'space'] url: WebhookPayloadUrl class WebhookPayloadUpdatedRef(BaseModel): ref: str oldSha: Optional[str] = None newSha: Optional[str] = None class WebhookPayload(BaseModel): event: WebhookPayloadEvent repo: WebhookPayloadRepo discussion: Optional[WebhookPayloadDiscussion] = None comment: Optional[WebhookPayloadComment] = None webhook: WebhookPayloadWebhook movedTo: Optional[WebhookPayloadMovedTo] = None updatedRefs: Optional[List[WebhookPayloadUpdatedRef]] = None # File: huggingface_hub-main/src/huggingface_hub/_webhooks_server.py """""" import atexit import inspect import os from functools import wraps from typing import TYPE_CHECKING, Any, Callable, Dict, Optional from .utils import experimental, is_fastapi_available, is_gradio_available if TYPE_CHECKING: import gradio as gr from fastapi import Request if is_fastapi_available(): from fastapi import FastAPI, Request from fastapi.responses import JSONResponse else: FastAPI = Request = JSONResponse = None _global_app: Optional['WebhooksServer'] = None _is_local = os.environ.get('SPACE_ID') is None @experimental class WebhooksServer: def __new__(cls, *args, **kwargs) -> 'WebhooksServer': if not is_gradio_available(): raise ImportError('You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio` first.') if not is_fastapi_available(): raise ImportError('You must have `fastapi` installed to use `WebhooksServer`. 
Please run `pip install --upgrade fastapi` first.') return super().__new__(cls) def __init__(self, ui: Optional['gr.Blocks']=None, webhook_secret: Optional[str]=None) -> None: self._ui = ui self.webhook_secret = webhook_secret or os.getenv('WEBHOOK_SECRET') self.registered_webhooks: Dict[str, Callable] = {} _warn_on_empty_secret(self.webhook_secret) def add_webhook(self, path: Optional[str]=None) -> Callable: if callable(path): return self.add_webhook()(path) @wraps(FastAPI.post) def _inner_post(*args, **kwargs): func = args[0] abs_path = f"/webhooks/{(path or func.__name__).strip('/')}" if abs_path in self.registered_webhooks: raise ValueError(f'Webhook {abs_path} already exists.') self.registered_webhooks[abs_path] = func return _inner_post def launch(self, prevent_thread_lock: bool=False, **launch_kwargs: Any) -> None: ui = self._ui or self._get_default_ui() launch_kwargs.setdefault('share', _is_local) (self.fastapi_app, _, _) = ui.launch(prevent_thread_lock=True, **launch_kwargs) for (path, func) in self.registered_webhooks.items(): if self.webhook_secret is not None: func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret) self.fastapi_app.post(path)(func) space_host = os.environ.get('SPACE_HOST') url = 'https://' + space_host if space_host is not None else ui.share_url or ui.local_url url = url.strip('/') message = '\nWebhooks are correctly set up and ready to use:' message += '\n' + '\n'.join((f' - POST {url}{webhook}' for webhook in self.registered_webhooks)) message += '\nGo to https://huggingface.co/settings/webhooks to set up your webhooks.' print(message) if not prevent_thread_lock: ui.block_thread() def _get_default_ui(self) -> 'gr.Blocks': import gradio as gr with gr.Blocks() as ui: gr.Markdown('# This is an app to process 🤗 Webhooks') gr.Markdown('Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on specific repos or to all repos belonging to a particular set of users/organizations (not just your repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to learn more about webhooks on the Hugging Face Hub.') gr.Markdown(f'{len(self.registered_webhooks)} webhook(s) are registered:' + '\n\n' + '\n '.join((f'- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})' for (webhook_path, webhook) in self.registered_webhooks.items()))) gr.Markdown('Go to https://huggingface.co/settings/webhooks to set up your webhooks.' + '\nYour app is running locally. Please look at the logs to check the full URL you need to set.' if _is_local else "\nThis app is running on a Space. You can find the corresponding URL in the options menu (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'.") return ui @experimental def webhook_endpoint(path: Optional[str]=None) -> Callable: if callable(path): return webhook_endpoint()(path) @wraps(WebhooksServer.add_webhook) def _inner(func: Callable) -> Callable: app = _get_global_app() app.add_webhook(path)(func) if len(app.registered_webhooks) == 1: atexit.register(app.launch) @wraps(app.launch) def _launch_now(): atexit.unregister(app.launch) app.launch() func.launch = _launch_now return func return _inner def _get_global_app() -> WebhooksServer: global _global_app if _global_app is None: _global_app = WebhooksServer() return _global_app def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None: if webhook_secret is None: print('Webhook secret is not defined. This means your webhook endpoints will be open to everyone.') print("To add a secret, set `WEBHOOK_SECRET` as an environment variable or pass it at initialization: \n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`") print('For more details about webhook secrets, please refer to https://huggingface.co/docs/hub/webhooks#webhook-secret.') else: print('Webhook secret is correctly defined.')
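# A minimal usage sketch for the `webhook_endpoint` decorator above (hedged: the endpoint
# body is illustrative). The first registered endpoint schedules the server to launch
# automatically at interpreter exit:
#
#     from huggingface_hub import WebhookPayload, webhook_endpoint
#
#     @webhook_endpoint
#     async def trigger_training(payload: WebhookPayload):
#         if payload.repo.type == 'dataset' and payload.event.action == 'update':
#             return {'processed': True}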
def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str: return '/docs#/default/' + webhook_name + webhook_path.replace('/', '_') + '_post' def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable: initial_sig = inspect.signature(func) @wraps(func) async def _protected_func(request: Request, **kwargs): request_secret = request.headers.get('x-webhook-secret') if request_secret is None: return JSONResponse({'error': 'x-webhook-secret header not set.'}, status_code=401) if request_secret != webhook_secret: return JSONResponse({'error': 'Invalid webhook secret.'}, status_code=403) if 'request' in initial_sig.parameters: kwargs['request'] = request if inspect.iscoroutinefunction(func): return await func(**kwargs) else: return func(**kwargs) if 'request' not in initial_sig.parameters: _protected_func.__signature__ = initial_sig.replace(parameters=(inspect.Parameter(name='request', kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request),) + tuple(initial_sig.parameters.values())) return _protected_func # File: huggingface_hub-main/src/huggingface_hub/commands/__init__.py from abc import ABC, abstractmethod from argparse import _SubParsersAction class BaseHuggingfaceCLICommand(ABC): @staticmethod @abstractmethod def register_subcommand(parser: _SubParsersAction): raise NotImplementedError() @abstractmethod def run(self): raise NotImplementedError() # File: huggingface_hub-main/src/huggingface_hub/commands/_cli_utils.py """""" import os from typing import List, Union class ANSI: _bold = '\x1b[1m' _gray = '\x1b[90m' _red = '\x1b[31m' _reset = '\x1b[0m' _yellow = '\x1b[33m' @classmethod def bold(cls, s: str) -> str: return cls._format(s, cls._bold) @classmethod def gray(cls, s: str) -> str: return cls._format(s, cls._gray) @classmethod def red(cls, s: str) -> str: return cls._format(s, cls._bold + cls._red) @classmethod def yellow(cls, s: str) -> str: return cls._format(s, cls._yellow) @classmethod def _format(cls, s: str, code: str) -> str: if os.environ.get('NO_COLOR'): return s return f'{code}{s}{cls._reset}' def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: col_widths = [max((len(str(x)) for x in col)) for col in zip(*rows, headers)] row_format = ('{{:{}}} ' * len(headers)).format(*col_widths) lines = [] lines.append(row_format.format(*headers)) lines.append(row_format.format(*['-' * w for w in col_widths])) for row in rows: lines.append(row_format.format(*row)) return '\n'.join(lines) # File: huggingface_hub-main/src/huggingface_hub/commands/delete_cache.py """""" import os from argparse import Namespace, _SubParsersAction from functools import wraps from tempfile import mkstemp from typing import Any, Callable, Iterable, List, Optional, Union from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir from .
import BaseHuggingfaceCLICommand from ._cli_utils import ANSI try: from InquirerPy import inquirer from InquirerPy.base.control import Choice from InquirerPy.separator import Separator _inquirer_py_available = True except ImportError: _inquirer_py_available = False def require_inquirer_py(fn: Callable) -> Callable: @wraps(fn) def _inner(*args, **kwargs): if not _inquirer_py_available: raise ImportError('The `delete-cache` command requires extra dependencies to work with the TUI.\nPlease run `pip install huggingface_hub[cli]` to install them.\nOtherwise, disable TUI using the `--disable-tui` flag.') return fn(*args, **kwargs) return _inner _CANCEL_DELETION_STR = 'CANCEL_DELETION' class DeleteCacheCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): delete_cache_parser = parser.add_parser('delete-cache', help='Delete revisions from the cache directory.') delete_cache_parser.add_argument('--dir', type=str, default=None, help='cache directory (optional). Default to the default HuggingFace cache.') delete_cache_parser.add_argument('--disable-tui', action='store_true', help="Disable Terminal User Interface (TUI) mode. Useful if your platform/terminal doesn't support the multiselect menu.") delete_cache_parser.set_defaults(func=DeleteCacheCommand) def __init__(self, args: Namespace) -> None: self.cache_dir: Optional[str] = args.dir self.disable_tui: bool = args.disable_tui def run(self): hf_cache_info = scan_cache_dir(self.cache_dir) if self.disable_tui: selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[]) else: selected_hashes = _manual_review_tui(hf_cache_info, preselected=[]) if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes: confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + ' Confirm deletion ?' if self.disable_tui: confirmed = _ask_for_confirmation_no_tui(confirm_message) else: confirmed = _ask_for_confirmation_tui(confirm_message) if confirmed: strategy = hf_cache_info.delete_revisions(*selected_hashes) print('Start deletion.') strategy.execute() print(f'Done. Deleted {len(strategy.repos)} repo(s) and {len(strategy.snapshots)} revision(s) for a total of {strategy.expected_freed_size_str}.') return print('Deletion is cancelled. 
Do nothing.') @require_inquirer_py def _manual_review_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]: choices = _get_tui_choices_from_scan(repos=hf_cache_info.repos, preselected=preselected) checkbox = inquirer.checkbox(message='Select revisions to delete:', choices=choices, cycle=False, height=100, instruction=_get_expectations_str(hf_cache_info, selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled]), long_instruction='Press <space> to select, <enter> to validate and <ctrl+c> to quit without modification.', transformer=lambda result: f'{len(result)} revision(s) selected.') def _update_expectations(_) -> None: checkbox._instruction = _get_expectations_str(hf_cache_info, selected_hashes=[choice['value'] for choice in checkbox.content_control.choices if choice['enabled']]) checkbox.kb_func_lookup['toggle'].append({'func': _update_expectations}) try: return checkbox.execute() except KeyboardInterrupt: return [] @require_inquirer_py def _ask_for_confirmation_tui(message: str, default: bool=True) -> bool: return inquirer.confirm(message, default=default).execute() def _get_tui_choices_from_scan(repos: Iterable[CachedRepoInfo], preselected: List[str]) -> List: choices: List[Union[Choice, Separator]] = [] choices.append(Choice(_CANCEL_DELETION_STR, name='None of the following (if selected, nothing will be deleted).', enabled=False)) for repo in sorted(repos, key=_repo_sorting_order): choices.append(Separator(f'\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}, used {repo.last_accessed_str})')) for revision in sorted(repo.revisions, key=_revision_sorting_order): choices.append(Choice(revision.commit_hash, name=f"{revision.commit_hash[:8]}: {', '.join(sorted(revision.refs)) or '(detached)'} # modified {revision.last_modified_str}", enabled=revision.commit_hash in preselected)) return choices def _manual_review_no_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]: (fd, tmp_path) = mkstemp(suffix='.txt') os.close(fd) lines = [] for repo in sorted(hf_cache_info.repos, key=_repo_sorting_order): lines.append(f'\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}, used {repo.last_accessed_str})') for revision in sorted(repo.revisions, key=_revision_sorting_order): lines.append(f"{('' if revision.commit_hash in preselected else '#')} {revision.commit_hash} # Refs: {', '.join(sorted(revision.refs)) or '(detached)'} # modified {revision.last_modified_str}") with open(tmp_path, 'w') as f: f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS) f.write('\n'.join(lines)) instructions = f'\n TUI is disabled. In order to select which revisions you want to delete, please edit\n the following file using the text editor of your choice. Instructions for manual\n editing are located at the beginning of the file. Edit the file, save it and confirm\n to continue.\n File to edit: {ANSI.bold(tmp_path)}\n ' print('\n'.join((line.strip() for line in instructions.strip().split('\n')))) while True: selected_hashes = _read_manual_review_tmp_file(tmp_path) if _ask_for_confirmation_no_tui(_get_expectations_str(hf_cache_info, selected_hashes) + ' Continue ?', default=False): break os.remove(tmp_path) return selected_hashes
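# A sketch of the temp-file format written above and parsed by `_read_manual_review_tmp_file`
# below (hedged: the hash is a shortened placeholder). Lines starting with '#' are ignored;
# on kept lines, everything before the first '#' is treated as a selected revision hash:
#
#     # Model username/model-name (2.2G, used 3 days ago)
#     abcdef0123456789 # Refs: main # modified 3 days ago
#
# parsing this content returns ['abcdef0123456789']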
def _ask_for_confirmation_no_tui(message: str, default: bool=True) -> bool: YES = ('y', 'yes', '1') NO = ('n', 'no', '0') DEFAULT = '' ALL = YES + NO + (DEFAULT,) full_message = message + (' (Y/n) ' if default else ' (y/N) ') while True: answer = input(full_message).lower() if answer == DEFAULT: return default if answer in YES: return True if answer in NO: return False print(f'Invalid input. Must be one of {ALL}') def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str: if _CANCEL_DELETION_STR in selected_hashes: return 'Nothing will be deleted.' strategy = hf_cache_info.delete_revisions(*selected_hashes) return f'{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}.' def _read_manual_review_tmp_file(tmp_path: str) -> List[str]: with open(tmp_path) as f: content = f.read() lines = [line.strip() for line in content.split('\n')] selected_lines = [line for line in lines if not line.startswith('#')] selected_hashes = [line.split('#')[0].strip() for line in selected_lines] return [hash for hash in selected_hashes if len(hash) > 0] _MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f"\n# INSTRUCTIONS\n# ------------\n# This is a temporary file created by running `huggingface-cli delete-cache` with the\n# `--disable-tui` option. It contains a set of revisions that can be deleted from your\n# local cache directory.\n#\n# Please manually review the revisions you want to delete:\n# - Revision hashes can be commented out with '#'.\n# - Only non-commented revisions in this file will be deleted.\n# - Revision hashes that are removed from this file are ignored as well.\n# - If the `{_CANCEL_DELETION_STR}` line is uncommented, all cache deletion is cancelled and\n# no changes will be applied.\n#\n# Once you've manually reviewed this file, please confirm deletion in the terminal. This\n# file will be automatically removed once done.\n# ------------\n\n# KILL SWITCH\n# ------------\n# Un-comment the following line to completely cancel the deletion process\n# {_CANCEL_DELETION_STR}\n# ------------\n\n# REVISIONS\n# ------------\n".strip() def _repo_sorting_order(repo: CachedRepoInfo) -> Any: return (repo.repo_type, repo.last_accessed) def _revision_sorting_order(revision: CachedRevisionInfo) -> Any: return revision.last_modified # File: huggingface_hub-main/src/huggingface_hub/commands/download.py """""" import warnings from argparse import Namespace, _SubParsersAction from typing import List, Optional from huggingface_hub import logging from huggingface_hub._snapshot_download import snapshot_download from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.file_download import hf_hub_download from huggingface_hub.utils import disable_progress_bars, enable_progress_bars logger = logging.get_logger(__name__) class DownloadCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): download_parser = parser.add_parser('download', help='Download files from the Hub') download_parser.add_argument('repo_id', type=str, help='ID of the repo to download from (e.g.
`username/repo-name`).') download_parser.add_argument('filenames', type=str, nargs='*', help='Files to download (e.g. `config.json`, `data/metadata.jsonl`).') download_parser.add_argument('--repo-type', choices=['model', 'dataset', 'space'], default='model', help="Type of repo to download from (defaults to 'model').") download_parser.add_argument('--revision', type=str, help='An optional Git revision id which can be a branch name, a tag, or a commit hash.') download_parser.add_argument('--include', nargs='*', type=str, help='Glob patterns to match files to download.') download_parser.add_argument('--exclude', nargs='*', type=str, help='Glob patterns to exclude from files to download.') download_parser.add_argument('--cache-dir', type=str, help='Path to the directory where to save the downloaded files.') download_parser.add_argument('--local-dir', type=str, help='If set, the downloaded file will be placed under this directory. Check out https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more details.') download_parser.add_argument('--local-dir-use-symlinks', choices=['auto', 'True', 'False'], help='Deprecated and ignored. Downloading to a local directory does not use symlinks anymore.') download_parser.add_argument('--force-download', action='store_true', help='If True, the files will be downloaded even if they are already cached.') download_parser.add_argument('--resume-download', action='store_true', help='Deprecated and ignored. Downloading a file to local dir always attempts to resume previously interrupted downloads (unless hf-transfer is enabled).') download_parser.add_argument('--token', type=str, help='A User Access Token generated from https://huggingface.co/settings/tokens') download_parser.add_argument('--quiet', action='store_true', help='If True, progress bars are disabled and only the path to the download files is printed.') download_parser.add_argument('--max-workers', type=int, default=8, help='Maximum number of workers to use for downloading files. Default is 8.') download_parser.set_defaults(func=DownloadCommand) def __init__(self, args: Namespace) -> None: self.token = args.token self.repo_id: str = args.repo_id self.filenames: List[str] = args.filenames self.repo_type: str = args.repo_type self.revision: Optional[str] = args.revision self.include: Optional[List[str]] = args.include self.exclude: Optional[List[str]] = args.exclude self.cache_dir: Optional[str] = args.cache_dir self.local_dir: Optional[str] = args.local_dir self.force_download: bool = args.force_download self.resume_download: Optional[bool] = args.resume_download or None self.quiet: bool = args.quiet self.max_workers: int = args.max_workers if args.local_dir_use_symlinks is not None: warnings.warn('Ignoring --local-dir-use-symlinks. 
Downloading to a local directory does not use symlinks anymore.', FutureWarning) def run(self) -> None: if self.quiet: disable_progress_bars() with warnings.catch_warnings(): warnings.simplefilter('ignore') print(self._download()) enable_progress_bars() else: logging.set_verbosity_info() print(self._download()) logging.set_verbosity_warning() def _download(self) -> str: if len(self.filenames) > 0: if self.include is not None and len(self.include) > 0: warnings.warn('Ignoring `--include` since filenames have been explicitly set.') if self.exclude is not None and len(self.exclude) > 0: warnings.warn('Ignoring `--exclude` since filenames have been explicitly set.') if len(self.filenames) == 1: return hf_hub_download(repo_id=self.repo_id, repo_type=self.repo_type, revision=self.revision, filename=self.filenames[0], cache_dir=self.cache_dir, resume_download=self.resume_download, force_download=self.force_download, token=self.token, local_dir=self.local_dir, library_name='huggingface-cli') elif len(self.filenames) == 0: allow_patterns = self.include ignore_patterns = self.exclude else: allow_patterns = self.filenames ignore_patterns = None return snapshot_download(repo_id=self.repo_id, repo_type=self.repo_type, revision=self.revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, resume_download=self.resume_download, force_download=self.force_download, cache_dir=self.cache_dir, token=self.token, local_dir=self.local_dir, library_name='huggingface-cli', max_workers=self.max_workers) # File: huggingface_hub-main/src/huggingface_hub/commands/env.py """""" from argparse import _SubParsersAction from ..utils import dump_environment_info from . import BaseHuggingfaceCLICommand class EnvironmentCommand(BaseHuggingfaceCLICommand): def __init__(self, args): self.args = args @staticmethod def register_subcommand(parser: _SubParsersAction): env_parser = parser.add_parser('env', help='Print information about the environment.') env_parser.set_defaults(func=EnvironmentCommand) def run(self) -> None: dump_environment_info() # File: huggingface_hub-main/src/huggingface_hub/commands/huggingface_cli.py from argparse import ArgumentParser from huggingface_hub.commands.delete_cache import DeleteCacheCommand from huggingface_hub.commands.download import DownloadCommand from huggingface_hub.commands.env import EnvironmentCommand from huggingface_hub.commands.lfs import LfsCommands from huggingface_hub.commands.repo_files import RepoFilesCommand from huggingface_hub.commands.scan_cache import ScanCacheCommand from huggingface_hub.commands.tag import TagCommands from huggingface_hub.commands.upload import UploadCommand from huggingface_hub.commands.upload_large_folder import UploadLargeFolderCommand from huggingface_hub.commands.user import UserCommands from huggingface_hub.commands.version import VersionCommand def main(): parser = ArgumentParser('huggingface-cli', usage='huggingface-cli <command> [<args>]') commands_parser = parser.add_subparsers(help='huggingface-cli command helpers') DownloadCommand.register_subcommand(commands_parser) UploadCommand.register_subcommand(commands_parser) RepoFilesCommand.register_subcommand(commands_parser) EnvironmentCommand.register_subcommand(commands_parser) UserCommands.register_subcommand(commands_parser) LfsCommands.register_subcommand(commands_parser) ScanCacheCommand.register_subcommand(commands_parser) DeleteCacheCommand.register_subcommand(commands_parser) TagCommands.register_subcommand(commands_parser) VersionCommand.register_subcommand(commands_parser) UploadLargeFolderCommand.register_subcommand(commands_parser) args = parser.parse_args() if not hasattr(args, 'func'): parser.print_help() exit(1) service = args.func(args) service.run() if __name__ == '__main__': main()
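# A short sketch of the dispatch pattern used in `main()` above (hedged: the invocation is
# illustrative). Each subcommand registers itself with `set_defaults(func=SomeCommand)`, so
# after parsing, `args.func(args).run()` instantiates and runs the selected command:
#
#     args = parser.parse_args(['env'])  # selects EnvironmentCommand via set_defaults
#     service = args.func(args)
#     service.run()                      # prints environment info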
# File: huggingface_hub-main/src/huggingface_hub/commands/lfs.py """""" import json import os import subprocess import sys from argparse import _SubParsersAction from typing import Dict, List, Optional from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj from ..utils import get_session, hf_raise_for_status, logging logger = logging.get_logger(__name__) class LfsCommands(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): enable_parser = parser.add_parser('lfs-enable-largefiles', help='Configure your repository to enable upload of files > 5GB.') enable_parser.add_argument('path', type=str, help='Local path to repository you want to configure.') enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False) upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) class LfsEnableCommand: def __init__(self, args): self.args = args def run(self): local_path = os.path.abspath(self.args.path) if not os.path.isdir(local_path): print('This does not look like a valid git repo.') exit(1) subprocess.run('git config lfs.customtransfer.multipart.path huggingface-cli'.split(), check=True, cwd=local_path) subprocess.run(f'git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}'.split(), check=True, cwd=local_path) print('Local repo set up for largefiles') def write_msg(msg: Dict): msg_str = json.dumps(msg) + '\n' sys.stdout.write(msg_str) sys.stdout.flush() def read_msg() -> Optional[Dict]: msg = json.loads(sys.stdin.readline().strip()) if 'terminate' in (msg.get('type'), msg.get('event')): return None if msg.get('event') not in ('download', 'upload'): logger.critical('Received unexpected message') sys.exit(1) return msg class LfsUploadCommand: def __init__(self, args) -> None: self.args = args def run(self) -> None: init_msg = json.loads(sys.stdin.readline().strip()) if not (init_msg.get('event') == 'init' and init_msg.get('operation') == 'upload'): write_msg({'error': {'code': 32, 'message': 'Wrong lfs init operation'}}) sys.exit(1) write_msg({}) while True: msg = read_msg() if msg is None: sys.exit(0) oid = msg['oid'] filepath = msg['path'] completion_url = msg['action']['href'] header = msg['action']['header'] chunk_size = int(header.pop('chunk_size')) presigned_urls: List[str] = list(header.values()) write_msg({'event': 'progress', 'oid': oid, 'bytesSoFar': 1, 'bytesSinceLast': 0}) parts = [] with open(filepath, 'rb') as file: for (i, presigned_url) in enumerate(presigned_urls): with SliceFileObj(file, seek_from=i * chunk_size, read_limit=chunk_size) as data: r = get_session().put(presigned_url, data=data) hf_raise_for_status(r) parts.append({'etag': r.headers.get('etag'), 'partNumber': i + 1}) write_msg({'event': 'progress', 'oid': oid, 'bytesSoFar': (i + 1) * chunk_size, 'bytesSinceLast': chunk_size}) r = get_session().post(completion_url, json={'oid': oid, 'parts': parts}) hf_raise_for_status(r) write_msg({'event': 'complete', 'oid': oid}) # File: huggingface_hub-main/src/huggingface_hub/commands/repo_files.py """""" from argparse import _SubParsersAction from typing import List, Optional from huggingface_hub import logging from
huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.hf_api import HfApi logger = logging.get_logger(__name__) class DeleteFilesSubCommand: def __init__(self, args) -> None: self.args = args self.repo_id: str = args.repo_id self.repo_type: Optional[str] = args.repo_type self.revision: Optional[str] = args.revision self.api: HfApi = HfApi(token=args.token, library_name='huggingface-cli') self.patterns: List[str] = args.patterns self.commit_message: Optional[str] = args.commit_message self.commit_description: Optional[str] = args.commit_description self.create_pr: bool = args.create_pr self.token: Optional[str] = args.token def run(self) -> None: logging.set_verbosity_info() url = self.api.delete_files(delete_patterns=self.patterns, repo_id=self.repo_id, repo_type=self.repo_type, revision=self.revision, commit_message=self.commit_message, commit_description=self.commit_description, create_pr=self.create_pr) print(f'Files successfully deleted from repo. Commit: {url}.') logging.set_verbosity_warning() class RepoFilesCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): repo_files_parser = parser.add_parser('repo-files', help='Manage files in a repo on the Hub') repo_files_parser.add_argument('repo_id', type=str, help='The ID of the repo to manage (e.g. `username/repo-name`).') repo_files_subparsers = repo_files_parser.add_subparsers(help='Action to execute against the files.', required=True) delete_subparser = repo_files_subparsers.add_parser('delete', help='Delete files from a repo on the Hub') delete_subparser.set_defaults(func=lambda args: DeleteFilesSubCommand(args)) delete_subparser.add_argument('patterns', nargs='+', type=str, help='Glob patterns to match files to delete.') delete_subparser.add_argument('--repo-type', choices=['model', 'dataset', 'space'], default='model', help='Type of the repo to delete files from (e.g. `dataset`).') delete_subparser.add_argument('--revision', type=str, help='An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not exist and `--create-pr` is not set, a branch will be automatically created.') delete_subparser.add_argument('--commit-message', type=str, help='The summary / title / first line of the generated commit.') delete_subparser.add_argument('--commit-description', type=str, help='The description of the generated commit.') delete_subparser.add_argument('--create-pr', action='store_true', help='Whether to create a new Pull Request for these changes.') repo_files_parser.add_argument('--token', type=str, help='A User Access Token generated from https://huggingface.co/settings/tokens') repo_files_parser.set_defaults(func=RepoFilesCommand) # File: huggingface_hub-main/src/huggingface_hub/commands/scan_cache.py """""" import time from argparse import Namespace, _SubParsersAction from typing import Optional from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir from . import BaseHuggingfaceCLICommand from ._cli_utils import ANSI, tabulate class ScanCacheCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): scan_cache_parser = parser.add_parser('scan-cache', help='Scan cache directory.') scan_cache_parser.add_argument('--dir', type=str, default=None, help='cache directory to scan (optional).
Defaults to the default HuggingFace cache.') scan_cache_parser.add_argument('-v', '--verbose', action='count', default=0, help='show a more verbose output') scan_cache_parser.set_defaults(func=ScanCacheCommand) def __init__(self, args: Namespace) -> None: self.verbosity: int = args.verbose self.cache_dir: Optional[str] = args.dir def run(self): try: t0 = time.time() hf_cache_info = scan_cache_dir(self.cache_dir) t1 = time.time() except CacheNotFound as exc: cache_dir = exc.cache_dir print(f'Cache directory not found: {cache_dir}') return self._print_hf_cache_info_as_table(hf_cache_info) print(f'\nDone in {round(t1 - t0, 1)}s. Scanned {len(hf_cache_info.repos)} repo(s) for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}.') if len(hf_cache_info.warnings) > 0: message = f'Got {len(hf_cache_info.warnings)} warning(s) while scanning.' if self.verbosity >= 3: print(ANSI.gray(message)) for warning in hf_cache_info.warnings: print(ANSI.gray(warning)) else: print(ANSI.gray(message + ' Use -vvv to print details.')) def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None: print(get_table(hf_cache_info, verbosity=self.verbosity)) def get_table(hf_cache_info: HFCacheInfo, *, verbosity: int=0) -> str: if verbosity == 0: return tabulate(rows=[[repo.repo_id, repo.repo_type, '{:>12}'.format(repo.size_on_disk_str), repo.nb_files, repo.last_accessed_str, repo.last_modified_str, ', '.join(sorted(repo.refs)), str(repo.repo_path)] for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path)], headers=['REPO ID', 'REPO TYPE', 'SIZE ON DISK', 'NB FILES', 'LAST_ACCESSED', 'LAST_MODIFIED', 'REFS', 'LOCAL PATH']) else: return tabulate(rows=[[repo.repo_id, repo.repo_type, revision.commit_hash, '{:>12}'.format(revision.size_on_disk_str), revision.nb_files, revision.last_modified_str, ', '.join(sorted(revision.refs)), str(revision.snapshot_path)] for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash)], headers=['REPO ID', 'REPO TYPE', 'REVISION', 'SIZE ON DISK', 'NB FILES', 'LAST_MODIFIED', 'REFS', 'LOCAL PATH'])
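# A minimal sketch of the programmatic counterpart to `huggingface-cli scan-cache`, using the
# `scan_cache_dir` utility imported above; calling it with no argument scans the default cache.
from huggingface_hub import scan_cache_dir

hf_cache_info = scan_cache_dir()  # raises CacheNotFound if the cache directory does not exist
print(f'{len(hf_cache_info.repos)} repo(s), {hf_cache_info.size_on_disk_str} on disk')
for repo in hf_cache_info.repos:
    print(repo.repo_id, repo.repo_type, repo.size_on_disk_str)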
# File: huggingface_hub-main/src/huggingface_hub/commands/tag.py """""" from argparse import Namespace, _SubParsersAction from requests.exceptions import HTTPError from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.constants import REPO_TYPES from huggingface_hub.hf_api import HfApi from ..errors import HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError from ._cli_utils import ANSI class TagCommands(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): tag_parser = parser.add_parser('tag', help='(create, list, delete) tags for a repo in the hub') tag_parser.add_argument('repo_id', type=str, help='The ID of the repo to tag (e.g. `username/repo-name`).') tag_parser.add_argument('tag', nargs='?', type=str, help='The name of the tag for creation or deletion.') tag_parser.add_argument('-m', '--message', type=str, help='The description of the tag to create.') tag_parser.add_argument('--revision', type=str, help='The git revision to tag.') tag_parser.add_argument('--token', type=str, help='A User Access Token generated from https://huggingface.co/settings/tokens.') tag_parser.add_argument('--repo-type', choices=['model', 'dataset', 'space'], default='model', help='Set the type of repository (model, dataset, or space).') tag_parser.add_argument('-y', '--yes', action='store_true', help='Answer Yes to prompts automatically.') tag_parser.add_argument('-l', '--list', action='store_true', help='List tags for a repository.') tag_parser.add_argument('-d', '--delete', action='store_true', help='Delete a tag for a repository.') tag_parser.set_defaults(func=lambda args: handle_commands(args)) def handle_commands(args: Namespace): if args.list: return TagListCommand(args) elif args.delete: return TagDeleteCommand(args) else: return TagCreateCommand(args) class TagCommand: def __init__(self, args: Namespace): self.args = args self.api = HfApi(token=self.args.token) self.repo_id = self.args.repo_id self.repo_type = self.args.repo_type if self.repo_type not in REPO_TYPES: print('Invalid repo --repo-type') exit(1) class TagCreateCommand(TagCommand): def run(self): print(f'You are about to create tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}') try: self.api.create_tag(repo_id=self.repo_id, tag=self.args.tag, tag_message=self.args.message, revision=self.args.revision, repo_type=self.repo_type) except RepositoryNotFoundError: print(f'{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.') exit(1) except RevisionNotFoundError: print(f'Revision {ANSI.bold(self.args.revision)} not found.') exit(1) except HfHubHTTPError as e: if e.response.status_code == 409: print(f'Tag {ANSI.bold(self.args.tag)} already exists on {ANSI.bold(self.repo_id)}') exit(1) raise e print(f'Tag {ANSI.bold(self.args.tag)} created on {ANSI.bold(self.repo_id)}') class TagListCommand(TagCommand): def run(self): try: refs = self.api.list_repo_refs(repo_id=self.repo_id, repo_type=self.repo_type) except RepositoryNotFoundError: print(f'{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.') exit(1) except HTTPError as e: print(e) print(ANSI.red(e.response.text)) exit(1) if len(refs.tags) == 0: print('No tags found') exit(0) print(f'Tags for {self.repo_type} {ANSI.bold(self.repo_id)}:') for tag in refs.tags: print(tag.name) class TagDeleteCommand(TagCommand): def run(self): print(f'You are about to delete tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}') if not self.args.yes: choice = input('Proceed?
[Y/n] ').lower() if choice not in ('', 'y', 'yes'): print('Abort') exit() try: self.api.delete_tag(repo_id=self.repo_id, tag=self.args.tag, repo_type=self.repo_type) except RepositoryNotFoundError: print(f'{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.') exit(1) except RevisionNotFoundError: print(f'Tag {ANSI.bold(self.args.tag)} not found on {ANSI.bold(self.repo_id)}') exit(1) print(f'Tag {ANSI.bold(self.args.tag)} deleted on {ANSI.bold(self.repo_id)}') # File: huggingface_hub-main/src/huggingface_hub/commands/upload.py """""" import os import time import warnings from argparse import Namespace, _SubParsersAction from typing import List, Optional from huggingface_hub import logging from huggingface_hub._commit_scheduler import CommitScheduler from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER from huggingface_hub.errors import RevisionNotFoundError from huggingface_hub.hf_api import HfApi from huggingface_hub.utils import disable_progress_bars, enable_progress_bars logger = logging.get_logger(__name__) class UploadCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): upload_parser = parser.add_parser('upload', help='Upload a file or a folder to a repo on the Hub') upload_parser.add_argument('repo_id', type=str, help='The ID of the repo to upload to (e.g. `username/repo-name`).') upload_parser.add_argument('local_path', nargs='?', help='Local path to the file or folder to upload. Defaults to current directory.') upload_parser.add_argument('path_in_repo', nargs='?', help='Path of the file or folder in the repo. Defaults to the relative path of the file or folder.') upload_parser.add_argument('--repo-type', choices=['model', 'dataset', 'space'], default='model', help='Type of the repo to upload to (e.g. `dataset`).') upload_parser.add_argument('--revision', type=str, help='An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not exist and `--create-pr` is not set, a branch will be automatically created.') upload_parser.add_argument('--private', action='store_true', help="Whether to create a private repo if repo doesn't exist on the Hub. 
Ignored if the repo already exists.") upload_parser.add_argument('--include', nargs='*', type=str, help='Glob patterns to match files to upload.') upload_parser.add_argument('--exclude', nargs='*', type=str, help='Glob patterns to exclude from files to upload.') upload_parser.add_argument('--delete', nargs='*', type=str, help='Glob patterns for files to be deleted from the repo while committing.') upload_parser.add_argument('--commit-message', type=str, help='The summary / title / first line of the generated commit.') upload_parser.add_argument('--commit-description', type=str, help='The description of the generated commit.') upload_parser.add_argument('--create-pr', action='store_true', help='Whether to upload content as a new Pull Request.') upload_parser.add_argument('--every', type=float, help='If set, a background job is scheduled to create commits every `every` minutes.') upload_parser.add_argument('--token', type=str, help='A User Access Token generated from https://huggingface.co/settings/tokens') upload_parser.add_argument('--quiet', action='store_true', help='If True, progress bars are disabled and only the path to the uploaded files is printed.') upload_parser.set_defaults(func=UploadCommand) def __init__(self, args: Namespace) -> None: self.repo_id: str = args.repo_id self.repo_type: Optional[str] = args.repo_type self.revision: Optional[str] = args.revision self.private: bool = args.private self.include: Optional[List[str]] = args.include self.exclude: Optional[List[str]] = args.exclude self.delete: Optional[List[str]] = args.delete self.commit_message: Optional[str] = args.commit_message self.commit_description: Optional[str] = args.commit_description self.create_pr: bool = args.create_pr self.api: HfApi = HfApi(token=args.token, library_name='huggingface-cli') self.quiet: bool = args.quiet if args.every is not None and args.every <= 0: raise ValueError(f"`every` must be a positive value (got '{args.every}')") self.every: Optional[float] = args.every repo_name: str = args.repo_id.split('/')[-1] self.local_path: str self.path_in_repo: str if args.local_path is None and os.path.isfile(repo_name): self.local_path = repo_name self.path_in_repo = repo_name elif args.local_path is None and os.path.isdir(repo_name): self.local_path = repo_name self.path_in_repo = '.' elif args.local_path is None: raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.") elif args.path_in_repo is None and os.path.isfile(args.local_path): self.local_path = args.local_path self.path_in_repo = os.path.basename(args.local_path) elif args.path_in_repo is None: self.local_path = args.local_path self.path_in_repo = '.'
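# The implicit `local_path` / `path_in_repo` rules above, illustrated with hypothetical
# invocations (the repo id and paths are assumptions, not values from the source):
#   huggingface-cli upload user/my-model                    -> './my-model' file or folder, if one
#                                                              named after the repo exists locally
#   huggingface-cli upload user/my-model ./weights.bin      -> uploaded as 'weights.bin' in the repo
#   huggingface-cli upload user/my-model ./checkpoints      -> folder uploaded to the repo root '.'
#   huggingface-cli upload user/my-model ./w.bin model.bin  -> both paths set explicitly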
else: self.local_path = args.local_path self.path_in_repo = args.path_in_repo def run(self) -> None: if self.quiet: disable_progress_bars() with warnings.catch_warnings(): warnings.simplefilter('ignore') print(self._upload()) enable_progress_bars() else: logging.set_verbosity_info() print(self._upload()) logging.set_verbosity_warning() def _upload(self) -> str: if os.path.isfile(self.local_path): if self.include is not None and len(self.include) > 0: warnings.warn('Ignoring `--include` since a single file is uploaded.') if self.exclude is not None and len(self.exclude) > 0: warnings.warn('Ignoring `--exclude` since a single file is uploaded.') if self.delete is not None and len(self.delete) > 0: warnings.warn('Ignoring `--delete` since a single file is uploaded.') if not HF_HUB_ENABLE_HF_TRANSFER: logger.info('Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.') if self.every is not None: if os.path.isfile(self.local_path): folder_path = os.path.dirname(self.local_path) path_in_repo = self.path_in_repo[:-len(self.local_path)] if self.path_in_repo.endswith(self.local_path) else self.path_in_repo allow_patterns = [self.local_path] ignore_patterns = [] else: folder_path = self.local_path path_in_repo = self.path_in_repo allow_patterns = self.include or [] ignore_patterns = self.exclude or [] if self.delete is not None and len(self.delete) > 0: warnings.warn('Ignoring `--delete` when uploading with scheduled commits.') scheduler = CommitScheduler(folder_path=folder_path, repo_id=self.repo_id, repo_type=self.repo_type, revision=self.revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, path_in_repo=path_in_repo, private=self.private, every=self.every, hf_api=self.api) print(f'Scheduling commits every {self.every} minutes to {scheduler.repo_id}.') try: while True: time.sleep(100) except KeyboardInterrupt: scheduler.stop() return 'Stopped scheduled commits.' if not os.path.isfile(self.local_path) and (not os.path.isdir(self.local_path)): raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.") repo_id = self.api.create_repo(repo_id=self.repo_id, repo_type=self.repo_type, exist_ok=True, private=self.private, space_sdk='gradio' if self.repo_type == 'space' else None).repo_id if self.revision is not None and (not self.create_pr): try: self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision) except RevisionNotFoundError: logger.info(f"Branch '{self.revision}' not found. 
Creating it...") self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True) if os.path.isfile(self.local_path): return self.api.upload_file(path_or_fileobj=self.local_path, path_in_repo=self.path_in_repo, repo_id=repo_id, repo_type=self.repo_type, revision=self.revision, commit_message=self.commit_message, commit_description=self.commit_description, create_pr=self.create_pr) else: return self.api.upload_folder(folder_path=self.local_path, path_in_repo=self.path_in_repo, repo_id=repo_id, repo_type=self.repo_type, revision=self.revision, commit_message=self.commit_message, commit_description=self.commit_description, create_pr=self.create_pr, allow_patterns=self.include, ignore_patterns=self.exclude, delete_patterns=self.delete) # File: huggingface_hub-main/src/huggingface_hub/commands/upload_large_folder.py """""" import os from argparse import Namespace, _SubParsersAction from typing import List, Optional from huggingface_hub import logging from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.hf_api import HfApi from huggingface_hub.utils import disable_progress_bars from ._cli_utils import ANSI logger = logging.get_logger(__name__) class UploadLargeFolderCommand(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): subparser = parser.add_parser('upload-large-folder', help='Upload a large folder to a repo on the Hub') subparser.add_argument('repo_id', type=str, help='The ID of the repo to upload to (e.g. `username/repo-name`).') subparser.add_argument('local_path', type=str, help='Local path to the file or folder to upload.') subparser.add_argument('--repo-type', choices=['model', 'dataset', 'space'], help='Type of the repo to upload to (e.g. `dataset`).') subparser.add_argument('--revision', type=str, help='An optional Git revision to push to. It can be a branch name or a PR reference.') subparser.add_argument('--private', action='store_true', help="Whether to create a private repo if repo doesn't exist on the Hub. 
Ignored if the repo already exists.") subparser.add_argument('--include', nargs='*', type=str, help='Glob patterns to match files to upload.') subparser.add_argument('--exclude', nargs='*', type=str, help='Glob patterns to exclude from files to upload.') subparser.add_argument('--token', type=str, help='A User Access Token generated from https://huggingface.co/settings/tokens') subparser.add_argument('--num-workers', type=int, help='Number of workers to use to hash, upload and commit files.') subparser.add_argument('--no-report', action='store_true', help='Whether to disable regular status report.') subparser.add_argument('--no-bars', action='store_true', help='Whether to disable progress bars.') subparser.set_defaults(func=UploadLargeFolderCommand) def __init__(self, args: Namespace) -> None: self.repo_id: str = args.repo_id self.local_path: str = args.local_path self.repo_type: str = args.repo_type self.revision: Optional[str] = args.revision self.private: bool = args.private self.include: Optional[List[str]] = args.include self.exclude: Optional[List[str]] = args.exclude self.api: HfApi = HfApi(token=args.token, library_name='huggingface-cli') self.num_workers: Optional[int] = args.num_workers self.no_report: bool = args.no_report self.no_bars: bool = args.no_bars if not os.path.isdir(self.local_path): raise ValueError('Large upload is only supported for folders.') def run(self) -> None: logging.set_verbosity_info() print(ANSI.yellow(f'You are about to upload a large folder to the Hub using `huggingface-cli upload-large-folder`. This is a new feature so feedback is very welcome!\n\nA few things to keep in mind:\n - Repository limits still apply: https://huggingface.co/docs/hub/repositories-recommendations\n - Do not start several processes in parallel.\n - You can interrupt and resume the process at any time. The script will pick up where it left off except for partially uploaded files that would have to be entirely reuploaded.\n - Do not upload the same folder to several repositories. If you need to do so, you must delete the `./.cache/huggingface/` folder first.\n\nSome temporary metadata will be stored under `{self.local_path}/.cache/huggingface`.\n - You must not modify those files manually.\n - You must not delete the `./.cache/huggingface/` folder while a process is running.\n - You can delete the `./.cache/huggingface/` folder to reinitialize the upload state when the process is not running. Files will have to be hashed and preuploaded again, except for already committed files.\n\nIf the process output is too verbose, you can disable the progress bars with `--no-bars`.
You can also entirely disable the status report with `--no-report`.\n\nFor more details, run `huggingface-cli upload-large-folder --help` or check the documentation at https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-large-folder.')) if self.no_bars: disable_progress_bars() self.api.upload_large_folder(repo_id=self.repo_id, folder_path=self.local_path, repo_type=self.repo_type, revision=self.revision, private=self.private, allow_patterns=self.include, ignore_patterns=self.exclude, num_workers=self.num_workers, print_report=not self.no_report) # File: huggingface_hub-main/src/huggingface_hub/commands/user.py import subprocess from argparse import _SubParsersAction from requests.exceptions import HTTPError from huggingface_hub.commands import BaseHuggingfaceCLICommand from huggingface_hub.constants import ENDPOINT, REPO_TYPES, REPO_TYPES_URL_PREFIXES, SPACES_SDK_TYPES from huggingface_hub.hf_api import HfApi from .._login import NOTEBOOK_LOGIN_PASSWORD_HTML, NOTEBOOK_LOGIN_TOKEN_HTML_END, NOTEBOOK_LOGIN_TOKEN_HTML_START, login, logout, notebook_login from ..utils import get_token from ._cli_utils import ANSI class UserCommands(BaseHuggingfaceCLICommand): @staticmethod def register_subcommand(parser: _SubParsersAction): login_parser = parser.add_parser('login', help='Log in using a token from huggingface.co/settings/tokens') login_parser.add_argument('--token', type=str, help='Token generated from https://huggingface.co/settings/tokens') login_parser.add_argument('--add-to-git-credential', action='store_true', help='Optional: Save token to git credential helper.') login_parser.set_defaults(func=lambda args: LoginCommand(args)) whoami_parser = parser.add_parser('whoami', help='Find out which huggingface.co account you are logged in as.') whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) logout_parser = parser.add_parser('logout', help='Log out') logout_parser.set_defaults(func=lambda args: LogoutCommand(args)) repo_parser = parser.add_parser('repo', help='{create} Commands to interact with your huggingface.co repos.') repo_subparsers = repo_parser.add_subparsers(help='huggingface.co repos related commands') repo_create_parser = repo_subparsers.add_parser('create', help='Create a new repo on huggingface.co') repo_create_parser.add_argument('name', type=str, help='Name for your repo. Will be namespaced under your username to build the repo id.') repo_create_parser.add_argument('--type', type=str, help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.') repo_create_parser.add_argument('--organization', type=str, help='Optional: organization namespace.') repo_create_parser.add_argument('--space_sdk', type=str, help='Optional: Hugging Face Spaces SDK type. 
Required when --type is set to "space".', choices=SPACES_SDK_TYPES) repo_create_parser.add_argument('-y', '--yes', action='store_true', help='Optional: answer Yes to the prompt') repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args)) class BaseUserCommand: def __init__(self, args): self.args = args self._api = HfApi() class LoginCommand(BaseUserCommand): def run(self): login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential) class LogoutCommand(BaseUserCommand): def run(self): logout() class WhoamiCommand(BaseUserCommand): def run(self): token = get_token() if token is None: print('Not logged in') exit() try: info = self._api.whoami(token) print(info['name']) orgs = [org['name'] for org in info['orgs']] if orgs: print(ANSI.bold('orgs: '), ','.join(orgs)) if ENDPOINT != 'https://huggingface.co': print(f'Authenticated through private endpoint: {ENDPOINT}') except HTTPError as e: print(e) print(ANSI.red(e.response.text)) exit(1) class RepoCreateCommand(BaseUserCommand): def run(self): token = get_token() if token is None: print('Not logged in') exit(1) try: stdout = subprocess.check_output(['git', '--version']).decode('utf-8') print(ANSI.gray(stdout.strip())) except FileNotFoundError: print('Looks like you do not have git installed, please install.') try: stdout = subprocess.check_output(['git-lfs', '--version']).decode('utf-8') print(ANSI.gray(stdout.strip())) except FileNotFoundError: print(ANSI.red('Looks like you do not have git-lfs installed, please install. You can install from https://git-lfs.github.com/. Then run `git lfs install` (you only have to do this once).')) print('') user = self._api.whoami(token)['name'] namespace = self.args.organization if self.args.organization is not None else user repo_id = f'{namespace}/{self.args.name}' if self.args.type not in REPO_TYPES: print('Invalid repo --type') exit(1) if self.args.type in REPO_TYPES_URL_PREFIXES: prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id else: prefixed_repo_id = repo_id print(f'You are about to create {ANSI.bold(prefixed_repo_id)}') if not self.args.yes: choice = input('Proceed? [Y/n] ').lower() if not (choice == '' or choice == 'y' or choice == 'yes'): print('Abort') exit() try: url = self._api.create_repo(repo_id=repo_id, token=token, repo_type=self.args.type, space_sdk=self.args.space_sdk) except HTTPError as e: print(e) print(ANSI.red(e.response.text)) exit(1) print('\nYour repo now lives at:') print(f' {ANSI.bold(url)}') print('\nYou can clone it locally with the command below, and commit/push as usual.') print(f'\n git clone {url}') print('') # File: huggingface_hub-main/src/huggingface_hub/commands/version.py """""" from argparse import _SubParsersAction from huggingface_hub import __version__ from . import BaseHuggingfaceCLICommand class VersionCommand(BaseHuggingfaceCLICommand): def __init__(self, args): self.args = args @staticmethod def register_subcommand(parser: _SubParsersAction): version_parser = parser.add_parser('version', help='Print information about the huggingface-cli version.') version_parser.set_defaults(func=VersionCommand) def run(self) -> None: print(f'huggingface_hub version: {__version__}') # File: huggingface_hub-main/src/huggingface_hub/community.py """""" from dataclasses import dataclass from datetime import datetime from typing import List, Literal, Optional, Union from . 
import constants from .utils import parse_datetime DiscussionStatus = Literal['open', 'closed', 'merged', 'draft'] @dataclass class Discussion: title: str status: DiscussionStatus num: int repo_id: str repo_type: str author: str is_pull_request: bool created_at: datetime endpoint: str @property def git_reference(self) -> Optional[str]: if self.is_pull_request: return f'refs/pr/{self.num}' return None @property def url(self) -> str: if self.repo_type is None or self.repo_type == constants.REPO_TYPE_MODEL: return f'{self.endpoint}/{self.repo_id}/discussions/{self.num}' return f'{self.endpoint}/{self.repo_type}s/{self.repo_id}/discussions/{self.num}' @dataclass class DiscussionWithDetails(Discussion): events: List['DiscussionEvent'] conflicting_files: Union[List[str], bool, None] target_branch: Optional[str] merge_commit_oid: Optional[str] diff: Optional[str] @dataclass class DiscussionEvent: id: str type: str created_at: datetime author: str _event: dict '' @dataclass class DiscussionComment(DiscussionEvent): content: str edited: bool hidden: bool @property def rendered(self) -> str: return self._event['data']['latest']['html'] @property def last_edited_at(self) -> datetime: return parse_datetime(self._event['data']['latest']['updatedAt']) @property def last_edited_by(self) -> str: return self._event['data']['latest'].get('author', {}).get('name', 'deleted') @property def edit_history(self) -> List[dict]: return self._event['data']['history'] @property def number_of_edits(self) -> int: return len(self.edit_history) @dataclass class DiscussionStatusChange(DiscussionEvent): new_status: str @dataclass class DiscussionCommit(DiscussionEvent): summary: str oid: str @dataclass class DiscussionTitleChange(DiscussionEvent): old_title: str new_title: str def deserialize_event(event: dict) -> DiscussionEvent: event_id: str = event['id'] event_type: str = event['type'] created_at = parse_datetime(event['createdAt']) common_args = dict(id=event_id, type=event_type, created_at=created_at, author=event.get('author', {}).get('name', 'deleted'), _event=event) if event_type == 'comment': return DiscussionComment(**common_args, edited=event['data']['edited'], hidden=event['data']['hidden'], content=event['data']['latest']['raw']) if event_type == 'status-change': return DiscussionStatusChange(**common_args, new_status=event['data']['status']) if event_type == 'commit': return DiscussionCommit(**common_args, summary=event['data']['subject'], oid=event['data']['oid']) if event_type == 'title-change': return DiscussionTitleChange(**common_args, old_title=event['data']['from'], new_title=event['data']['to']) return DiscussionEvent(**common_args)
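# A minimal sketch of `deserialize_event` on a comment payload; the dict below is an
# illustrative assumption of the Hub's event shape, not data taken from the source.
raw_event = {'id': 'evt-123', 'type': 'comment', 'createdAt': '2024-01-01T00:00:00.000Z',
             'author': {'name': 'alice'},
             'data': {'edited': False, 'hidden': False, 'history': [],
                      'latest': {'raw': 'Nice work!', 'html': '<p>Nice work!</p>',
                                 'updatedAt': '2024-01-01T00:00:00.000Z'}}}
comment = deserialize_event(raw_event)   # -> DiscussionComment(content='Nice work!', ...)
print(comment.author, comment.rendered)  # 'alice', '<p>Nice work!</p>'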
# File: huggingface_hub-main/src/huggingface_hub/constants.py import os import re import typing from typing import Literal, Optional, Tuple ENV_VARS_TRUE_VALUES = {'1', 'ON', 'YES', 'TRUE'} ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({'AUTO'}) def _is_true(value: Optional[str]) -> bool: if value is None: return False return value.upper() in ENV_VARS_TRUE_VALUES def _as_int(value: Optional[str]) -> Optional[int]: if value is None: return None return int(value) PYTORCH_WEIGHTS_NAME = 'pytorch_model.bin' TF2_WEIGHTS_NAME = 'tf_model.h5' TF_WEIGHTS_NAME = 'model.ckpt' FLAX_WEIGHTS_NAME = 'flax_model.msgpack' CONFIG_NAME = 'config.json' REPOCARD_NAME = 'README.md' DEFAULT_ETAG_TIMEOUT = 10 DEFAULT_DOWNLOAD_TIMEOUT = 10 DEFAULT_REQUEST_TIMEOUT = 10 DOWNLOAD_CHUNK_SIZE = 10 * 1024 * 1024 HF_TRANSFER_CONCURRENCY = 100 PYTORCH_WEIGHTS_FILE_PATTERN = 'pytorch_model{suffix}.bin' SAFETENSORS_WEIGHTS_FILE_PATTERN = 'model{suffix}.safetensors' TF2_WEIGHTS_FILE_PATTERN = 'tf_model{suffix}.h5' SAFETENSORS_SINGLE_FILE = 'model.safetensors' SAFETENSORS_INDEX_FILE = 'model.safetensors.index.json' SAFETENSORS_MAX_HEADER_LENGTH = 25000000 FILELOCK_LOG_EVERY_SECONDS = 10 DEFAULT_REVISION = 'main' REGEX_COMMIT_OID = re.compile('[A-Fa-f0-9]{5,40}') HUGGINGFACE_CO_URL_HOME = 'https://huggingface.co/' _staging_mode = _is_true(os.environ.get('HUGGINGFACE_CO_STAGING')) _HF_DEFAULT_ENDPOINT = 'https://huggingface.co' _HF_DEFAULT_STAGING_ENDPOINT = 'https://hub-ci.huggingface.co' ENDPOINT = os.getenv('HF_ENDPOINT') or (_HF_DEFAULT_STAGING_ENDPOINT if _staging_mode else _HF_DEFAULT_ENDPOINT) HUGGINGFACE_CO_URL_TEMPLATE = ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' HUGGINGFACE_HEADER_X_REPO_COMMIT = 'X-Repo-Commit' HUGGINGFACE_HEADER_X_LINKED_ETAG = 'X-Linked-Etag' HUGGINGFACE_HEADER_X_LINKED_SIZE = 'X-Linked-Size' INFERENCE_ENDPOINT = os.environ.get('HF_INFERENCE_ENDPOINT', 'https://api-inference.huggingface.co') INFERENCE_ENDPOINTS_ENDPOINT = 'https://api.endpoints.huggingface.cloud/v2' REPO_ID_SEPARATOR = '--' REPO_TYPE_DATASET = 'dataset' REPO_TYPE_SPACE = 'space' REPO_TYPE_MODEL = 'model' REPO_TYPES = [None, REPO_TYPE_MODEL, REPO_TYPE_DATASET, REPO_TYPE_SPACE] SPACES_SDK_TYPES = ['gradio', 'streamlit', 'docker', 'static'] REPO_TYPES_URL_PREFIXES = {REPO_TYPE_DATASET: 'datasets/', REPO_TYPE_SPACE: 'spaces/'} REPO_TYPES_MAPPING = {'datasets': REPO_TYPE_DATASET, 'spaces': REPO_TYPE_SPACE, 'models': REPO_TYPE_MODEL} DiscussionTypeFilter = Literal['all', 'discussion', 'pull_request'] DISCUSSION_TYPES: Tuple[DiscussionTypeFilter, ...] = typing.get_args(DiscussionTypeFilter) DiscussionStatusFilter = Literal['all', 'open', 'closed'] DISCUSSION_STATUS: Tuple[DiscussionStatusFilter, ...]
= typing.get_args(DiscussionStatusFilter) WEBHOOK_DOMAIN_T = Literal['repo', 'discussions'] default_home = os.path.join(os.path.expanduser('~'), '.cache') HF_HOME = os.path.expanduser(os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', default_home), 'huggingface'))) hf_cache_home = HF_HOME default_cache_path = os.path.join(HF_HOME, 'hub') default_assets_cache_path = os.path.join(HF_HOME, 'assets') HUGGINGFACE_HUB_CACHE = os.getenv('HUGGINGFACE_HUB_CACHE', default_cache_path) HUGGINGFACE_ASSETS_CACHE = os.getenv('HUGGINGFACE_ASSETS_CACHE', default_assets_cache_path) HF_HUB_CACHE = os.getenv('HF_HUB_CACHE', HUGGINGFACE_HUB_CACHE) HF_ASSETS_CACHE = os.getenv('HF_ASSETS_CACHE', HUGGINGFACE_ASSETS_CACHE) HF_HUB_OFFLINE = _is_true(os.environ.get('HF_HUB_OFFLINE') or os.environ.get('TRANSFORMERS_OFFLINE')) HF_HUB_DISABLE_TELEMETRY = _is_true(os.environ.get('HF_HUB_DISABLE_TELEMETRY')) or _is_true(os.environ.get('DISABLE_TELEMETRY')) or _is_true(os.environ.get('DO_NOT_TRACK')) _OLD_HF_TOKEN_PATH = os.path.expanduser('~/.huggingface/token') HF_TOKEN_PATH = os.environ.get('HF_TOKEN_PATH', os.path.join(HF_HOME, 'token')) if _staging_mode: _staging_home = os.path.join(os.path.expanduser('~'), '.cache', 'huggingface_staging') HUGGINGFACE_HUB_CACHE = os.path.join(_staging_home, 'hub') _OLD_HF_TOKEN_PATH = os.path.join(_staging_home, '_old_token') HF_TOKEN_PATH = os.path.join(_staging_home, 'token') __HF_HUB_DISABLE_PROGRESS_BARS = os.environ.get('HF_HUB_DISABLE_PROGRESS_BARS') HF_HUB_DISABLE_PROGRESS_BARS: Optional[bool] = _is_true(__HF_HUB_DISABLE_PROGRESS_BARS) if __HF_HUB_DISABLE_PROGRESS_BARS is not None else None HF_HUB_DISABLE_SYMLINKS_WARNING: bool = _is_true(os.environ.get('HF_HUB_DISABLE_SYMLINKS_WARNING')) HF_HUB_DISABLE_EXPERIMENTAL_WARNING: bool = _is_true(os.environ.get('HF_HUB_DISABLE_EXPERIMENTAL_WARNING')) HF_HUB_DISABLE_IMPLICIT_TOKEN: bool = _is_true(os.environ.get('HF_HUB_DISABLE_IMPLICIT_TOKEN')) HF_HUB_ENABLE_HF_TRANSFER: bool = _is_true(os.environ.get('HF_HUB_ENABLE_HF_TRANSFER')) HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD: int = _as_int(os.environ.get('HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD')) or 5 * 1024 * 1024 HF_HUB_ETAG_TIMEOUT: int = _as_int(os.environ.get('HF_HUB_ETAG_TIMEOUT')) or DEFAULT_ETAG_TIMEOUT HF_HUB_DOWNLOAD_TIMEOUT: int = _as_int(os.environ.get('HF_HUB_DOWNLOAD_TIMEOUT')) or DEFAULT_DOWNLOAD_TIMEOUT MAIN_INFERENCE_API_FRAMEWORKS = ['diffusers', 'sentence-transformers', 'text-generation-inference', 'transformers'] ALL_INFERENCE_API_FRAMEWORKS = MAIN_INFERENCE_API_FRAMEWORKS + ['adapter-transformers', 'allennlp', 'asteroid', 'bertopic', 'doctr', 'espnet', 'fairseq', 'fastai', 'fasttext', 'flair', 'k2', 'keras', 'mindspore', 'nemo', 'open_clip', 'paddlenlp', 'peft', 'pyannote-audio', 'sklearn', 'spacy', 'span-marker', 'speechbrain', 'stanza', 'timm'] # File: huggingface_hub-main/src/huggingface_hub/errors.py """""" from pathlib import Path from typing import Optional, Union from requests import HTTPError, Response class CacheNotFound(Exception): cache_dir: Union[str, Path] def __init__(self, msg: str, cache_dir: Union[str, Path], *args, **kwargs): super().__init__(msg, *args, **kwargs) self.cache_dir = cache_dir class CorruptedCacheException(Exception): pass class LocalTokenNotFoundError(EnvironmentError): pass class OfflineModeIsEnabled(ConnectionError): pass class HfHubHTTPError(HTTPError): def __init__(self, message: str, response: Optional[Response]=None, *, server_message: Optional[str]=None): self.request_id = response.headers.get('x-request-id') if response is not None else None self.server_message = server_message super().__init__(message, response=response, request=response.request if response is not None else None) def append_to_message(self, additional_message: str) -> None: self.args = (self.args[0] + additional_message,) + self.args[1:]
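# A minimal sketch of how `HfHubHTTPError` is meant to be consumed downstream; the repo id is an
# illustrative assumption. The subclasses defined below (e.g. `RepositoryNotFoundError`) inherit
# the `request_id` and `server_message` attributes populated in the constructor above.
from huggingface_hub import model_info
from huggingface_hub.errors import HfHubHTTPError

try:
    model_info('nonexistent-user/nonexistent-repo')
except HfHubHTTPError as e:
    print(e.request_id)      # value of the 'x-request-id' response header, if any
    print(e.server_message)  # error message returned by the Hub, if any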
class InferenceTimeoutError(HTTPError, TimeoutError): pass class InferenceEndpointError(Exception): pass class InferenceEndpointTimeoutError(InferenceEndpointError, TimeoutError): pass class SafetensorsParsingError(Exception): pass class NotASafetensorsRepoError(Exception): pass class TemplateError(Exception): pass class TextGenerationError(HTTPError): pass class ValidationError(TextGenerationError): pass class GenerationError(TextGenerationError): pass class OverloadedError(TextGenerationError): pass class IncompleteGenerationError(TextGenerationError): pass class UnknownError(TextGenerationError): pass class HFValidationError(ValueError): pass class FileMetadataError(OSError): pass class RepositoryNotFoundError(HfHubHTTPError): pass class GatedRepoError(RepositoryNotFoundError): pass class DisabledRepoError(HfHubHTTPError): pass class RevisionNotFoundError(HfHubHTTPError): pass class EntryNotFoundError(HfHubHTTPError): pass class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError): def __init__(self, message: str): super().__init__(message, response=None) class BadRequestError(HfHubHTTPError, ValueError): pass # File: huggingface_hub-main/src/huggingface_hub/fastai_utils.py import json import os from pathlib import Path from pickle import DEFAULT_PROTOCOL, PicklingError from typing import Any, Dict, List, Optional, Union from packaging import version from huggingface_hub import constants, snapshot_download from huggingface_hub.hf_api import HfApi from huggingface_hub.utils import SoftTemporaryDirectory, get_fastai_version, get_fastcore_version, get_python_version from .utils import logging, validate_hf_hub_args from .utils._runtime import _PY_VERSION logger = logging.get_logger(__name__) def _check_fastai_fastcore_versions(fastai_min_version: str='2.4', fastcore_min_version: str='1.3.27'): if (get_fastcore_version() or get_fastai_version()) == 'N/A': raise ImportError(f'fastai>={fastai_min_version} and fastcore>={fastcore_min_version} are required. Currently using fastai=={get_fastai_version()} and fastcore=={get_fastcore_version()}.') current_fastai_version = version.Version(get_fastai_version()) current_fastcore_version = version.Version(get_fastcore_version()) if current_fastai_version < version.Version(fastai_min_version): raise ImportError(f'`push_to_hub_fastai` and `from_pretrained_fastai` require a fastai>={fastai_min_version} version, but you are using fastai version {get_fastai_version()} which is incompatible. Upgrade with `pip install fastai==2.5.6`.') if current_fastcore_version < version.Version(fastcore_min_version): raise ImportError(f'`push_to_hub_fastai` and `from_pretrained_fastai` require a fastcore>={fastcore_min_version} version, but you are using fastcore version {get_fastcore_version()} which is incompatible. Upgrade with `pip install fastcore==1.3.27`.') def _check_fastai_fastcore_pyproject_versions(storage_folder: str, fastai_min_version: str='2.4', fastcore_min_version: str='1.3.27'): try: import toml except ModuleNotFoundError: raise ImportError('`push_to_hub_fastai` and `from_pretrained_fastai` require the toml module. Install it with `pip install toml`.') if not os.path.isfile(f'{storage_folder}/pyproject.toml'): logger.warning('There is no `pyproject.toml` in the repository that contains the fastai `Learner`.
The `pyproject.toml` would allow us to verify that your fastai and fastcore versions are compatible with those of the model you want to load.') return pyproject_toml = toml.load(f'{storage_folder}/pyproject.toml') if 'build-system' not in pyproject_toml.keys(): logger.warning('There is no `build-system` section in the pyproject.toml of the repository that contains the fastai `Learner`. The `build-system` would allow us to verify that your fastai and fastcore versions are compatible with those of the model you want to load.') return build_system_toml = pyproject_toml['build-system'] if 'requires' not in build_system_toml.keys(): logger.warning('There is no `requires` section in the pyproject.toml of the repository that contains the fastai `Learner`. The `requires` would allow us to verify that your fastai and fastcore versions are compatible with those of the model you want to load.') return package_versions = build_system_toml['requires'] fastai_packages = [pck for pck in package_versions if pck.startswith('fastai')] if len(fastai_packages) == 0: logger.warning('The repository does not have a fastai version specified in the `pyproject.toml`.') else: fastai_version = str(fastai_packages[0]).partition('=')[2] if fastai_version != '' and version.Version(fastai_version) < version.Version(fastai_min_version): raise ImportError(f'`from_pretrained_fastai` requires fastai>={fastai_min_version} version but the model to load uses {fastai_version} which is incompatible.') fastcore_packages = [pck for pck in package_versions if pck.startswith('fastcore')] if len(fastcore_packages) == 0: logger.warning('The repository does not have a fastcore version specified in the `pyproject.toml`.') else: fastcore_version = str(fastcore_packages[0]).partition('=')[2] if fastcore_version != '' and version.Version(fastcore_version) < version.Version(fastcore_min_version): raise ImportError(f'`from_pretrained_fastai` requires fastcore>={fastcore_min_version} version, but you are using fastcore version {fastcore_version} which is incompatible.') README_TEMPLATE = "---\ntags:\n- fastai\n---\n\n# Amazing!\n\n🥳 Congratulations on hosting your fastai model on the Hugging Face Hub!\n\n# Some next steps\n1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))!\n\n2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)).\n\n3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)!\n\nGreetings fellow fastlearner 🤝! 
Don't forget to delete this content from your model card.\n\n\n---\n\n\n# Model card\n\n## Model description\nMore information needed\n\n## Intended uses & limitations\nMore information needed\n\n## Training and evaluation data\nMore information needed\n" PYPROJECT_TEMPLATE = f'[build-system]\nrequires = ["setuptools>=40.8.0", "wheel", "python={get_python_version()}", "fastai={get_fastai_version()}", "fastcore={get_fastcore_version()}"]\nbuild-backend = "setuptools.build_meta:__legacy__"\n' def _create_model_card(repo_dir: Path): readme_path = repo_dir / 'README.md' if not readme_path.exists(): with readme_path.open('w', encoding='utf-8') as f: f.write(README_TEMPLATE) def _create_model_pyproject(repo_dir: Path): pyproject_path = repo_dir / 'pyproject.toml' if not pyproject_path.exists(): with pyproject_path.open('w', encoding='utf-8') as f: f.write(PYPROJECT_TEMPLATE) def _save_pretrained_fastai(learner, save_directory: Union[str, Path], config: Optional[Dict[str, Any]]=None): _check_fastai_fastcore_versions() os.makedirs(save_directory, exist_ok=True) if config is not None: if not isinstance(config, dict): raise RuntimeError(f"Provided config should be a dict. Got: '{type(config)}'") path = os.path.join(save_directory, constants.CONFIG_NAME) with open(path, 'w') as f: json.dump(config, f) _create_model_card(Path(save_directory)) _create_model_pyproject(Path(save_directory)) learner.path = Path(save_directory) os.makedirs(save_directory, exist_ok=True) try: learner.export(fname='model.pkl', pickle_protocol=DEFAULT_PROTOCOL) except PicklingError: raise PicklingError('You are using a lambda function, i.e., an anonymous function. `pickle` cannot pickle function objects and requires that all functions have names. One possible solution is to name the function.') @validate_hf_hub_args def from_pretrained_fastai(repo_id: str, revision: Optional[str]=None): _check_fastai_fastcore_versions() if not os.path.isdir(repo_id): storage_folder = snapshot_download(repo_id=repo_id, revision=revision, library_name='fastai', library_version=get_fastai_version()) else: storage_folder = repo_id _check_fastai_fastcore_pyproject_versions(storage_folder) from fastai.learner import load_learner return load_learner(os.path.join(storage_folder, 'model.pkl')) @validate_hf_hub_args def push_to_hub_fastai(learner, *, repo_id: str, commit_message: str='Push FastAI model using huggingface_hub.', private: bool=False, token: Optional[str]=None, config: Optional[dict]=None, branch: Optional[str]=None, create_pr: Optional[bool]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, api_endpoint: Optional[str]=None): _check_fastai_fastcore_versions() api = HfApi(endpoint=api_endpoint) repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id with SoftTemporaryDirectory() as tmp: saved_path = Path(tmp) / repo_id _save_pretrained_fastai(learner, saved_path, config=config) return api.upload_folder(repo_id=repo_id, token=token, folder_path=saved_path, commit_message=commit_message, revision=branch, create_pr=create_pr, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, delete_patterns=delete_patterns)
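# A minimal sketch of the round-trip enabled by the fastai helpers above. The repo id is an
# illustrative assumption, and `learn` stands in for a fastai Learner trained elsewhere.
from huggingface_hub import from_pretrained_fastai, push_to_hub_fastai

# learn = ...  # a trained fastai Learner (assumption: built beforehand)
push_to_hub_fastai(learn, repo_id='user/fastai-demo')  # exports model.pkl, README.md, pyproject.toml
reloaded = from_pretrained_fastai('user/fastai-demo')  # snapshot download + fastai load_learner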
# File: huggingface_hub-main/src/huggingface_hub/file_download.py import contextlib import copy import errno import fnmatch import inspect import json import os import re import shutil import stat import time import uuid import warnings from dataclasses import dataclass from pathlib import Path from typing import Any, BinaryIO, Dict, Literal, NoReturn, Optional, Tuple, Union from urllib.parse import quote, urlparse import requests from . import __version__, constants from ._local_folder import get_local_download_paths, read_download_metadata, write_download_metadata from .constants import HUGGINGFACE_CO_URL_TEMPLATE, HUGGINGFACE_HUB_CACHE from .errors import EntryNotFoundError, FileMetadataError, GatedRepoError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from .utils import OfflineModeIsEnabled, SoftTemporaryDirectory, WeakFileLock, build_hf_headers, get_fastai_version, get_fastcore_version, get_graphviz_version, get_jinja_version, get_pydot_version, get_session, get_tf_version, get_torch_version, hf_raise_for_status, is_fastai_available, is_fastcore_available, is_graphviz_available, is_jinja_available, is_pydot_available, is_tf_available, is_torch_available, logging, reset_sessions, tqdm, validate_hf_hub_args from .utils._deprecation import _deprecate_arguments, _deprecate_method from .utils._runtime import _PY_VERSION from .utils._typing import HTTP_METHOD_T from .utils.insecure_hashlib import sha256 from .utils.sha import sha_fileobj logger = logging.get_logger(__name__) _CACHED_NO_EXIST = object() _CACHED_NO_EXIST_T = Any HEADER_FILENAME_PATTERN = re.compile('filename="(?P<filename>.*?)";') REGEX_COMMIT_HASH = re.compile('^[0-9a-f]{40}$') REGEX_SHA256 = re.compile('^[0-9a-f]{64}$') _are_symlinks_supported_in_dir: Dict[str, bool] = {} def are_symlinks_supported(cache_dir: Union[str, Path, None]=None) -> bool: if cache_dir is None: cache_dir = constants.HF_HUB_CACHE cache_dir = str(Path(cache_dir).expanduser().resolve()) if cache_dir not in _are_symlinks_supported_in_dir: _are_symlinks_supported_in_dir[cache_dir] = True os.makedirs(cache_dir, exist_ok=True) with SoftTemporaryDirectory(dir=cache_dir) as tmpdir: src_path = Path(tmpdir) / 'dummy_file_src' src_path.touch() dst_path = Path(tmpdir) / 'dummy_file_dst' relative_src = os.path.relpath(src_path, start=os.path.dirname(dst_path)) try: os.symlink(relative_src, dst_path) except OSError: _are_symlinks_supported_in_dir[cache_dir] = False if not constants.HF_HUB_DISABLE_SYMLINKS_WARNING: message = f'`huggingface_hub` cache-system uses symlinks by default to efficiently store duplicated files but your machine does not support them in {cache_dir}. Caching files will still work but in a degraded version that might require more space on your disk. This warning can be disabled by setting the `HF_HUB_DISABLE_SYMLINKS_WARNING` environment variable. For more details, see https://huggingface.co/docs/huggingface_hub/how-to-cache#limitations.' if os.name == 'nt': message += '\nTo support symlinks on Windows, you either need to activate Developer Mode or to run Python as an administrator.
In order to activate developer mode, see this article: https://docs.microsoft.com/en-us/windows/apps/get-started/enable-your-device-for-development' warnings.warn(message) return _are_symlinks_supported_in_dir[cache_dir] @dataclass(frozen=True) class HfFileMetadata: commit_hash: Optional[str] etag: Optional[str] location: str size: Optional[int] @validate_hf_hub_args def hf_hub_url(repo_id: str, filename: str, *, subfolder: Optional[str]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, endpoint: Optional[str]=None) -> str: if subfolder == '': subfolder = None if subfolder is not None: filename = f'{subfolder}/{filename}' if repo_type not in constants.REPO_TYPES: raise ValueError('Invalid repo type') if repo_type in constants.REPO_TYPES_URL_PREFIXES: repo_id = constants.REPO_TYPES_URL_PREFIXES[repo_type] + repo_id if revision is None: revision = constants.DEFAULT_REVISION url = HUGGINGFACE_CO_URL_TEMPLATE.format(repo_id=repo_id, revision=quote(revision, safe=''), filename=quote(filename)) if endpoint is not None and url.startswith(constants.ENDPOINT): url = endpoint + url[len(constants.ENDPOINT):] return url @_deprecate_method(version='0.26', message='Use `hf_hub_download` to benefit from the new cache layout.') def url_to_filename(url: str, etag: Optional[str]=None) -> str: url_bytes = url.encode('utf-8') filename = sha256(url_bytes).hexdigest() if etag: etag_bytes = etag.encode('utf-8') filename += '.' + sha256(etag_bytes).hexdigest() if url.endswith('.h5'): filename += '.h5' return filename @_deprecate_method(version='0.26', message='Use `hf_hub_url` instead.') def filename_to_url(filename, cache_dir: Optional[str]=None, legacy_cache_layout: bool=False) -> Tuple[str, str]: if not legacy_cache_layout: warnings.warn('`filename_to_url` uses the legacy way cache file layout', FutureWarning) if cache_dir is None: cache_dir = constants.HF_HUB_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError(f'file {cache_path} not found') meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise EnvironmentError(f'file {meta_path} not found') with open(meta_path, encoding='utf-8') as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return (url, etag) def _request_wrapper(method: HTTP_METHOD_T, url: str, *, follow_relative_redirects: bool=False, **params) -> requests.Response: if follow_relative_redirects: response = _request_wrapper(method=method, url=url, follow_relative_redirects=False, **params) if 300 <= response.status_code <= 399: parsed_target = urlparse(response.headers['Location']) if parsed_target.netloc == '': next_url = urlparse(url)._replace(path=parsed_target.path).geturl() return _request_wrapper(method=method, url=next_url, follow_relative_redirects=True, **params) return response response = get_session().request(method=method, url=url, **params) hf_raise_for_status(response) return response def http_get(url: str, temp_file: BinaryIO, *, proxies: Optional[Dict]=None, resume_size: float=0, headers: Optional[Dict[str, str]]=None, expected_size: Optional[int]=None, displayed_filename: Optional[str]=None, _nb_retries: int=5, _tqdm_bar: Optional[tqdm]=None) -> None: if expected_size is not None and resume_size == expected_size: return hf_transfer = None if constants.HF_HUB_ENABLE_HF_TRANSFER: if resume_size != 0: warnings.warn("'hf_transfer' does not support `resume_size`: falling back to regular download 
method") elif proxies is not None: warnings.warn("'hf_transfer' does not support `proxies`: falling back to regular download method") else: try: import hf_transfer except ImportError: raise ValueError("Fast download using 'hf_transfer' is enabled (HF_HUB_ENABLE_HF_TRANSFER=1) but 'hf_transfer' package is not available in your environment. Try `pip install hf_transfer`.") initial_headers = headers headers = copy.deepcopy(headers) or {} if resume_size > 0: headers['Range'] = 'bytes=%d-' % (resume_size,) r = _request_wrapper(method='GET', url=url, stream=True, proxies=proxies, headers=headers, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT) hf_raise_for_status(r) content_length = r.headers.get('Content-Length') total = resume_size + int(content_length) if content_length is not None else None if displayed_filename is None: displayed_filename = url content_disposition = r.headers.get('Content-Disposition') if content_disposition is not None: match = HEADER_FILENAME_PATTERN.search(content_disposition) if match is not None: displayed_filename = match.groupdict()['filename'] if len(displayed_filename) > 40: displayed_filename = f'(…){displayed_filename[-40:]}' consistency_error_message = f'Consistency check failed: file should be of size {expected_size} but has size {{actual_size}} ({displayed_filename}).\nWe are sorry for the inconvenience. Please retry with `force_download=True`.\nIf the issue persists, please let us know by opening an issue on https://github.com/huggingface/huggingface_hub.' progress_cm: tqdm = tqdm(unit='B', unit_scale=True, total=total, initial=resume_size, desc=displayed_filename, disable=True if logger.getEffectiveLevel() == logging.NOTSET else None, name='huggingface_hub.http_get') if _tqdm_bar is None else contextlib.nullcontext(_tqdm_bar) with progress_cm as progress: if hf_transfer and total is not None and (total > 5 * constants.DOWNLOAD_CHUNK_SIZE): supports_callback = 'callback' in inspect.signature(hf_transfer.download).parameters if not supports_callback: warnings.warn('You are using an outdated version of `hf_transfer`. Consider upgrading to latest version to enable progress bars using `pip install -U hf_transfer`.') try: hf_transfer.download(url=url, filename=temp_file.name, max_files=constants.HF_TRANSFER_CONCURRENCY, chunk_size=constants.DOWNLOAD_CHUNK_SIZE, headers=headers, parallel_failures=3, max_retries=5, **{'callback': progress.update} if supports_callback else {}) except Exception as e: raise RuntimeError('An error occurred while downloading using `hf_transfer`. 
Consider disabling HF_HUB_ENABLE_HF_TRANSFER for better error handling.') from e if not supports_callback: progress.update(total) if expected_size is not None and expected_size != os.path.getsize(temp_file.name): raise EnvironmentError(consistency_error_message.format(actual_size=os.path.getsize(temp_file.name))) return new_resume_size = resume_size try: for chunk in r.iter_content(chunk_size=constants.DOWNLOAD_CHUNK_SIZE): if chunk: progress.update(len(chunk)) temp_file.write(chunk) new_resume_size += len(chunk) _nb_retries = 5 except (requests.ConnectionError, requests.ReadTimeout) as e: if _nb_retries <= 0: logger.warning('Error while downloading from %s: %s\nMax retries exceeded.', url, str(e)) raise logger.warning('Error while downloading from %s: %s\nTrying to resume download...', url, str(e)) time.sleep(1) reset_sessions() return http_get(url=url, temp_file=temp_file, proxies=proxies, resume_size=new_resume_size, headers=initial_headers, expected_size=expected_size, _nb_retries=_nb_retries - 1, _tqdm_bar=_tqdm_bar) if expected_size is not None and expected_size != temp_file.tell(): raise EnvironmentError(consistency_error_message.format(actual_size=temp_file.tell())) @validate_hf_hub_args @_deprecate_method(version='0.26', message='Use `hf_hub_download` instead.') def cached_download(url: str, *, library_name: Optional[str]=None, library_version: Optional[str]=None, cache_dir: Union[str, Path, None]=None, user_agent: Union[Dict, str, None]=None, force_download: bool=False, force_filename: Optional[str]=None, proxies: Optional[Dict]=None, etag_timeout: float=constants.DEFAULT_ETAG_TIMEOUT, resume_download: Optional[bool]=None, token: Union[bool, str, None]=None, local_files_only: bool=False, legacy_cache_layout: bool=False) -> str: if constants.HF_HUB_ETAG_TIMEOUT != constants.DEFAULT_ETAG_TIMEOUT: etag_timeout = constants.HF_HUB_ETAG_TIMEOUT if not legacy_cache_layout: warnings.warn("'cached_download' is the legacy way to download files from the HF hub, please consider upgrading to 'hf_hub_download'", FutureWarning) if resume_download is not None: warnings.warn('`resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.', FutureWarning) if cache_dir is None: cache_dir = constants.HF_HUB_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) headers = build_hf_headers(token=token, library_name=library_name, library_version=library_version, user_agent=user_agent) url_to_download = url etag = None expected_size = None if not local_files_only: try: headers['Accept-Encoding'] = 'identity' r = _request_wrapper(method='HEAD', url=url, headers=headers, allow_redirects=False, follow_relative_redirects=True, proxies=proxies, timeout=etag_timeout) headers.pop('Accept-Encoding', None) hf_raise_for_status(r) etag = r.headers.get(constants.HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get('ETag') if etag is None: raise FileMetadataError("Distant resource does not have an ETag, so we won't be able to reliably ensure reproducibility.") expected_size = _int_or_none(r.headers.get('Content-Length')) if 300 <= r.status_code <= 399: url_to_download = r.headers['Location'] headers.pop('authorization', None) expected_size = None except (requests.exceptions.SSLError, requests.exceptions.ProxyError): raise except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, OfflineModeIsEnabled): pass filename = force_filename if force_filename is not None else url_to_filename(url, etag) cache_path = os.path.join(cache_dir, filename) if etag is None: if os.path.exists(cache_path) and (not force_download): return cache_path else: matching_files = [file for file in fnmatch.filter(os.listdir(cache_dir), filename.split('.')[0] + '.*') if not file.endswith('.json') and (not file.endswith('.lock'))] if len(matching_files) > 0 and (not force_download) and (force_filename is None): return os.path.join(cache_dir, matching_files[-1]) elif local_files_only: raise LocalEntryNotFoundError("Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable model look-ups and downloads online, set 'local_files_only' to False.") else: raise LocalEntryNotFoundError('Connection error, and we cannot find the requested files in the cached path. 
Please try again or make sure your Internet connection is on.') if os.path.exists(cache_path) and (not force_download): return cache_path lock_path = cache_path + '.lock' if os.name == 'nt' and len(os.path.abspath(lock_path)) > 255: lock_path = '\\\\?\\' + os.path.abspath(lock_path) if os.name == 'nt' and len(os.path.abspath(cache_path)) > 255: cache_path = '\\\\?\\' + os.path.abspath(cache_path) with WeakFileLock(lock_path): _download_to_tmp_and_move(incomplete_path=Path(cache_path + '.incomplete'), destination_path=Path(cache_path), url_to_download=url_to_download, proxies=proxies, headers=headers, expected_size=expected_size, filename=filename, force_download=force_download) if force_filename is None: logger.info('creating metadata file for %s', cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: json.dump(meta, meta_file) return cache_path def _normalize_etag(etag: Optional[str]) -> Optional[str]: if etag is None: return None return etag.lstrip('W/').strip('"') def _create_relative_symlink(src: str, dst: str, new_blob: bool=False) -> None: return _create_symlink(src=src, dst=dst, new_blob=new_blob) def _create_symlink(src: str, dst: str, new_blob: bool=False) -> None: try: os.remove(dst) except OSError: pass abs_src = os.path.abspath(os.path.expanduser(src)) abs_dst = os.path.abspath(os.path.expanduser(dst)) abs_dst_folder = os.path.dirname(abs_dst) try: relative_src = os.path.relpath(abs_src, abs_dst_folder) except ValueError: relative_src = None try: commonpath = os.path.commonpath([abs_src, abs_dst]) _support_symlinks = are_symlinks_supported(commonpath) except ValueError: _support_symlinks = os.name != 'nt' except PermissionError: _support_symlinks = are_symlinks_supported(abs_dst_folder) except OSError as e: if e.errno == errno.EROFS: _support_symlinks = are_symlinks_supported(abs_dst_folder) else: raise if _support_symlinks: src_rel_or_abs = relative_src or abs_src logger.debug(f'Creating pointer from {src_rel_or_abs} to {abs_dst}') try: os.symlink(src_rel_or_abs, abs_dst) return except FileExistsError: if os.path.islink(abs_dst) and os.path.realpath(abs_dst) == os.path.realpath(abs_src): return else: raise except PermissionError: pass if new_blob: logger.info(f'Symlink not supported. Moving file from {abs_src} to {abs_dst}') shutil.move(abs_src, abs_dst, copy_function=_copy_no_matter_what) else: logger.info(f'Symlink not supported. Copying file from {abs_src} to {abs_dst}') shutil.copyfile(abs_src, abs_dst) def _cache_commit_hash_for_specific_revision(storage_folder: str, revision: str, commit_hash: str) -> None: if revision != commit_hash: ref_path = Path(storage_folder) / 'refs' / revision ref_path.parent.mkdir(parents=True, exist_ok=True) if not ref_path.exists() or commit_hash != ref_path.read_text(): ref_path.write_text(commit_hash) @validate_hf_hub_args def repo_folder_name(*, repo_id: str, repo_type: str) -> str: parts = [f'{repo_type}s', *repo_id.split('/')] return constants.REPO_ID_SEPARATOR.join(parts) def _check_disk_space(expected_size: int, target_dir: Union[str, Path]) -> None: target_dir = Path(target_dir) for path in [target_dir] + list(target_dir.parents): try: target_dir_free = shutil.disk_usage(path).free if target_dir_free < expected_size: warnings.warn(f'Not enough free disk space to download the file. The expected file size is: {expected_size / 1000000.0:.2f} MB. 
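# --- Editor's note: illustrative worked example, not part of the original source. ---
# `repo_folder_name` above pluralizes the repo type and joins it with the
# repo_id segments using `constants.REPO_ID_SEPARATOR` ('--' by default):
#
#     >>> repo_folder_name(repo_id='google/flan-t5-small', repo_type='model')
#     'models--google--flan-t5-small'
# --- End editor's note. ---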
The target location {target_dir} only has {target_dir_free / 1000000.0:.2f} MB free disk space.') return except OSError: pass @_deprecate_arguments(version='0.26.0', deprecated_args=['legacy_cache_layout'], custom_message='Legacy cache layout has been deprecated since August 2022 and will soon be removed. See https://huggingface.co/docs/huggingface_hub/guides/manage-cache for more details.') @validate_hf_hub_args def hf_hub_download(repo_id: str, filename: str, *, subfolder: Optional[str]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, library_name: Optional[str]=None, library_version: Optional[str]=None, cache_dir: Union[str, Path, None]=None, local_dir: Union[str, Path, None]=None, user_agent: Union[Dict, str, None]=None, force_download: bool=False, proxies: Optional[Dict]=None, etag_timeout: float=constants.DEFAULT_ETAG_TIMEOUT, token: Union[bool, str, None]=None, local_files_only: bool=False, headers: Optional[Dict[str, str]]=None, endpoint: Optional[str]=None, legacy_cache_layout: bool=False, resume_download: Optional[bool]=None, force_filename: Optional[str]=None, local_dir_use_symlinks: Union[bool, Literal['auto']]='auto') -> str: if constants.HF_HUB_ETAG_TIMEOUT != constants.DEFAULT_ETAG_TIMEOUT: etag_timeout = constants.HF_HUB_ETAG_TIMEOUT if force_filename is not None: warnings.warn('The `force_filename` parameter is deprecated as a new caching system, which keeps the filenames as they are on the Hub, is now in place.', FutureWarning) legacy_cache_layout = True if resume_download is not None: warnings.warn('`resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.', FutureWarning) if legacy_cache_layout: url = hf_hub_url(repo_id, filename, subfolder=subfolder, repo_type=repo_type, revision=revision, endpoint=endpoint) return cached_download(url, library_name=library_name, library_version=library_version, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, force_filename=force_filename, proxies=proxies, etag_timeout=etag_timeout, token=token, local_files_only=local_files_only, legacy_cache_layout=legacy_cache_layout) if cache_dir is None: cache_dir = constants.HF_HUB_CACHE if revision is None: revision = constants.DEFAULT_REVISION if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if isinstance(local_dir, Path): local_dir = str(local_dir) if subfolder == '': subfolder = None if subfolder is not None: filename = f'{subfolder}/{filename}' if repo_type is None: repo_type = 'model' if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type: {repo_type}. Accepted repo types are: {str(constants.REPO_TYPES)}') headers = build_hf_headers(token=token, library_name=library_name, library_version=library_version, user_agent=user_agent, headers=headers) if local_dir is not None: if local_dir_use_symlinks != 'auto': warnings.warn('`local_dir_use_symlinks` parameter is deprecated and will be ignored. The process to download files to a local folder has been updated and does not rely on symlinks anymore. 
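# --- Editor's note: illustrative usage sketch, not part of the original source. ---
# Typical calls to `hf_hub_download` defined above; the repo and filename are
# example values only.
def _example_hf_hub_download() -> None:
    from huggingface_hub import hf_hub_download

    # Download into the shared cache; the returned path points into the cache.
    cached_path = hf_hub_download(repo_id='gpt2', filename='config.json')
    print(cached_path)
    # Download into an explicit folder instead of the cache.
    local_path = hf_hub_download(repo_id='gpt2', filename='config.json', local_dir='./gpt2-files')
    print(local_path)
# --- End editor's note. ---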
You only need to pass a destination folder as `local_dir`.\nFor more details, check out https://huggingface.co/docs/huggingface_hub/main/en/guides/download#download-files-to-local-folder.') return _hf_hub_download_to_local_dir(local_dir=local_dir, repo_id=repo_id, repo_type=repo_type, filename=filename, revision=revision, endpoint=endpoint, etag_timeout=etag_timeout, headers=headers, proxies=proxies, token=token, cache_dir=cache_dir, force_download=force_download, local_files_only=local_files_only) else: return _hf_hub_download_to_cache_dir(cache_dir=cache_dir, repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision, endpoint=endpoint, etag_timeout=etag_timeout, headers=headers, proxies=proxies, token=token, local_files_only=local_files_only, force_download=force_download) def _hf_hub_download_to_cache_dir(*, cache_dir: str, repo_id: str, filename: str, repo_type: str, revision: str, endpoint: Optional[str], etag_timeout: float, headers: Dict[str, str], proxies: Optional[Dict], token: Optional[Union[bool, str]], local_files_only: bool, force_download: bool) -> str: locks_dir = os.path.join(cache_dir, '.locks') storage_folder = os.path.join(cache_dir, repo_folder_name(repo_id=repo_id, repo_type=repo_type)) relative_filename = os.path.join(*filename.split('/')) if os.name == 'nt': if relative_filename.startswith('..\\') or '\\..\\' in relative_filename: raise ValueError(f"Invalid filename: cannot handle filename '{relative_filename}' on Windows. Please ask the repository owner to rename this file.") if REGEX_COMMIT_HASH.match(revision): pointer_path = _get_pointer_path(storage_folder, revision, relative_filename) if os.path.exists(pointer_path) and (not force_download): return pointer_path (url_to_download, etag, commit_hash, expected_size, head_call_error) = _get_metadata_or_catch_error(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision, endpoint=endpoint, proxies=proxies, etag_timeout=etag_timeout, headers=headers, token=token, local_files_only=local_files_only, storage_folder=storage_folder, relative_filename=relative_filename) if head_call_error is not None: if not force_download: commit_hash = None if REGEX_COMMIT_HASH.match(revision): commit_hash = revision else: ref_path = os.path.join(storage_folder, 'refs', revision) if os.path.isfile(ref_path): with open(ref_path) as f: commit_hash = f.read() if commit_hash is not None: pointer_path = _get_pointer_path(storage_folder, commit_hash, relative_filename) if os.path.exists(pointer_path) and (not force_download): return pointer_path _raise_on_head_call_error(head_call_error, force_download, local_files_only) assert etag is not None, 'etag must have been retrieved from server' assert commit_hash is not None, 'commit_hash must have been retrieved from server' assert url_to_download is not None, 'file location must have been retrieved from server' assert expected_size is not None, 'expected_size must have been retrieved from server' blob_path = os.path.join(storage_folder, 'blobs', etag) pointer_path = _get_pointer_path(storage_folder, commit_hash, relative_filename) os.makedirs(os.path.dirname(blob_path), exist_ok=True) os.makedirs(os.path.dirname(pointer_path), exist_ok=True) _cache_commit_hash_for_specific_revision(storage_folder, revision, commit_hash) if not force_download: if os.path.exists(pointer_path): return pointer_path if os.path.exists(blob_path): _create_symlink(blob_path, pointer_path, new_blob=False) return pointer_path lock_path = os.path.join(locks_dir, 
repo_folder_name(repo_id=repo_id, repo_type=repo_type), f'{etag}.lock') if os.name == 'nt' and len(os.path.abspath(lock_path)) > 255: lock_path = '\\\\?\\' + os.path.abspath(lock_path) if os.name == 'nt' and len(os.path.abspath(blob_path)) > 255: blob_path = '\\\\?\\' + os.path.abspath(blob_path) Path(lock_path).parent.mkdir(parents=True, exist_ok=True) with WeakFileLock(lock_path): _download_to_tmp_and_move(incomplete_path=Path(blob_path + '.incomplete'), destination_path=Path(blob_path), url_to_download=url_to_download, proxies=proxies, headers=headers, expected_size=expected_size, filename=filename, force_download=force_download) if not os.path.exists(pointer_path): _create_symlink(blob_path, pointer_path, new_blob=True) return pointer_path def _hf_hub_download_to_local_dir(*, local_dir: Union[str, Path], repo_id: str, repo_type: str, filename: str, revision: str, endpoint: Optional[str], etag_timeout: float, headers: Dict[str, str], proxies: Optional[Dict], token: Union[bool, str, None], cache_dir: str, force_download: bool, local_files_only: bool) -> str: if os.name == 'nt' and len(os.path.abspath(local_dir)) > 255: local_dir = '\\\\?\\' + os.path.abspath(local_dir) local_dir = Path(local_dir) paths = get_local_download_paths(local_dir=local_dir, filename=filename) local_metadata = read_download_metadata(local_dir=local_dir, filename=filename) if not force_download and REGEX_COMMIT_HASH.match(revision) and paths.file_path.is_file() and (local_metadata is not None) and (local_metadata.commit_hash == revision): return str(paths.file_path) (url_to_download, etag, commit_hash, expected_size, head_call_error) = _get_metadata_or_catch_error(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision, endpoint=endpoint, proxies=proxies, etag_timeout=etag_timeout, headers=headers, token=token, local_files_only=local_files_only) if head_call_error is not None: if not force_download and paths.file_path.is_file(): logger.warning(f"Couldn't access the Hub to check for updates, but the local file already exists. Defaulting to the existing file. 
(error: {head_call_error})") return str(paths.file_path) _raise_on_head_call_error(head_call_error, force_download, local_files_only) assert etag is not None, 'etag must have been retrieved from server' assert commit_hash is not None, 'commit_hash must have been retrieved from server' assert url_to_download is not None, 'file location must have been retrieved from server' assert expected_size is not None, 'expected_size must have been retrieved from server' if not force_download and paths.file_path.is_file(): if local_metadata is not None and local_metadata.etag == etag: write_download_metadata(local_dir=local_dir, filename=filename, commit_hash=commit_hash, etag=etag) return str(paths.file_path) if local_metadata is None and REGEX_SHA256.match(etag) is not None: with open(paths.file_path, 'rb') as f: file_hash = sha_fileobj(f).hex() if file_hash == etag: write_download_metadata(local_dir=local_dir, filename=filename, commit_hash=commit_hash, etag=etag) return str(paths.file_path) if not force_download: cached_path = try_to_load_from_cache(repo_id=repo_id, filename=filename, cache_dir=cache_dir, revision=commit_hash, repo_type=repo_type) if isinstance(cached_path, str): with WeakFileLock(paths.lock_path): paths.file_path.parent.mkdir(parents=True, exist_ok=True) shutil.copyfile(cached_path, paths.file_path) write_download_metadata(local_dir=local_dir, filename=filename, commit_hash=commit_hash, etag=etag) return str(paths.file_path) with WeakFileLock(paths.lock_path): paths.file_path.unlink(missing_ok=True) _download_to_tmp_and_move(incomplete_path=paths.incomplete_path(etag), destination_path=paths.file_path, url_to_download=url_to_download, proxies=proxies, headers=headers, expected_size=expected_size, filename=filename, force_download=force_download) write_download_metadata(local_dir=local_dir, filename=filename, commit_hash=commit_hash, etag=etag) return str(paths.file_path) @validate_hf_hub_args def try_to_load_from_cache(repo_id: str, filename: str, cache_dir: Union[str, Path, None]=None, revision: Optional[str]=None, repo_type: Optional[str]=None) -> Union[str, _CACHED_NO_EXIST_T, None]: if revision is None: revision = 'main' if repo_type is None: repo_type = 'model' if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type: {repo_type}. 
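# --- Editor's note: illustrative usage sketch, not part of the original source. ---
# `try_to_load_from_cache` (whose body continues below) returns a filepath if
# the file is cached, None if nothing is known, or the `_CACHED_NO_EXIST`
# sentinel if the file is cached as non-existent. Example values only.
def _example_try_to_load_from_cache() -> None:
    from huggingface_hub import try_to_load_from_cache, _CACHED_NO_EXIST

    filepath = try_to_load_from_cache(repo_id='gpt2', filename='config.json')
    if isinstance(filepath, str):
        print(f'Cached locally at {filepath}')
    elif filepath is _CACHED_NO_EXIST:
        print('File is known not to exist on the Hub')
    else:
        print('Not cached; a download is needed')
# --- End editor's note. ---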
Accepted repo types are: {str(constants.REPO_TYPES)}') if cache_dir is None: cache_dir = constants.HF_HUB_CACHE object_id = repo_id.replace('/', '--') repo_cache = os.path.join(cache_dir, f'{repo_type}s--{object_id}') if not os.path.isdir(repo_cache): return None refs_dir = os.path.join(repo_cache, 'refs') snapshots_dir = os.path.join(repo_cache, 'snapshots') no_exist_dir = os.path.join(repo_cache, '.no_exist') if os.path.isdir(refs_dir): revision_file = os.path.join(refs_dir, revision) if os.path.isfile(revision_file): with open(revision_file) as f: revision = f.read() if os.path.isfile(os.path.join(no_exist_dir, revision, filename)): return _CACHED_NO_EXIST if not os.path.exists(snapshots_dir): return None cached_shas = os.listdir(snapshots_dir) if revision not in cached_shas: return None cached_file = os.path.join(snapshots_dir, revision, filename) return cached_file if os.path.isfile(cached_file) else None @validate_hf_hub_args def get_hf_file_metadata(url: str, token: Union[bool, str, None]=None, proxies: Optional[Dict]=None, timeout: Optional[float]=constants.DEFAULT_REQUEST_TIMEOUT, library_name: Optional[str]=None, library_version: Optional[str]=None, user_agent: Union[Dict, str, None]=None, headers: Optional[Dict[str, str]]=None) -> HfFileMetadata: headers = build_hf_headers(token=token, library_name=library_name, library_version=library_version, user_agent=user_agent, headers=headers) headers['Accept-Encoding'] = 'identity' r = _request_wrapper(method='HEAD', url=url, headers=headers, allow_redirects=False, follow_relative_redirects=True, proxies=proxies, timeout=timeout) hf_raise_for_status(r) return HfFileMetadata(commit_hash=r.headers.get(constants.HUGGINGFACE_HEADER_X_REPO_COMMIT), etag=_normalize_etag(r.headers.get(constants.HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get('ETag')), location=r.headers.get('Location') or r.request.url, size=_int_or_none(r.headers.get(constants.HUGGINGFACE_HEADER_X_LINKED_SIZE) or r.headers.get('Content-Length'))) def _get_metadata_or_catch_error(*, repo_id: str, filename: str, repo_type: str, revision: str, endpoint: Optional[str], proxies: Optional[Dict], etag_timeout: Optional[float], headers: Dict[str, str], token: Union[bool, str, None], local_files_only: bool, relative_filename: Optional[str]=None, storage_folder: Optional[str]=None) -> Union[Tuple[None, None, None, None, Exception], Tuple[str, str, str, int, None]]: if local_files_only: return (None, None, None, None, OfflineModeIsEnabled(f"Cannot access file since 'local_files_only=True' has been set. (repo_id: {repo_id}, repo_type: {repo_type}, revision: {revision}, filename: {filename})")) url = hf_hub_url(repo_id, filename, repo_type=repo_type, revision=revision, endpoint=endpoint) url_to_download: str = url etag: Optional[str] = None commit_hash: Optional[str] = None expected_size: Optional[int] = None head_error_call: Optional[Exception] = None if not local_files_only: try: try: metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers, token=token) except EntryNotFoundError as http_error: if storage_folder is not None and relative_filename is not None: commit_hash = http_error.response.headers.get(constants.HUGGINGFACE_HEADER_X_REPO_COMMIT) if commit_hash is not None: no_exist_file_path = Path(storage_folder) / '.no_exist' / commit_hash / relative_filename no_exist_file_path.parent.mkdir(parents=True, exist_ok=True) try: no_exist_file_path.touch() except OSError as e: logger.error(f'Could not cache non-existence of file. 
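# --- Editor's note: illustrative usage sketch, not part of the original source. ---
# `get_hf_file_metadata` above issues a single HEAD request and returns the
# commit hash, normalized ETag, final download location and size of a file.
def _example_get_hf_file_metadata() -> None:
    from huggingface_hub import get_hf_file_metadata, hf_hub_url

    url = hf_hub_url(repo_id='gpt2', filename='config.json')
    metadata = get_hf_file_metadata(url)
    print(metadata.commit_hash, metadata.etag, metadata.size)
# --- End editor's note. ---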
Will ignore error and continue. Error: {e}') _cache_commit_hash_for_specific_revision(storage_folder, revision, commit_hash) raise commit_hash = metadata.commit_hash if commit_hash is None: raise FileMetadataError('Distant resource does not seem to be on huggingface.co. It is possible that a configuration issue prevents you from downloading resources from https://huggingface.co. Please check your firewall and proxy settings and make sure your SSL certificates are updated.') etag = metadata.etag if etag is None: raise FileMetadataError("Distant resource does not have an ETag, so we won't be able to reliably ensure reproducibility.") expected_size = metadata.size if expected_size is None: raise FileMetadataError('Distant resource does not have a Content-Length.') if url != metadata.location: url_to_download = metadata.location if urlparse(url).netloc != urlparse(metadata.location).netloc: headers.pop('authorization', None) except (requests.exceptions.SSLError, requests.exceptions.ProxyError): raise except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, OfflineModeIsEnabled) as error: head_error_call = error except (RevisionNotFoundError, EntryNotFoundError): raise except requests.HTTPError as error: head_error_call = error except FileMetadataError as error: head_error_call = error if not (local_files_only or etag is not None or head_error_call is not None): raise RuntimeError('etag is empty due to uncovered problems') return (url_to_download, etag, commit_hash, expected_size, head_error_call) def _raise_on_head_call_error(head_call_error: Exception, force_download: bool, local_files_only: bool) -> NoReturn: if force_download: if local_files_only: raise ValueError("Cannot pass 'force_download=True' and 'local_files_only=True' at the same time.") elif isinstance(head_call_error, OfflineModeIsEnabled): raise ValueError("Cannot pass 'force_download=True' when offline mode is enabled.") from head_call_error else: raise ValueError('Force download failed due to the above error.') from head_call_error if local_files_only: raise LocalEntryNotFoundError("Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False.") elif isinstance(head_call_error, RepositoryNotFoundError) or isinstance(head_call_error, GatedRepoError): raise head_call_error else: raise LocalEntryNotFoundError('An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. 
Please check your connection and try again or make sure your Internet connection is on.') from head_call_error def _download_to_tmp_and_move(incomplete_path: Path, destination_path: Path, url_to_download: str, proxies: Optional[Dict], headers: Dict[str, str], expected_size: Optional[int], filename: str, force_download: bool) -> None: if destination_path.exists() and (not force_download): return if incomplete_path.exists() and (force_download or (constants.HF_HUB_ENABLE_HF_TRANSFER and (not proxies))): message = f"Removing incomplete file '{incomplete_path}'" if force_download: message += ' (force_download=True)' elif constants.HF_HUB_ENABLE_HF_TRANSFER and (not proxies): message += ' (hf_transfer=True)' logger.info(message) incomplete_path.unlink(missing_ok=True) with incomplete_path.open('ab') as f: resume_size = f.tell() message = f"Downloading '{filename}' to '{incomplete_path}'" if resume_size > 0 and expected_size is not None: message += f' (resume from {resume_size}/{expected_size})' logger.info(message) if expected_size is not None: _check_disk_space(expected_size, incomplete_path.parent) _check_disk_space(expected_size, destination_path.parent) http_get(url_to_download, f, proxies=proxies, resume_size=resume_size, headers=headers, expected_size=expected_size) logger.info(f'Download complete. Moving file to {destination_path}') _chmod_and_move(incomplete_path, destination_path) def _int_or_none(value: Optional[str]) -> Optional[int]: try: return int(value) except (TypeError, ValueError): return None def _chmod_and_move(src: Path, dst: Path) -> None: tmp_file = dst.parent.parent / f'tmp_{uuid.uuid4()}' try: tmp_file.touch() cache_dir_mode = Path(tmp_file).stat().st_mode os.chmod(str(src), stat.S_IMODE(cache_dir_mode)) except OSError as e: logger.warning(f"Could not set the permissions on the file '{src}'. Error: {e}.\nContinuing without setting permissions.") finally: try: tmp_file.unlink() except OSError: pass shutil.move(str(src), str(dst), copy_function=_copy_no_matter_what) def _copy_no_matter_what(src: str, dst: str) -> None: try: shutil.copy2(src, dst) except OSError: shutil.copyfile(src, dst) def _get_pointer_path(storage_folder: str, revision: str, relative_filename: str) -> str: snapshot_path = os.path.join(storage_folder, 'snapshots') pointer_path = os.path.join(snapshot_path, revision, relative_filename) if Path(os.path.abspath(snapshot_path)) not in Path(os.path.abspath(pointer_path)).parents: raise ValueError(f"Invalid pointer path: cannot create pointer path in snapshot folder if `storage_folder='{storage_folder}'`, `revision='{revision}'` and `relative_filename='{relative_filename}'`.") return pointer_path # File: huggingface_hub-main/src/huggingface_hub/hf_api.py from __future__ import annotations import inspect import json import re import struct import warnings from collections import defaultdict from concurrent.futures import Future, ThreadPoolExecutor from dataclasses import asdict, dataclass, field from datetime import datetime from functools import wraps from itertools import islice from pathlib import Path from typing import Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Literal, Optional, Tuple, TypeVar, Union, overload from urllib.parse import quote import requests from requests.exceptions import HTTPError from tqdm.auto import tqdm as base_tqdm from tqdm.contrib.concurrent import thread_map from . 
import constants from ._commit_api import CommitOperation, CommitOperationAdd, CommitOperationCopy, CommitOperationDelete, _fetch_files_to_copy, _fetch_upload_modes, _prepare_commit_payload, _upload_lfs_files, _warn_on_overwriting_operations from ._inference_endpoints import InferenceEndpoint, InferenceEndpointType from ._multi_commits import MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_BAD_REQUEST_TEMPLATE, MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_NO_CHANGES_TEMPLATE, MULTI_COMMIT_PR_CLOSING_COMMENT_TEMPLATE, MULTI_COMMIT_PR_COMPLETION_COMMENT_TEMPLATE, MultiCommitException, MultiCommitStep, MultiCommitStrategy, multi_commit_create_pull_request, multi_commit_generate_comment, multi_commit_parse_pr_description, plan_multi_commits from ._space_api import SpaceHardware, SpaceRuntime, SpaceStorage, SpaceVariable from ._upload_large_folder import upload_large_folder_internal from .community import Discussion, DiscussionComment, DiscussionStatusChange, DiscussionTitleChange, DiscussionWithDetails, deserialize_event from .constants import DEFAULT_ETAG_TIMEOUT, DEFAULT_REQUEST_TIMEOUT, DEFAULT_REVISION, DISCUSSION_STATUS, DISCUSSION_TYPES, ENDPOINT, INFERENCE_ENDPOINTS_ENDPOINT, REGEX_COMMIT_OID, REPO_TYPE_MODEL, REPO_TYPES, REPO_TYPES_MAPPING, REPO_TYPES_URL_PREFIXES, SAFETENSORS_INDEX_FILE, SAFETENSORS_MAX_HEADER_LENGTH, SAFETENSORS_SINGLE_FILE, SPACES_SDK_TYPES, WEBHOOK_DOMAIN_T, DiscussionStatusFilter, DiscussionTypeFilter from .errors import BadRequestError, EntryNotFoundError, GatedRepoError, HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError from .file_download import HfFileMetadata, get_hf_file_metadata, hf_hub_url from .repocard_data import DatasetCardData, ModelCardData, SpaceCardData from .utils import DEFAULT_IGNORE_PATTERNS, HfFolder, LocalTokenNotFoundError, NotASafetensorsRepoError, SafetensorsFileMetadata, SafetensorsParsingError, SafetensorsRepoMetadata, TensorInfo, build_hf_headers, experimental, filter_repo_objects, fix_hf_endpoint_in_url, get_session, hf_raise_for_status, logging, paginate, parse_datetime, validate_hf_hub_args from .utils import tqdm as hf_tqdm from .utils._typing import CallableT from .utils.endpoint_helpers import _is_emission_within_threshold R = TypeVar('R') CollectionItemType_T = Literal['model', 'dataset', 'space', 'paper'] ExpandModelProperty_T = Literal['author', 'baseModels', 'cardData', 'childrenModelCount', 'config', 'createdAt', 'disabled', 'downloads', 'downloadsAllTime', 'gated', 'inference', 'lastModified', 'library_name', 'likes', 'mask_token', 'model-index', 'pipeline_tag', 'private', 'safetensors', 'sha', 'siblings', 'spaces', 'tags', 'transformersInfo', 'trendingScore', 'widgetData'] ExpandDatasetProperty_T = Literal['author', 'cardData', 'citation', 'createdAt', 'disabled', 'description', 'downloads', 'downloadsAllTime', 'gated', 'lastModified', 'likes', 'paperswithcode_id', 'private', 'siblings', 'sha', 'trendingScore', 'tags'] ExpandSpaceProperty_T = Literal['author', 'cardData', 'createdAt', 'datasets', 'disabled', 'lastModified', 'likes', 'models', 'private', 'runtime', 'sdk', 'siblings', 'sha', 'subdomain', 'tags', 'trendingScore'] USERNAME_PLACEHOLDER = 'hf_user' _REGEX_DISCUSSION_URL = re.compile('.*/discussions/(\\d+)$') _CREATE_COMMIT_NO_REPO_ERROR_MESSAGE = "\nNote: Creating a commit assumes that the repo already exists on the Huggingface Hub. Please use `create_repo` if it's not the case." _AUTH_CHECK_NO_REPO_ERROR_MESSAGE = '\nNote: The repository either does not exist or you do not have access rights. 
Please check the repository ID and your access permissions. If this is a private repository, ensure that your token is correct.' logger = logging.get_logger(__name__) def repo_type_and_id_from_hf_id(hf_id: str, hub_url: Optional[str]=None) -> Tuple[Optional[str], Optional[str], str]: input_hf_id = hf_id hub_url = re.sub('https?://', '', hub_url if hub_url is not None else constants.ENDPOINT) is_hf_url = hub_url in hf_id and '@' not in hf_id HFFS_PREFIX = 'hf://' if hf_id.startswith(HFFS_PREFIX): hf_id = hf_id[len(HFFS_PREFIX):] url_segments = hf_id.split('/') is_hf_id = len(url_segments) <= 3 namespace: Optional[str] if is_hf_url: (namespace, repo_id) = url_segments[-2:] if namespace == hub_url: namespace = None if len(url_segments) > 2 and hub_url not in url_segments[-3]: repo_type = url_segments[-3] elif namespace in constants.REPO_TYPES_MAPPING: repo_type = constants.REPO_TYPES_MAPPING[namespace] namespace = None else: repo_type = None elif is_hf_id: if len(url_segments) == 3: (repo_type, namespace, repo_id) = url_segments[-3:] elif len(url_segments) == 2: if url_segments[0] in constants.REPO_TYPES_MAPPING: repo_type = constants.REPO_TYPES_MAPPING[url_segments[0]] namespace = None repo_id = hf_id.split('/')[-1] else: (namespace, repo_id) = hf_id.split('/')[-2:] repo_type = None else: repo_id = url_segments[0] (namespace, repo_type) = (None, None) else: raise ValueError(f'Unable to retrieve user and repo ID from the passed HF ID: {hf_id}') if repo_type in constants.REPO_TYPES_MAPPING: repo_type = constants.REPO_TYPES_MAPPING[repo_type] if repo_type == '': repo_type = None if repo_type not in constants.REPO_TYPES: raise ValueError(f"Unknown `repo_type`: '{repo_type}' ('{input_hf_id}')") return (repo_type, namespace, repo_id) @dataclass class LastCommitInfo(dict): oid: str title: str date: datetime def __post_init__(self): self.update(asdict(self)) @dataclass class BlobLfsInfo(dict): size: int sha256: str pointer_size: int def __post_init__(self): self.update(asdict(self)) @dataclass class BlobSecurityInfo(dict): safe: bool av_scan: Optional[Dict] pickle_import_scan: Optional[Dict] def __post_init__(self): self.update(asdict(self)) @dataclass class TransformersInfo(dict): auto_model: str custom_class: Optional[str] = None pipeline_tag: Optional[str] = None processor: Optional[str] = None def __post_init__(self): self.update(asdict(self)) @dataclass class SafeTensorsInfo(dict): parameters: List[Dict[str, int]] total: int def __post_init__(self): self.update(asdict(self)) @dataclass class CommitInfo(str): commit_url: str commit_message: str commit_description: str oid: str pr_url: Optional[str] = None repo_url: RepoUrl = field(init=False) pr_revision: Optional[str] = field(init=False) pr_num: Optional[str] = field(init=False) _url: str = field(repr=False, default=None) def __new__(cls, *args, commit_url: str, _url: Optional[str]=None, **kwargs): return str.__new__(cls, _url or commit_url) def __post_init__(self): self.repo_url = RepoUrl(self.commit_url.split('/commit/')[0]) if self.pr_url is not None: self.pr_revision = _parse_revision_from_pr_url(self.pr_url) self.pr_num = int(self.pr_revision.split('/')[-1]) else: self.pr_revision = None self.pr_num = None @dataclass class AccessRequest: username: str fullname: str email: Optional[str] timestamp: datetime status: Literal['pending', 'accepted', 'rejected'] fields: Optional[Dict[str, Any]] = None @dataclass class WebhookWatchedItem: type: Literal['dataset', 'model', 'org', 'space', 'user'] name: str @dataclass class WebhookInfo: id: str url: 
str watched: List[WebhookWatchedItem] domains: List[constants.WEBHOOK_DOMAIN_T] secret: Optional[str] disabled: bool class RepoUrl(str): def __new__(cls, url: Any, endpoint: Optional[str]=None): url = fix_hf_endpoint_in_url(url, endpoint=endpoint) return super(RepoUrl, cls).__new__(cls, url) def __init__(self, url: Any, endpoint: Optional[str]=None) -> None: super().__init__() self.endpoint = endpoint or constants.ENDPOINT (repo_type, namespace, repo_name) = repo_type_and_id_from_hf_id(self, hub_url=self.endpoint) self.namespace = namespace self.repo_name = repo_name self.repo_id = repo_name if namespace is None else f'{namespace}/{repo_name}' self.repo_type = repo_type or constants.REPO_TYPE_MODEL self.url = str(self) def __repr__(self) -> str: return f"RepoUrl('{self}', endpoint='{self.endpoint}', repo_type='{self.repo_type}', repo_id='{self.repo_id}')" @dataclass class RepoSibling: rfilename: str size: Optional[int] = None blob_id: Optional[str] = None lfs: Optional[BlobLfsInfo] = None @dataclass class RepoFile: path: str size: int blob_id: str lfs: Optional[BlobLfsInfo] = None last_commit: Optional[LastCommitInfo] = None security: Optional[BlobSecurityInfo] = None def __init__(self, **kwargs): self.path = kwargs.pop('path') self.size = kwargs.pop('size') self.blob_id = kwargs.pop('oid') lfs = kwargs.pop('lfs', None) if lfs is not None: lfs = BlobLfsInfo(size=lfs['size'], sha256=lfs['oid'], pointer_size=lfs['pointerSize']) self.lfs = lfs last_commit = kwargs.pop('lastCommit', None) or kwargs.pop('last_commit', None) if last_commit is not None: last_commit = LastCommitInfo(oid=last_commit['id'], title=last_commit['title'], date=parse_datetime(last_commit['date'])) self.last_commit = last_commit security = kwargs.pop('security', None) if security is not None: security = BlobSecurityInfo(safe=security['safe'], av_scan=security['avScan'], pickle_import_scan=security['pickleImportScan']) self.security = security self.rfilename = self.path self.lastCommit = self.last_commit @dataclass class RepoFolder: path: str tree_id: str last_commit: Optional[LastCommitInfo] = None def __init__(self, **kwargs): self.path = kwargs.pop('path') self.tree_id = kwargs.pop('oid') last_commit = kwargs.pop('lastCommit', None) or kwargs.pop('last_commit', None) if last_commit is not None: last_commit = LastCommitInfo(oid=last_commit['id'], title=last_commit['title'], date=parse_datetime(last_commit['date'])) self.last_commit = last_commit @dataclass class ModelInfo: id: str author: Optional[str] sha: Optional[str] created_at: Optional[datetime] last_modified: Optional[datetime] private: Optional[bool] disabled: Optional[bool] downloads: Optional[int] downloads_all_time: Optional[int] gated: Optional[Literal['auto', 'manual', False]] inference: Optional[Literal['warm', 'cold', 'frozen']] likes: Optional[int] library_name: Optional[str] tags: Optional[List[str]] pipeline_tag: Optional[str] mask_token: Optional[str] card_data: Optional[ModelCardData] widget_data: Optional[Any] model_index: Optional[Dict] config: Optional[Dict] transformers_info: Optional[TransformersInfo] trending_score: Optional[int] siblings: Optional[List[RepoSibling]] spaces: Optional[List[str]] safetensors: Optional[SafeTensorsInfo] def __init__(self, **kwargs): self.id = kwargs.pop('id') self.author = kwargs.pop('author', None) self.sha = kwargs.pop('sha', None) last_modified = kwargs.pop('lastModified', None) or kwargs.pop('last_modified', None) self.last_modified = parse_datetime(last_modified) if last_modified else None created_at = 
kwargs.pop('createdAt', None) or kwargs.pop('created_at', None) self.created_at = parse_datetime(created_at) if created_at else None self.private = kwargs.pop('private', None) self.gated = kwargs.pop('gated', None) self.disabled = kwargs.pop('disabled', None) self.downloads = kwargs.pop('downloads', None) self.downloads_all_time = kwargs.pop('downloadsAllTime', None) self.likes = kwargs.pop('likes', None) self.library_name = kwargs.pop('library_name', None) self.inference = kwargs.pop('inference', None) self.tags = kwargs.pop('tags', None) self.pipeline_tag = kwargs.pop('pipeline_tag', None) self.mask_token = kwargs.pop('mask_token', None) self.trending_score = kwargs.pop('trendingScore', None) card_data = kwargs.pop('cardData', None) or kwargs.pop('card_data', None) self.card_data = ModelCardData(**card_data, ignore_metadata_errors=True) if isinstance(card_data, dict) else card_data self.widget_data = kwargs.pop('widgetData', None) self.model_index = kwargs.pop('model-index', None) or kwargs.pop('model_index', None) self.config = kwargs.pop('config', None) transformers_info = kwargs.pop('transformersInfo', None) or kwargs.pop('transformers_info', None) self.transformers_info = TransformersInfo(**transformers_info) if transformers_info else None siblings = kwargs.pop('siblings', None) self.siblings = [RepoSibling(rfilename=sibling['rfilename'], size=sibling.get('size'), blob_id=sibling.get('blobId'), lfs=BlobLfsInfo(size=sibling['lfs']['size'], sha256=sibling['lfs']['sha256'], pointer_size=sibling['lfs']['pointerSize']) if sibling.get('lfs') else None) for sibling in siblings] if siblings is not None else None self.spaces = kwargs.pop('spaces', None) safetensors = kwargs.pop('safetensors', None) self.safetensors = SafeTensorsInfo(parameters=safetensors['parameters'], total=safetensors['total']) if safetensors else None self.lastModified = self.last_modified self.cardData = self.card_data self.transformersInfo = self.transformers_info self.__dict__.update(**kwargs) @dataclass class DatasetInfo: id: str author: Optional[str] sha: Optional[str] created_at: Optional[datetime] last_modified: Optional[datetime] private: Optional[bool] gated: Optional[Literal['auto', 'manual', False]] disabled: Optional[bool] downloads: Optional[int] downloads_all_time: Optional[int] likes: Optional[int] paperswithcode_id: Optional[str] tags: Optional[List[str]] trending_score: Optional[int] card_data: Optional[DatasetCardData] siblings: Optional[List[RepoSibling]] def __init__(self, **kwargs): self.id = kwargs.pop('id') self.author = kwargs.pop('author', None) self.sha = kwargs.pop('sha', None) created_at = kwargs.pop('createdAt', None) or kwargs.pop('created_at', None) self.created_at = parse_datetime(created_at) if created_at else None last_modified = kwargs.pop('lastModified', None) or kwargs.pop('last_modified', None) self.last_modified = parse_datetime(last_modified) if last_modified else None self.private = kwargs.pop('private', None) self.gated = kwargs.pop('gated', None) self.disabled = kwargs.pop('disabled', None) self.downloads = kwargs.pop('downloads', None) self.downloads_all_time = kwargs.pop('downloadsAllTime', None) self.likes = kwargs.pop('likes', None) self.paperswithcode_id = kwargs.pop('paperswithcode_id', None) self.tags = kwargs.pop('tags', None) self.trending_score = kwargs.pop('trendingScore', None) card_data = kwargs.pop('cardData', None) or kwargs.pop('card_data', None) self.card_data = DatasetCardData(**card_data, ignore_metadata_errors=True) if isinstance(card_data, dict) else 
card_data siblings = kwargs.pop('siblings', None) self.siblings = [RepoSibling(rfilename=sibling['rfilename'], size=sibling.get('size'), blob_id=sibling.get('blobId'), lfs=BlobLfsInfo(size=sibling['lfs']['size'], sha256=sibling['lfs']['sha256'], pointer_size=sibling['lfs']['pointerSize']) if sibling.get('lfs') else None) for sibling in siblings] if siblings is not None else None self.lastModified = self.last_modified self.cardData = self.card_data self.__dict__.update(**kwargs) @dataclass class SpaceInfo: id: str author: Optional[str] sha: Optional[str] created_at: Optional[datetime] last_modified: Optional[datetime] private: Optional[bool] gated: Optional[Literal['auto', 'manual', False]] disabled: Optional[bool] host: Optional[str] subdomain: Optional[str] likes: Optional[int] sdk: Optional[str] tags: Optional[List[str]] siblings: Optional[List[RepoSibling]] trending_score: Optional[int] card_data: Optional[SpaceCardData] runtime: Optional[SpaceRuntime] models: Optional[List[str]] datasets: Optional[List[str]] def __init__(self, **kwargs): self.id = kwargs.pop('id') self.author = kwargs.pop('author', None) self.sha = kwargs.pop('sha', None) created_at = kwargs.pop('createdAt', None) or kwargs.pop('created_at', None) self.created_at = parse_datetime(created_at) if created_at else None last_modified = kwargs.pop('lastModified', None) or kwargs.pop('last_modified', None) self.last_modified = parse_datetime(last_modified) if last_modified else None self.private = kwargs.pop('private', None) self.gated = kwargs.pop('gated', None) self.disabled = kwargs.pop('disabled', None) self.host = kwargs.pop('host', None) self.subdomain = kwargs.pop('subdomain', None) self.likes = kwargs.pop('likes', None) self.sdk = kwargs.pop('sdk', None) self.tags = kwargs.pop('tags', None) self.trending_score = kwargs.pop('trendingScore', None) card_data = kwargs.pop('cardData', None) or kwargs.pop('card_data', None) self.card_data = SpaceCardData(**card_data, ignore_metadata_errors=True) if isinstance(card_data, dict) else card_data siblings = kwargs.pop('siblings', None) self.siblings = [RepoSibling(rfilename=sibling['rfilename'], size=sibling.get('size'), blob_id=sibling.get('blobId'), lfs=BlobLfsInfo(size=sibling['lfs']['size'], sha256=sibling['lfs']['sha256'], pointer_size=sibling['lfs']['pointerSize']) if sibling.get('lfs') else None) for sibling in siblings] if siblings is not None else None runtime = kwargs.pop('runtime', None) self.runtime = SpaceRuntime(runtime) if runtime else None self.models = kwargs.pop('models', None) self.datasets = kwargs.pop('datasets', None) self.lastModified = self.last_modified self.cardData = self.card_data self.__dict__.update(**kwargs) @dataclass class MetricInfo: id: str space_id: str description: Optional[str] def __init__(self, **kwargs): self.id = kwargs.pop('id') self.space_id = kwargs.pop('spaceId') self.description = kwargs.pop('description', None) self.spaceId = self.space_id self.__dict__.update(**kwargs) @dataclass class CollectionItem: item_object_id: str item_id: str item_type: str position: int note: Optional[str] = None def __init__(self, _id: str, id: str, type: CollectionItemType_T, position: int, note: Optional[Dict]=None, **kwargs) -> None: self.item_object_id: str = _id self.item_id: str = id self.item_type: CollectionItemType_T = type self.position: int = position self.note: str = note['text'] if note is not None else None @dataclass class Collection: slug: str title: str owner: str items: List[CollectionItem] last_updated: datetime position: int private: 
bool theme: str upvotes: int description: Optional[str] = None def __init__(self, **kwargs) -> None: self.slug = kwargs.pop('slug') self.title = kwargs.pop('title') self.owner = kwargs.pop('owner') self.items = [CollectionItem(**item) for item in kwargs.pop('items')] self.last_updated = parse_datetime(kwargs.pop('lastUpdated')) self.position = kwargs.pop('position') self.private = kwargs.pop('private') self.theme = kwargs.pop('theme') self.upvotes = kwargs.pop('upvotes') self.description = kwargs.pop('description', None) endpoint = kwargs.pop('endpoint', None) if endpoint is None: endpoint = constants.ENDPOINT self._url = f'{endpoint}/collections/{self.slug}' @property def url(self) -> str: return self._url @dataclass class GitRefInfo: name: str ref: str target_commit: str @dataclass class GitRefs: branches: List[GitRefInfo] converts: List[GitRefInfo] tags: List[GitRefInfo] pull_requests: Optional[List[GitRefInfo]] = None @dataclass class GitCommitInfo: commit_id: str authors: List[str] created_at: datetime title: str message: str formatted_title: Optional[str] formatted_message: Optional[str] @dataclass class UserLikes: user: str total: int datasets: List[str] models: List[str] spaces: List[str] @dataclass class Organization: avatar_url: str name: str fullname: str def __init__(self, **kwargs) -> None: self.avatar_url = kwargs.pop('avatarUrl', '') self.name = kwargs.pop('name', '') self.fullname = kwargs.pop('fullname', '') self.__dict__.update(**kwargs) @dataclass class User: username: str fullname: str avatar_url: str details: Optional[str] = None is_following: Optional[bool] = None is_pro: Optional[bool] = None num_models: Optional[int] = None num_datasets: Optional[int] = None num_spaces: Optional[int] = None num_discussions: Optional[int] = None num_papers: Optional[int] = None num_upvotes: Optional[int] = None num_likes: Optional[int] = None orgs: List[Organization] = field(default_factory=list) def __init__(self, **kwargs) -> None: self.username = kwargs.pop('user', '') self.fullname = kwargs.pop('fullname', '') self.avatar_url = kwargs.pop('avatarUrl', '') self.is_following = kwargs.pop('isFollowing', None) self.is_pro = kwargs.pop('isPro', None) self.details = kwargs.pop('details', None) self.num_models = kwargs.pop('numModels', None) self.num_datasets = kwargs.pop('numDatasets', None) self.num_spaces = kwargs.pop('numSpaces', None) self.num_discussions = kwargs.pop('numDiscussions', None) self.num_papers = kwargs.pop('numPapers', None) self.num_upvotes = kwargs.pop('numUpvotes', None) self.num_likes = kwargs.pop('numLikes', None) self.user_type = kwargs.pop('type', None) self.orgs = [Organization(**org) for org in kwargs.pop('orgs', [])] self.__dict__.update(**kwargs) def future_compatible(fn: CallableT) -> CallableT: sig = inspect.signature(fn) args_params = list(sig.parameters)[1:] @wraps(fn) def _inner(self, *args, **kwargs): if 'run_as_future' in kwargs: run_as_future = kwargs['run_as_future'] kwargs['run_as_future'] = False else: run_as_future = False for (param, value) in zip(args_params, args): if param == 'run_as_future': run_as_future = value break if run_as_future: return self.run_as_future(fn, self, *args, **kwargs) return fn(self, *args, **kwargs) _inner.is_future_compatible = True return _inner class HfApi: def __init__(self, endpoint: Optional[str]=None, token: Union[str, bool, None]=None, library_name: Optional[str]=None, library_version: Optional[str]=None, user_agent: Union[Dict, str, None]=None, headers: Optional[Dict[str, str]]=None) -> None: self.endpoint = 
endpoint if endpoint is not None else constants.ENDPOINT self.token = token self.library_name = library_name self.library_version = library_version self.user_agent = user_agent self.headers = headers self._thread_pool: Optional[ThreadPoolExecutor] = None def run_as_future(self, fn: Callable[..., R], *args, **kwargs) -> Future[R]: if self._thread_pool is None: self._thread_pool = ThreadPoolExecutor(max_workers=1) return self._thread_pool.submit(fn, *args, **kwargs) @validate_hf_hub_args def whoami(self, token: Union[bool, str, None]=None) -> Dict: r = get_session().get(f'{self.endpoint}/api/whoami-v2', headers=self._build_hf_headers(token=token or self.token or True)) try: hf_raise_for_status(r) except HTTPError as e: raise HTTPError("Invalid user token. If you didn't pass a user token, make sure you are properly logged in by executing `huggingface-cli login`, and if you did pass a user token, double-check it's correct.", request=e.request, response=e.response) from e return r.json() def get_token_permission(self, token: Union[bool, str, None]=None) -> Literal['read', 'write', None]: try: return self.whoami(token=token)['auth']['accessToken']['role'] except (LocalTokenNotFoundError, HTTPError): return None def get_model_tags(self) -> Dict: path = f'{self.endpoint}/api/models-tags-by-type' r = get_session().get(path) hf_raise_for_status(r) return r.json() def get_dataset_tags(self) -> Dict: path = f'{self.endpoint}/api/datasets-tags-by-type' r = get_session().get(path) hf_raise_for_status(r) return r.json() @validate_hf_hub_args def list_models(self, *, filter: Union[str, Iterable[str], None]=None, author: Optional[str]=None, gated: Optional[bool]=None, inference: Optional[Literal['cold', 'frozen', 'warm']]=None, library: Optional[Union[str, List[str]]]=None, language: Optional[Union[str, List[str]]]=None, model_name: Optional[str]=None, task: Optional[Union[str, List[str]]]=None, trained_dataset: Optional[Union[str, List[str]]]=None, tags: Optional[Union[str, List[str]]]=None, search: Optional[str]=None, pipeline_tag: Optional[str]=None, emissions_thresholds: Optional[Tuple[float, float]]=None, sort: Union[Literal['last_modified'], str, None]=None, direction: Optional[Literal[-1]]=None, limit: Optional[int]=None, expand: Optional[List[ExpandModelProperty_T]]=None, full: Optional[bool]=None, cardData: bool=False, fetch_config: bool=False, token: Union[bool, str, None]=None) -> Iterable[ModelInfo]: if expand and (full or cardData or fetch_config): raise ValueError('`expand` cannot be used if `full`, `cardData` or `fetch_config` are passed.') if emissions_thresholds is not None and not cardData: raise ValueError('`emissions_thresholds` were passed without setting `cardData=True`.') path = f'{self.endpoint}/api/models' headers = self._build_hf_headers(token=token) params: Dict[str, Any] = {} filter_list: List[str] = [] if filter: filter_list.extend([filter] if isinstance(filter, str) else filter) if library: filter_list.extend([library] if isinstance(library, str) else library) if task: filter_list.extend([task] if isinstance(task, str) else task) if trained_dataset: if isinstance(trained_dataset, str): trained_dataset = [trained_dataset] for dataset in trained_dataset: if not dataset.startswith('dataset:'): dataset = f'dataset:{dataset}' filter_list.append(dataset) if language: filter_list.extend([language] if isinstance(language, str) else language) if tags: filter_list.extend([tags] if isinstance(tags, str) else tags) if len(filter_list) > 0: params['filter'] = 
filter_list if author: params['author'] = author if gated is not None: params['gated'] = gated if inference is not None: params['inference'] = inference if pipeline_tag: params['pipeline_tag'] = pipeline_tag search_list = [] if model_name: search_list.append(model_name) if search: search_list.append(search) if len(search_list) > 0: params['search'] = search_list if sort is not None: params['sort'] = 'lastModified' if sort == 'last_modified' else sort if direction is not None: params['direction'] = direction if limit is not None: params['limit'] = limit if full: params['full'] = True if fetch_config: params['config'] = True if cardData: params['cardData'] = True if expand: params['expand'] = expand items = paginate(path, params=params, headers=headers) if limit is not None: items = islice(items, limit) for item in items: if 'siblings' not in item: item['siblings'] = None model_info = ModelInfo(**item) if emissions_thresholds is None or _is_emission_within_threshold(model_info, *emissions_thresholds): yield model_info @validate_hf_hub_args def list_datasets(self, *, filter: Union[str, Iterable[str], None]=None, author: Optional[str]=None, benchmark: Optional[Union[str, List[str]]]=None, dataset_name: Optional[str]=None, gated: Optional[bool]=None, language_creators: Optional[Union[str, List[str]]]=None, language: Optional[Union[str, List[str]]]=None, multilinguality: Optional[Union[str, List[str]]]=None, size_categories: Optional[Union[str, List[str]]]=None, tags: Optional[Union[str, List[str]]]=None, task_categories: Optional[Union[str, List[str]]]=None, task_ids: Optional[Union[str, List[str]]]=None, search: Optional[str]=None, sort: Optional[Union[Literal['last_modified'], str]]=None, direction: Optional[Literal[-1]]=None, limit: Optional[int]=None, expand: Optional[List[ExpandDatasetProperty_T]]=None, full: Optional[bool]=None, token: Union[bool, str, None]=None) -> Iterable[DatasetInfo]: if expand and full: raise ValueError('`expand` cannot be used if `full` is passed.') path = f'{self.endpoint}/api/datasets' headers = self._build_hf_headers(token=token) params: Dict[str, Any] = {} filter_list = [] if filter is not None: if isinstance(filter, str): filter_list.append(filter) else: filter_list.extend(filter) for (key, value) in (('benchmark', benchmark), ('language_creators', language_creators), ('language', language), ('multilinguality', multilinguality), ('size_categories', size_categories), ('task_categories', task_categories), ('task_ids', task_ids)): if value: if isinstance(value, str): value = [value] for value_item in value: if not value_item.startswith(f'{key}:'): data = f'{key}:{value_item}' filter_list.append(data) if tags is not None: filter_list.extend([tags] if isinstance(tags, str) else tags) if len(filter_list) > 0: params['filter'] = filter_list if author: params['author'] = author if gated is not None: params['gated'] = gated search_list = [] if dataset_name: search_list.append(dataset_name) if search: search_list.append(search) if len(search_list) > 0: params['search'] = search_list if sort is not None: params['sort'] = 'lastModified' if sort == 'last_modified' else sort if direction is not None: params['direction'] = direction if limit is not None: params['limit'] = limit if expand: params['expand'] = expand if full: params['full'] = True items = paginate(path, params=params, headers=headers) if limit is not None: items = islice(items, limit) for item in items: if 'siblings' not in item: item['siblings'] = None yield DatasetInfo(**item) def list_metrics(self) -> 
List[MetricInfo]: path = f'{self.endpoint}/api/metrics' r = get_session().get(path) hf_raise_for_status(r) d = r.json() return [MetricInfo(**x) for x in d] @validate_hf_hub_args def list_spaces(self, *, filter: Union[str, Iterable[str], None]=None, author: Optional[str]=None, search: Optional[str]=None, datasets: Union[str, Iterable[str], None]=None, models: Union[str, Iterable[str], None]=None, linked: bool=False, sort: Union[Literal['last_modified'], str, None]=None, direction: Optional[Literal[-1]]=None, limit: Optional[int]=None, expand: Optional[List[ExpandSpaceProperty_T]]=None, full: Optional[bool]=None, token: Union[bool, str, None]=None) -> Iterable[SpaceInfo]: if expand and full: raise ValueError('`expand` cannot be used if `full` is passed.') path = f'{self.endpoint}/api/spaces' headers = self._build_hf_headers(token=token) params: Dict[str, Any] = {} if filter is not None: params['filter'] = filter if author is not None: params['author'] = author if search is not None: params['search'] = search if sort is not None: params['sort'] = 'lastModified' if sort == 'last_modified' else sort if direction is not None: params['direction'] = direction if limit is not None: params['limit'] = limit if linked: params['linked'] = True if datasets is not None: params['datasets'] = datasets if models is not None: params['models'] = models if expand: params['expand'] = expand if full: params['full'] = True items = paginate(path, params=params, headers=headers) if limit is not None: items = islice(items, limit) for item in items: if 'siblings' not in item: item['siblings'] = None yield SpaceInfo(**item) @validate_hf_hub_args def like(self, repo_id: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL response = get_session().post(url=f'{self.endpoint}/api/{repo_type}s/{repo_id}/like', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) @validate_hf_hub_args def unlike(self, repo_id: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL response = get_session().delete(url=f'{self.endpoint}/api/{repo_type}s/{repo_id}/like', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) @validate_hf_hub_args def list_liked_repos(self, user: Optional[str]=None, *, token: Union[bool, str, None]=None) -> UserLikes: if user is None: me = self.whoami(token=token) if me['type'] == 'user': user = me['name'] else: raise ValueError("Cannot list liked repos. 
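# --- Editor's note: illustrative usage sketch, not part of the original source. ---
# `HfApi.list_models` above yields results lazily from the paginated API; use
# `limit` to stop early. The filter and sort values are example values only.
def _example_list_models() -> None:
    from huggingface_hub import HfApi

    api = HfApi()
    for model in api.list_models(filter='text-classification', sort='downloads', direction=-1, limit=5):
        print(model.id, model.downloads)
# --- End editor's note. ---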
You must provide a 'user' as input or be logged in as a user.") path = f'{self.endpoint}/api/users/{user}/likes' headers = self._build_hf_headers(token=token) likes = list(paginate(path, params={}, headers=headers)) return UserLikes(user=user, total=len(likes), models=[like['repo']['name'] for like in likes if like['repo']['type'] == 'model'], datasets=[like['repo']['name'] for like in likes if like['repo']['type'] == 'dataset'], spaces=[like['repo']['name'] for like in likes if like['repo']['type'] == 'space']) @validate_hf_hub_args def list_repo_likers(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> Iterable[User]: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL path = f'{self.endpoint}/api/{repo_type}s/{repo_id}/likers' for liker in paginate(path, params={}, headers=self._build_hf_headers(token=token)): yield User(username=liker['user'], fullname=liker['fullname'], avatar_url=liker['avatarUrl']) @validate_hf_hub_args def model_info(self, repo_id: str, *, revision: Optional[str]=None, timeout: Optional[float]=None, securityStatus: Optional[bool]=None, files_metadata: bool=False, expand: Optional[List[ExpandModelProperty_T]]=None, token: Union[bool, str, None]=None) -> ModelInfo: if expand and (securityStatus or files_metadata): raise ValueError('`expand` cannot be used if `securityStatus` or `files_metadata` are set.') headers = self._build_hf_headers(token=token) path = f'{self.endpoint}/api/models/{repo_id}' if revision is None else f"{self.endpoint}/api/models/{repo_id}/revision/{quote(revision, safe='')}" params: Dict = {} if securityStatus: params['securityStatus'] = True if files_metadata: params['blobs'] = True if expand: params['expand'] = expand r = get_session().get(path, headers=headers, timeout=timeout, params=params) hf_raise_for_status(r) data = r.json() return ModelInfo(**data) @validate_hf_hub_args def dataset_info(self, repo_id: str, *, revision: Optional[str]=None, timeout: Optional[float]=None, files_metadata: bool=False, expand: Optional[List[ExpandDatasetProperty_T]]=None, token: Union[bool, str, None]=None) -> DatasetInfo: if expand and files_metadata: raise ValueError('`expand` cannot be used if `files_metadata` is set.') headers = self._build_hf_headers(token=token) path = f'{self.endpoint}/api/datasets/{repo_id}' if revision is None else f"{self.endpoint}/api/datasets/{repo_id}/revision/{quote(revision, safe='')}" params: Dict = {} if files_metadata: params['blobs'] = True if expand: params['expand'] = expand r = get_session().get(path, headers=headers, timeout=timeout, params=params) hf_raise_for_status(r) data = r.json() return DatasetInfo(**data) @validate_hf_hub_args def space_info(self, repo_id: str, *, revision: Optional[str]=None, timeout: Optional[float]=None, files_metadata: bool=False, expand: Optional[List[ExpandSpaceProperty_T]]=None, token: Union[bool, str, None]=None) -> SpaceInfo: if expand and files_metadata: raise ValueError('`expand` cannot be used if `files_metadata` is set.') headers = self._build_hf_headers(token=token) path = f'{self.endpoint}/api/spaces/{repo_id}' if revision is None else f"{self.endpoint}/api/spaces/{repo_id}/revision/{quote(revision, safe='')}" params: Dict = {} if files_metadata: params['blobs'] = True if expand: params['expand'] = expand r = get_session().get(path, headers=headers, timeout=timeout, params=params) hf_raise_for_status(r) data = r.json() return SpaceInfo(**data) @validate_hf_hub_args def repo_info(self, repo_id: str, *, revision: Optional[str]=None, 
repo_type: Optional[str]=None, timeout: Optional[float]=None, files_metadata: bool=False, expand: Optional[Union[ExpandModelProperty_T, ExpandDatasetProperty_T, ExpandSpaceProperty_T]]=None, token: Union[bool, str, None]=None) -> Union[ModelInfo, DatasetInfo, SpaceInfo]: if repo_type is None or repo_type == 'model': method = self.model_info elif repo_type == 'dataset': method = self.dataset_info elif repo_type == 'space': method = self.space_info else: raise ValueError('Unsupported repo type.') return method(repo_id, revision=revision, token=token, timeout=timeout, expand=expand, files_metadata=files_metadata) @validate_hf_hub_args def repo_exists(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> bool: try: self.repo_info(repo_id=repo_id, repo_type=repo_type, token=token) return True except GatedRepoError: return True except RepositoryNotFoundError: return False @validate_hf_hub_args def revision_exists(self, repo_id: str, revision: str, *, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> bool: try: self.repo_info(repo_id=repo_id, revision=revision, repo_type=repo_type, token=token) return True except RevisionNotFoundError: return False except RepositoryNotFoundError: return False @validate_hf_hub_args def file_exists(self, repo_id: str, filename: str, *, repo_type: Optional[str]=None, revision: Optional[str]=None, token: Union[str, bool, None]=None) -> bool: url = hf_hub_url(repo_id=repo_id, repo_type=repo_type, revision=revision, filename=filename, endpoint=self.endpoint) try: if token is None: token = self.token get_hf_file_metadata(url, token=token) return True except GatedRepoError: raise except (RepositoryNotFoundError, EntryNotFoundError, RevisionNotFoundError): return False @validate_hf_hub_args def list_repo_files(self, repo_id: str, *, revision: Optional[str]=None, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> List[str]: return [f.rfilename for f in self.list_repo_tree(repo_id=repo_id, recursive=True, revision=revision, repo_type=repo_type, token=token) if isinstance(f, RepoFile)] @validate_hf_hub_args def list_repo_tree(self, repo_id: str, path_in_repo: Optional[str]=None, *, recursive: bool=False, expand: bool=False, revision: Optional[str]=None, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> Iterable[Union[RepoFile, RepoFolder]]: repo_type = repo_type or constants.REPO_TYPE_MODEL revision = quote(revision, safe='') if revision is not None else constants.DEFAULT_REVISION headers = self._build_hf_headers(token=token) encoded_path_in_repo = '/' + quote(path_in_repo, safe='') if path_in_repo else '' tree_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/tree/{revision}{encoded_path_in_repo}' for path_info in paginate(path=tree_url, headers=headers, params={'recursive': recursive, 'expand': expand}): yield (RepoFile(**path_info) if path_info['type'] == 'file' else RepoFolder(**path_info)) @validate_hf_hub_args def list_repo_refs(self, repo_id: str, *, repo_type: Optional[str]=None, include_pull_requests: bool=False, token: Union[str, bool, None]=None) -> GitRefs: repo_type = repo_type or constants.REPO_TYPE_MODEL response = get_session().get(f'{self.endpoint}/api/{repo_type}s/{repo_id}/refs', headers=self._build_hf_headers(token=token), params={'include_prs': 1} if include_pull_requests else {}) hf_raise_for_status(response) data = response.json() def _format_as_git_ref_info(item: Dict) -> GitRefInfo: return GitRefInfo(name=item['name'], ref=item['ref'], 
target_commit=item['targetCommit']) return GitRefs(branches=[_format_as_git_ref_info(item) for item in data['branches']], converts=[_format_as_git_ref_info(item) for item in data['converts']], tags=[_format_as_git_ref_info(item) for item in data['tags']], pull_requests=[_format_as_git_ref_info(item) for item in data['pullRequests']] if include_pull_requests else None) @validate_hf_hub_args def list_repo_commits(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None, revision: Optional[str]=None, formatted: bool=False) -> List[GitCommitInfo]: repo_type = repo_type or constants.REPO_TYPE_MODEL revision = quote(revision, safe='') if revision is not None else constants.DEFAULT_REVISION return [GitCommitInfo(commit_id=item['id'], authors=[author['user'] for author in item['authors']], created_at=parse_datetime(item['date']), title=item['title'], message=item['message'], formatted_title=item.get('formatted', {}).get('title'), formatted_message=item.get('formatted', {}).get('message')) for item in paginate(f'{self.endpoint}/api/{repo_type}s/{repo_id}/commits/{revision}', headers=self._build_hf_headers(token=token), params={'expand[]': 'formatted'} if formatted else {})] @validate_hf_hub_args def get_paths_info(self, repo_id: str, paths: Union[List[str], str], *, expand: bool=False, revision: Optional[str]=None, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> List[Union[RepoFile, RepoFolder]]: repo_type = repo_type or constants.REPO_TYPE_MODEL revision = quote(revision, safe='') if revision is not None else constants.DEFAULT_REVISION headers = self._build_hf_headers(token=token) response = get_session().post(f'{self.endpoint}/api/{repo_type}s/{repo_id}/paths-info/{revision}', data={'paths': paths if isinstance(paths, list) else [paths], 'expand': expand}, headers=headers) hf_raise_for_status(response) paths_info = response.json() return [RepoFile(**path_info) if path_info['type'] == 'file' else RepoFolder(**path_info) for path_info in paths_info] @validate_hf_hub_args def super_squash_history(self, repo_id: str, *, branch: Optional[str]=None, commit_message: Optional[str]=None, repo_type: Optional[str]=None, token: Union[str, bool, None]=None) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL if repo_type not in constants.REPO_TYPES: raise ValueError('Invalid repo type') if branch is None: branch = constants.DEFAULT_REVISION url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/super-squash/{branch}' headers = self._build_hf_headers(token=token) commit_message = commit_message or f"Super-squash branch '{branch}' using huggingface_hub" response = get_session().post(url=url, headers=headers, json={'message': commit_message}) hf_raise_for_status(response) @validate_hf_hub_args def create_repo(self, repo_id: str, *, token: Union[str, bool, None]=None, private: bool=False, repo_type: Optional[str]=None, exist_ok: bool=False, resource_group_id: Optional[str]=None, space_sdk: Optional[str]=None, space_hardware: Optional[SpaceHardware]=None, space_storage: Optional[SpaceStorage]=None, space_sleep_time: Optional[int]=None, space_secrets: Optional[List[Dict[str, str]]]=None, space_variables: Optional[List[Dict[str, str]]]=None) -> RepoUrl: (organization, name) = repo_id.split('/') if '/' in repo_id else (None, repo_id) path = f'{self.endpoint}/api/repos/create' if repo_type not in constants.REPO_TYPES: raise ValueError('Invalid repo type') json: Dict[str, Any] = {'name': name, 'organization': organization, 'private': private} if 
repo_type is not None: json['type'] = repo_type if repo_type == 'space': if space_sdk is None: raise ValueError(f"No space_sdk provided. `create_repo` expects space_sdk to be one of {constants.SPACES_SDK_TYPES} when repo_type is 'space'.") if space_sdk not in constants.SPACES_SDK_TYPES: raise ValueError(f'Invalid space_sdk. Please choose one of {constants.SPACES_SDK_TYPES}.') json['sdk'] = space_sdk if space_sdk is not None and repo_type != 'space': warnings.warn("Ignoring provided space_sdk because repo_type is not 'space'.") function_args = ['space_hardware', 'space_storage', 'space_sleep_time', 'space_secrets', 'space_variables'] json_keys = ['hardware', 'storageTier', 'sleepTimeSeconds', 'secrets', 'variables'] values = [space_hardware, space_storage, space_sleep_time, space_secrets, space_variables] if repo_type == 'space': json.update({k: v for (k, v) in zip(json_keys, values) if v is not None}) else: provided_space_args = [key for (key, value) in zip(function_args, values) if value is not None] if provided_space_args: warnings.warn(f"Ignoring provided {', '.join(provided_space_args)} because repo_type is not 'space'.") if getattr(self, '_lfsmultipartthresh', None): json['lfsmultipartthresh'] = self._lfsmultipartthresh if resource_group_id is not None: json['resourceGroupId'] = resource_group_id headers = self._build_hf_headers(token=token) while True: r = get_session().post(path, headers=headers, json=json) if r.status_code == 409 and 'Cannot create repo: another conflicting operation is in progress' in r.text: logger.debug('Create repo failed due to a concurrency issue. Retrying...') continue break try: hf_raise_for_status(r) except HTTPError as err: if exist_ok and err.response.status_code == 409: pass elif exist_ok and err.response.status_code == 403: try: self.repo_info(repo_id=repo_id, repo_type=repo_type, token=token) if repo_type is None or repo_type == constants.REPO_TYPE_MODEL: return RepoUrl(f'{self.endpoint}/{repo_id}') return RepoUrl(f'{self.endpoint}/{repo_type}/{repo_id}') except HfHubHTTPError: raise err else: raise d = r.json() return RepoUrl(d['url'], endpoint=self.endpoint) @validate_hf_hub_args def delete_repo(self, repo_id: str, *, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, missing_ok: bool=False) -> None: (organization, name) = repo_id.split('/') if '/' in repo_id else (None, repo_id) path = f'{self.endpoint}/api/repos/delete' if repo_type not in constants.REPO_TYPES: raise ValueError('Invalid repo type') json = {'name': name, 'organization': organization} if repo_type is not None: json['type'] = repo_type headers = self._build_hf_headers(token=token) r = get_session().delete(path, headers=headers, json=json) try: hf_raise_for_status(r) except RepositoryNotFoundError: if not missing_ok: raise @validate_hf_hub_args def update_repo_visibility(self, repo_id: str, private: bool=False, *, token: Union[str, bool, None]=None, repo_type: Optional[str]=None) -> Dict[str, bool]: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL r = get_session().put(url=f'{self.endpoint}/api/{repo_type}s/{repo_id}/settings', headers=self._build_hf_headers(token=token), json={'private': private}) hf_raise_for_status(r) return r.json() @validate_hf_hub_args def update_repo_settings(self, repo_id: str, *, gated: Literal['auto', 'manual', False]=False, token: Union[str, bool, None]=None, repo_type: Optional[str]=None) -> None: if gated not 
in ['auto', 'manual', False]: raise ValueError(f"Invalid gated status, must be one of 'auto', 'manual', or False. Got '{gated}'.") if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL headers = self._build_hf_headers(token=token) r = get_session().put(url=f'{self.endpoint}/api/{repo_type}s/{repo_id}/settings', headers=headers, json={'gated': gated}) hf_raise_for_status(r) def move_repo(self, from_id: str, to_id: str, *, repo_type: Optional[str]=None, token: Union[str, bool, None]=None): if len(from_id.split('/')) != 2: raise ValueError(f'Invalid repo_id: {from_id}. It should have a namespace (:namespace:/:repo_name:)') if len(to_id.split('/')) != 2: raise ValueError(f'Invalid repo_id: {to_id}. It should have a namespace (:namespace:/:repo_name:)') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL json = {'fromRepo': from_id, 'toRepo': to_id, 'type': repo_type} path = f'{self.endpoint}/api/repos/move' headers = self._build_hf_headers(token=token) r = get_session().post(path, headers=headers, json=json) try: hf_raise_for_status(r) except HfHubHTTPError as e: e.append_to_message('\nFor additional documentation please see https://hf.co/docs/hub/repositories-settings#renaming-or-transferring-a-repo.') raise @overload def create_commit(self, repo_id: str, operations: Iterable[CommitOperation], *, commit_message: str, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, num_threads: int=5, parent_commit: Optional[str]=None, run_as_future: Literal[False]=...) -> CommitInfo: ... @overload def create_commit(self, repo_id: str, operations: Iterable[CommitOperation], *, commit_message: str, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, num_threads: int=5, parent_commit: Optional[str]=None, run_as_future: Literal[True]=...) -> Future[CommitInfo]: ... @validate_hf_hub_args @future_compatible def create_commit(self, repo_id: str, operations: Iterable[CommitOperation], *, commit_message: str, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, num_threads: int=5, parent_commit: Optional[str]=None, run_as_future: bool=False) -> Union[CommitInfo, Future[CommitInfo]]: if parent_commit is not None and (not constants.REGEX_COMMIT_OID.fullmatch(parent_commit)): raise ValueError(f'`parent_commit` is not a valid commit OID. 
It must match the following regex: {constants.REGEX_COMMIT_OID}') if commit_message is None or len(commit_message) == 0: raise ValueError("`commit_message` can't be empty, please pass a value.") commit_description = commit_description if commit_description is not None else '' repo_type = repo_type if repo_type is not None else constants.REPO_TYPE_MODEL if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') unquoted_revision = revision or constants.DEFAULT_REVISION revision = quote(unquoted_revision, safe='') create_pr = create_pr if create_pr is not None else False headers = self._build_hf_headers(token=token) operations = list(operations) additions = [op for op in operations if isinstance(op, CommitOperationAdd)] copies = [op for op in operations if isinstance(op, CommitOperationCopy)] nb_additions = len(additions) nb_copies = len(copies) nb_deletions = len(operations) - nb_additions - nb_copies for addition in additions: if addition._is_committed: raise ValueError(f'CommitOperationAdd {addition} has already been committed and cannot be reused. Please create a new CommitOperationAdd object if you want to create a new commit.') if repo_type != 'dataset': for addition in additions: if addition.path_in_repo.endswith(('.arrow', '.parquet')): warnings.warn(f"It seems that you are about to commit a data file ({addition.path_in_repo}) to a {repo_type} repository. Are you sure this is intended? If you are trying to upload a dataset, please set `repo_type='dataset'` or `--repo-type=dataset` in the CLI.") logger.debug(f'About to commit to the hub: {len(additions)} addition(s), {len(copies)} copy(ies) and {nb_deletions} deletion(s).') for addition in additions: if addition.path_in_repo == 'README.md': with addition.as_file() as file: content = file.read().decode() self._validate_yaml(content, repo_type=repo_type, token=token) break _warn_on_overwriting_operations(operations) self.preupload_lfs_files(repo_id=repo_id, additions=additions, token=token, repo_type=repo_type, revision=unquoted_revision, create_pr=create_pr, num_threads=num_threads, free_memory=False) operations_without_no_op = [] for operation in operations: if isinstance(operation, CommitOperationAdd) and operation._remote_oid is not None and (operation._remote_oid == operation._local_oid): logger.debug(f"Skipping upload for '{operation.path_in_repo}' as the file has not changed.") continue operations_without_no_op.append(operation) if len(operations) != len(operations_without_no_op): logger.info(f'Removing {len(operations) - len(operations_without_no_op)} file(s) from commit that have not changed.') if len(operations_without_no_op) == 0: logger.warning('No files have been modified since last commit. 
Skipping to prevent empty commit.') try: info = self.repo_info(repo_id=repo_id, repo_type=repo_type, revision=unquoted_revision, token=token) except RepositoryNotFoundError as e: e.append_to_message(_CREATE_COMMIT_NO_REPO_ERROR_MESSAGE) raise url_prefix = self.endpoint if repo_type is not None and repo_type != constants.REPO_TYPE_MODEL: url_prefix = f'{url_prefix}/{repo_type}s' return CommitInfo(commit_url=f'{url_prefix}/{repo_id}/commit/{info.sha}', commit_message=commit_message, commit_description=commit_description, oid=info.sha) files_to_copy = _fetch_files_to_copy(copies=copies, repo_type=repo_type, repo_id=repo_id, headers=headers, revision=unquoted_revision, endpoint=self.endpoint) commit_payload = _prepare_commit_payload(operations=operations, files_to_copy=files_to_copy, commit_message=commit_message, commit_description=commit_description, parent_commit=parent_commit) commit_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/commit/{revision}' def _payload_as_ndjson() -> Iterable[bytes]: for item in commit_payload: yield json.dumps(item).encode() yield b'\n' headers = {'Content-Type': 'application/x-ndjson', **headers} data = b''.join(_payload_as_ndjson()) params = {'create_pr': '1'} if create_pr else None try: commit_resp = get_session().post(url=commit_url, headers=headers, data=data, params=params) hf_raise_for_status(commit_resp, endpoint_name='commit') except RepositoryNotFoundError as e: e.append_to_message(_CREATE_COMMIT_NO_REPO_ERROR_MESSAGE) raise except EntryNotFoundError as e: if nb_deletions > 0 and "A file with this name doesn't exist" in str(e): e.append_to_message("\nMake sure to differentiate file and folder paths in delete operations with a trailing '/' or using `is_folder=True/False`.") raise for addition in additions: addition._is_committed = True commit_data = commit_resp.json() return CommitInfo(commit_url=commit_data['commitUrl'], commit_message=commit_message, commit_description=commit_description, oid=commit_data['commitOid'], pr_url=commit_data['pullRequestUrl'] if create_pr else None) @experimental @validate_hf_hub_args def create_commits_on_pr(self, *, repo_id: str, addition_commits: List[List[CommitOperationAdd]], deletion_commits: List[List[CommitOperationDelete]], commit_message: str, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, merge_pr: bool=True, num_threads: int=5, verbose: bool=False) -> str: logger = logging.get_logger(__name__ + '.create_commits_on_pr') if verbose: logger.setLevel('INFO') logger.info(f'Will create {len(deletion_commits)} deletion commit(s) and {len(addition_commits)} addition commit(s), totalling {sum((len(ops) for ops in addition_commits + deletion_commits))} atomic operations.') strategy = MultiCommitStrategy(addition_commits=[MultiCommitStep(operations=operations) for operations in addition_commits], deletion_commits=[MultiCommitStep(operations=operations) for operations in deletion_commits]) logger.info(f'Multi-commits strategy with ID {strategy.id}.') for discussion in self.get_repo_discussions(repo_id=repo_id, repo_type=repo_type, token=token): if discussion.is_pull_request and discussion.status == 'draft' and (strategy.id in discussion.title): pr = self.get_discussion_details(repo_id=repo_id, discussion_num=discussion.num, repo_type=repo_type, token=token) logger.info(f'PR already exists: {pr.url}. 
Will resume process where it stopped.') break else: pr = multi_commit_create_pull_request(self, repo_id=repo_id, commit_message=commit_message, commit_description=commit_description, strategy=strategy, token=token, repo_type=repo_type) logger.info(f'New PR created: {pr.url}') for event in pr.events: if isinstance(event, DiscussionComment): pr_comment = event break else: raise MultiCommitException(f'PR #{pr.num} must have at least 1 comment') description_commits = multi_commit_parse_pr_description(pr_comment.content) if len(description_commits) != len(strategy.all_steps): raise MultiCommitException(f'Corrupted multi-commit PR #{pr.num}: got {len(description_commits)} steps in description but {len(strategy.all_steps)} in strategy.') for step_id in strategy.all_steps: if step_id not in description_commits: raise MultiCommitException(f"Corrupted multi-commit PR #{pr.num}: expected step {step_id} but didn't find it (have {', '.join(description_commits)}).") commits_on_main_branch = {commit.commit_id for commit in self.list_repo_commits(repo_id=repo_id, repo_type=repo_type, token=token, revision=constants.DEFAULT_REVISION)} pr_commits = [commit for commit in self.list_repo_commits(repo_id=repo_id, repo_type=repo_type, token=token, revision=pr.git_reference) if commit.commit_id not in commits_on_main_branch] if len(pr_commits) > 0: logger.info(f'Found {len(pr_commits)} existing commits on the PR.') if len(pr_commits) > len(strategy.all_steps): raise MultiCommitException(f'Corrupted multi-commit PR #{pr.num}: scheduled {len(strategy.all_steps)} steps but {len(pr_commits)} commits have already been pushed to the PR.') remaining_additions = {step.id: step for step in strategy.addition_commits} remaining_deletions = {step.id: step for step in strategy.deletion_commits} for commit in pr_commits: if commit.title in remaining_additions: step = remaining_additions.pop(commit.title) step.completed = True elif commit.title in remaining_deletions: step = remaining_deletions.pop(commit.title) step.completed = True if len(remaining_deletions) > 0 and len(remaining_additions) < len(strategy.addition_commits): raise MultiCommitException(f'Corrupted multi-commit PR #{pr.num}: some addition commits have already been pushed to the PR but deletion commits are not all completed yet.') nb_remaining = len(remaining_deletions) + len(remaining_additions) if len(pr_commits) > 0: logger.info(f'{nb_remaining} commits remaining ({len(remaining_deletions)} deletion commits and {len(remaining_additions)} addition commits)') for step in list(remaining_deletions.values()) + list(remaining_additions.values()): self.create_commit(repo_id=repo_id, repo_type=repo_type, token=token, commit_message=step.id, revision=pr.git_reference, num_threads=num_threads, operations=step.operations, create_pr=False) step.completed = True nb_remaining -= 1 logger.info(f' step {step.id} completed (still {nb_remaining} to go).') self.edit_discussion_comment(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, comment_id=pr_comment.id, new_content=multi_commit_generate_comment(commit_message=commit_message, commit_description=commit_description, strategy=strategy)) logger.info('All commits have been pushed.') self.rename_discussion(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, new_title=commit_message) self.change_discussion_status(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, new_status='open', comment=MULTI_COMMIT_PR_COMPLETION_COMMENT_TEMPLATE) logger.info('PR is now open 
for reviews.') if merge_pr: try: self.merge_pull_request(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, comment=MULTI_COMMIT_PR_CLOSING_COMMENT_TEMPLATE) logger.info('PR has been automatically merged (`merge_pr=True` was passed).') except BadRequestError as error: if error.server_message is not None and 'no associated changes' in error.server_message: self.change_discussion_status(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, comment=MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_NO_CHANGES_TEMPLATE, new_status='closed') logger.warning("Couldn't merge the PR: no associated changes.") else: self.comment_discussion(repo_id=repo_id, repo_type=repo_type, token=token, discussion_num=pr.num, comment=MULTI_COMMIT_PR_CLOSE_COMMENT_FAILURE_BAD_REQUEST_TEMPLATE.format(error_message=error.server_message)) raise MultiCommitException(f"Couldn't merge Pull Request in multi-commit: {error.server_message}") from error return pr.url def preupload_lfs_files(self, repo_id: str, additions: Iterable[CommitOperationAdd], *, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, num_threads: int=5, free_memory: bool=True, gitignore_content: Optional[str]=None): repo_type = repo_type if repo_type is not None else constants.REPO_TYPE_MODEL if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') revision = quote(revision, safe='') if revision is not None else constants.DEFAULT_REVISION create_pr = create_pr if create_pr is not None else False headers = self._build_hf_headers(token=token) additions = list(additions) if gitignore_content is None: for addition in additions: if addition.path_in_repo == '.gitignore': with addition.as_file() as f: gitignore_content = f.read().decode() break new_additions = [addition for addition in additions if not addition._is_uploaded] try: _fetch_upload_modes(additions=new_additions, repo_type=repo_type, repo_id=repo_id, headers=headers, revision=revision, endpoint=self.endpoint, create_pr=create_pr or False, gitignore_content=gitignore_content) except RepositoryNotFoundError as e: e.append_to_message(_CREATE_COMMIT_NO_REPO_ERROR_MESSAGE) raise new_lfs_additions = [addition for addition in new_additions if addition._upload_mode == 'lfs'] new_lfs_additions_to_upload = [] for addition in new_lfs_additions: if addition._should_ignore: logger.debug(f"Skipping upload for LFS file '{addition.path_in_repo}' (ignored by gitignore file).") else: new_lfs_additions_to_upload.append(addition) if len(new_lfs_additions) != len(new_lfs_additions_to_upload): logger.info(f'Skipped upload for {len(new_lfs_additions) - len(new_lfs_additions_to_upload)} LFS file(s) (ignored by gitignore file).') _upload_lfs_files(additions=new_lfs_additions_to_upload, repo_type=repo_type, repo_id=repo_id, headers=headers, endpoint=self.endpoint, num_threads=num_threads, revision=revision if not create_pr else None) for addition in new_lfs_additions_to_upload: addition._is_uploaded = True if free_memory: addition.path_or_fileobj = b'' @overload def upload_file(self, *, path_or_fileobj: Union[str, Path, bytes, BinaryIO], path_in_repo: str, repo_id: str, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, run_as_future: Literal[False]=...) -> CommitInfo: ... 
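# --- Usage sketch (editor's illustration; not part of the upstream API surface). ---
# `create_commit` batches additions, copies and deletions into a single atomic
# commit, pre-uploading LFS files via `preupload_lfs_files` first. A minimal
# sketch, assuming a token is configured locally; 'username/my-model' and the
# file paths below are hypothetical placeholders:
#
#     from huggingface_hub import HfApi, CommitOperationAdd, CommitOperationDelete
#
#     api = HfApi()
#     commit_info = api.create_commit(
#         repo_id='username/my-model',
#         operations=[
#             CommitOperationAdd(path_in_repo='weights.bin', path_or_fileobj='./weights.bin'),
#             CommitOperationDelete(path_in_repo='old-weights.bin'),
#         ],
#         commit_message='Replace weights',
#     )
#     print(commit_info.commit_url)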
@overload def upload_file(self, *, path_or_fileobj: Union[str, Path, bytes, BinaryIO], path_in_repo: str, repo_id: str, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, run_as_future: Literal[True]=...) -> Future[CommitInfo]: ... @validate_hf_hub_args @future_compatible def upload_file(self, *, path_or_fileobj: Union[str, Path, bytes, BinaryIO], path_in_repo: str, repo_id: str, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, run_as_future: bool=False) -> Union[CommitInfo, Future[CommitInfo]]: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') commit_message = commit_message if commit_message is not None else f'Upload {path_in_repo} with huggingface_hub' operation = CommitOperationAdd(path_or_fileobj=path_or_fileobj, path_in_repo=path_in_repo) commit_info = self.create_commit(repo_id=repo_id, repo_type=repo_type, operations=[operation], commit_message=commit_message, commit_description=commit_description, token=token, revision=revision, create_pr=create_pr, parent_commit=parent_commit) if commit_info.pr_url is not None: revision = quote(_parse_revision_from_pr_url(commit_info.pr_url), safe='') if repo_type in constants.REPO_TYPES_URL_PREFIXES: repo_id = constants.REPO_TYPES_URL_PREFIXES[repo_type] + repo_id revision = revision if revision is not None else constants.DEFAULT_REVISION return CommitInfo(commit_url=commit_info.commit_url, commit_message=commit_info.commit_message, commit_description=commit_info.commit_description, oid=commit_info.oid, pr_url=commit_info.pr_url, _url=f'{self.endpoint}/{repo_id}/blob/{revision}/{path_in_repo}') @overload def upload_folder(self, *, repo_id: str, folder_path: Union[str, Path], path_in_repo: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, multi_commits: Literal[False]=..., multi_commits_verbose: bool=False, run_as_future: Literal[False]=...) -> CommitInfo: ... @overload def upload_folder(self, *, repo_id: str, folder_path: Union[str, Path], path_in_repo: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, multi_commits: Literal[True]=..., multi_commits_verbose: bool=False, run_as_future: Literal[False]=...) -> str: ... 
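# --- Usage sketch (editor's illustration; not part of the upstream API surface). ---
# `upload_file` is a thin wrapper that turns a single file into one
# CommitOperationAdd and delegates to `create_commit`. A minimal sketch, assuming
# a configured token; 'username/my-model' is a hypothetical placeholder:
#
#     from huggingface_hub import HfApi
#
#     api = HfApi()
#     api.upload_file(
#         path_or_fileobj=b'hello world',
#         path_in_repo='hello.txt',
#         repo_id='username/my-model',
#         commit_message='Add hello.txt',
#     )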
@overload def upload_folder(self, *, repo_id: str, folder_path: Union[str, Path], path_in_repo: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, multi_commits: Literal[False]=..., multi_commits_verbose: bool=False, run_as_future: Literal[True]=...) -> Future[CommitInfo]: ... @overload def upload_folder(self, *, repo_id: str, folder_path: Union[str, Path], path_in_repo: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, multi_commits: Literal[True]=..., multi_commits_verbose: bool=False, run_as_future: Literal[True]=...) -> Future[str]: ... @validate_hf_hub_args @future_compatible def upload_folder(self, *, repo_id: str, folder_path: Union[str, Path], path_in_repo: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, multi_commits: bool=False, multi_commits_verbose: bool=False, run_as_future: bool=False) -> Union[CommitInfo, str, Future[CommitInfo], Future[str]]: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if multi_commits: if revision is not None and revision != constants.DEFAULT_REVISION: raise ValueError('Cannot use `multi_commits` to commit changes to a branch other than the main branch.') if path_in_repo is None: path_in_repo = '' if ignore_patterns is None: ignore_patterns = [] elif isinstance(ignore_patterns, str): ignore_patterns = [ignore_patterns] ignore_patterns += DEFAULT_IGNORE_PATTERNS delete_operations = self._prepare_folder_deletions(repo_id=repo_id, repo_type=repo_type, revision=constants.DEFAULT_REVISION if create_pr else revision, token=token, path_in_repo=path_in_repo, delete_patterns=delete_patterns) add_operations = self._prepare_upload_folder_additions(folder_path, path_in_repo, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, token=token, repo_type=repo_type) if len(add_operations) > 0: added_paths = set((op.path_in_repo for op in add_operations)) delete_operations = [delete_op for delete_op in delete_operations if delete_op.path_in_repo not in added_paths] commit_operations = delete_operations + add_operations commit_message = commit_message or 'Upload folder using huggingface_hub' if multi_commits: (addition_commits, deletion_commits) = plan_multi_commits(operations=commit_operations) pr_url = self.create_commits_on_pr(repo_id=repo_id, repo_type=repo_type, addition_commits=addition_commits, deletion_commits=deletion_commits, commit_message=commit_message, 
commit_description=commit_description, token=token, merge_pr=not create_pr, verbose=multi_commits_verbose) return pr_url commit_info = self.create_commit(repo_type=repo_type, repo_id=repo_id, operations=commit_operations, commit_message=commit_message, commit_description=commit_description, token=token, revision=revision, create_pr=create_pr, parent_commit=parent_commit) if create_pr and commit_info.pr_url is not None: revision = quote(_parse_revision_from_pr_url(commit_info.pr_url), safe='') if repo_type in constants.REPO_TYPES_URL_PREFIXES: repo_id = constants.REPO_TYPES_URL_PREFIXES[repo_type] + repo_id revision = revision if revision is not None else constants.DEFAULT_REVISION return CommitInfo(commit_url=commit_info.commit_url, commit_message=commit_info.commit_message, commit_description=commit_info.commit_description, oid=commit_info.oid, pr_url=commit_info.pr_url, _url=f'{self.endpoint}/{repo_id}/tree/{revision}/{path_in_repo}') @validate_hf_hub_args def delete_file(self, path_in_repo: str, repo_id: str, *, token: Union[str, bool, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None) -> CommitInfo: commit_message = commit_message if commit_message is not None else f'Delete {path_in_repo} with huggingface_hub' operations = [CommitOperationDelete(path_in_repo=path_in_repo)] return self.create_commit(repo_id=repo_id, repo_type=repo_type, token=token, operations=operations, revision=revision, commit_message=commit_message, commit_description=commit_description, create_pr=create_pr, parent_commit=parent_commit) @validate_hf_hub_args def delete_files(self, repo_id: str, delete_patterns: List[str], *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None) -> CommitInfo: operations = self._prepare_folder_deletions(repo_id=repo_id, repo_type=repo_type, delete_patterns=delete_patterns, path_in_repo='', revision=revision) if commit_message is None: commit_message = f"Delete files {' '.join(delete_patterns)} with huggingface_hub" return self.create_commit(repo_id=repo_id, repo_type=repo_type, token=token, operations=operations, revision=revision, commit_message=commit_message, commit_description=commit_description, create_pr=create_pr, parent_commit=parent_commit) @validate_hf_hub_args def delete_folder(self, path_in_repo: str, repo_id: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None) -> CommitInfo: return self.create_commit(repo_id=repo_id, repo_type=repo_type, token=token, operations=[CommitOperationDelete(path_in_repo=path_in_repo, is_folder=True)], revision=revision, commit_message=commit_message if commit_message is not None else f'Delete folder {path_in_repo} with huggingface_hub', commit_description=commit_description, create_pr=create_pr, parent_commit=parent_commit) def upload_large_folder(self, repo_id: str, folder_path: Union[str, Path], *, repo_type: str, revision: Optional[str]=None, private: bool=False, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, num_workers: Optional[int]=None, 
print_report: bool=True, print_report_every: int=60) -> None: return upload_large_folder_internal(self, repo_id=repo_id, folder_path=folder_path, repo_type=repo_type, revision=revision, private=private, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, num_workers=num_workers, print_report=print_report, print_report_every=print_report_every) @validate_hf_hub_args def get_hf_file_metadata(self, *, url: str, token: Union[bool, str, None]=None, proxies: Optional[Dict]=None, timeout: Optional[float]=constants.DEFAULT_REQUEST_TIMEOUT) -> HfFileMetadata: if token is None: token = self.token return get_hf_file_metadata(url=url, token=token, proxies=proxies, timeout=timeout, library_name=self.library_name, library_version=self.library_version, user_agent=self.user_agent) @validate_hf_hub_args def hf_hub_download(self, repo_id: str, filename: str, *, subfolder: Optional[str]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, cache_dir: Union[str, Path, None]=None, local_dir: Union[str, Path, None]=None, force_download: bool=False, proxies: Optional[Dict]=None, etag_timeout: float=constants.DEFAULT_ETAG_TIMEOUT, token: Union[bool, str, None]=None, local_files_only: bool=False, resume_download: Optional[bool]=None, legacy_cache_layout: bool=False, force_filename: Optional[str]=None, local_dir_use_symlinks: Union[bool, Literal['auto']]='auto') -> str: from .file_download import hf_hub_download if token is None: token = self.token return hf_hub_download(repo_id=repo_id, filename=filename, subfolder=subfolder, repo_type=repo_type, revision=revision, endpoint=self.endpoint, library_name=self.library_name, library_version=self.library_version, cache_dir=cache_dir, local_dir=local_dir, local_dir_use_symlinks=local_dir_use_symlinks, user_agent=self.user_agent, force_download=force_download, force_filename=force_filename, proxies=proxies, etag_timeout=etag_timeout, resume_download=resume_download, token=token, headers=self.headers, local_files_only=local_files_only, legacy_cache_layout=legacy_cache_layout) @validate_hf_hub_args def snapshot_download(self, repo_id: str, *, repo_type: Optional[str]=None, revision: Optional[str]=None, cache_dir: Union[str, Path, None]=None, local_dir: Union[str, Path, None]=None, proxies: Optional[Dict]=None, etag_timeout: float=constants.DEFAULT_ETAG_TIMEOUT, force_download: bool=False, token: Union[bool, str, None]=None, local_files_only: bool=False, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, max_workers: int=8, tqdm_class: Optional[base_tqdm]=None, local_dir_use_symlinks: Union[bool, Literal['auto']]='auto', resume_download: Optional[bool]=None) -> str: from ._snapshot_download import snapshot_download if token is None: token = self.token return snapshot_download(repo_id=repo_id, repo_type=repo_type, revision=revision, endpoint=self.endpoint, cache_dir=cache_dir, local_dir=local_dir, local_dir_use_symlinks=local_dir_use_symlinks, library_name=self.library_name, library_version=self.library_version, user_agent=self.user_agent, proxies=proxies, etag_timeout=etag_timeout, resume_download=resume_download, force_download=force_download, token=token, local_files_only=local_files_only, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, max_workers=max_workers, tqdm_class=tqdm_class) def get_safetensors_metadata(self, repo_id: str, *, repo_type: Optional[str]=None, revision: Optional[str]=None, token: Union[bool, str, None]=None) -> SafetensorsRepoMetadata: if 
self.file_exists(repo_id=repo_id, filename=constants.SAFETENSORS_SINGLE_FILE, repo_type=repo_type, revision=revision, token=token): file_metadata = self.parse_safetensors_file_metadata(repo_id=repo_id, filename=constants.SAFETENSORS_SINGLE_FILE, repo_type=repo_type, revision=revision, token=token) return SafetensorsRepoMetadata(metadata=None, sharded=False, weight_map={tensor_name: constants.SAFETENSORS_SINGLE_FILE for tensor_name in file_metadata.tensors.keys()}, files_metadata={constants.SAFETENSORS_SINGLE_FILE: file_metadata}) elif self.file_exists(repo_id=repo_id, filename=constants.SAFETENSORS_INDEX_FILE, repo_type=repo_type, revision=revision, token=token): index_file = self.hf_hub_download(repo_id=repo_id, filename=constants.SAFETENSORS_INDEX_FILE, repo_type=repo_type, revision=revision, token=token) with open(index_file) as f: index = json.load(f) weight_map = index.get('weight_map', {}) files_metadata = {} def _parse(filename: str) -> None: files_metadata[filename] = self.parse_safetensors_file_metadata(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision, token=token) thread_map(_parse, set(weight_map.values()), desc='Parse safetensors files', tqdm_class=hf_tqdm) return SafetensorsRepoMetadata(metadata=index.get('metadata', None), sharded=True, weight_map=weight_map, files_metadata=files_metadata) else: raise NotASafetensorsRepoError(f"'{repo_id}' is not a safetensors repo. Couldn't find '{constants.SAFETENSORS_INDEX_FILE}' or '{constants.SAFETENSORS_SINGLE_FILE}' files.") def parse_safetensors_file_metadata(self, repo_id: str, filename: str, *, repo_type: Optional[str]=None, revision: Optional[str]=None, token: Union[bool, str, None]=None) -> SafetensorsFileMetadata: url = hf_hub_url(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision, endpoint=self.endpoint) _headers = self._build_hf_headers(token=token) response = get_session().get(url, headers={**_headers, 'range': 'bytes=0-100000'}) hf_raise_for_status(response) metadata_size = struct.unpack('<Q', response.content[:8])[0] if metadata_size > constants.SAFETENSORS_MAX_HEADER_LENGTH: raise SafetensorsParsingError(f"Failed to parse safetensors header for '{filename}' (repo '{repo_id}', revision '{revision or constants.DEFAULT_REVISION}'): safetensors header is too big. Maximum supported size is {constants.SAFETENSORS_MAX_HEADER_LENGTH} bytes (got {metadata_size}).") if metadata_size <= 100000: metadata_as_bytes = response.content[8:8 + metadata_size] else: response = get_session().get(url, headers={**_headers, 'range': f'bytes=8-{metadata_size + 7}'}) hf_raise_for_status(response) metadata_as_bytes = response.content try: metadata_as_dict = json.loads(metadata_as_bytes.decode(errors='ignore')) except json.JSONDecodeError as e: raise SafetensorsParsingError(f"Failed to parse safetensors header for '{filename}' (repo '{repo_id}', revision '{revision or constants.DEFAULT_REVISION}'): header is not json-encoded string. Please make sure this is a correctly formatted safetensors file.") from e try: return SafetensorsFileMetadata(metadata=metadata_as_dict.get('__metadata__', {}), tensors={key: TensorInfo(dtype=tensor['dtype'], shape=tensor['shape'], data_offsets=tuple(tensor['data_offsets'])) for (key, tensor) in metadata_as_dict.items() if key != '__metadata__'}) except (KeyError, IndexError) as e: raise SafetensorsParsingError(f"Failed to parse safetensors header for '{filename}' (repo '{repo_id}', revision '{revision or constants.DEFAULT_REVISION}'): header format not recognized. 
Please make sure this is a correctly formatted safetensors file.") from e @validate_hf_hub_args def create_branch(self, repo_id: str, *, branch: str, revision: Optional[str]=None, token: Union[bool, str, None]=None, repo_type: Optional[str]=None, exist_ok: bool=False) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL branch = quote(branch, safe='') branch_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/branch/{branch}' headers = self._build_hf_headers(token=token) payload = {} if revision is not None: payload['startingPoint'] = revision response = get_session().post(url=branch_url, headers=headers, json=payload) try: hf_raise_for_status(response) except HfHubHTTPError as e: if exist_ok and e.response.status_code == 409: return elif exist_ok and e.response.status_code == 403: try: refs = self.list_repo_refs(repo_id=repo_id, repo_type=repo_type, token=token) for branch_ref in refs.branches: if branch_ref.name == branch: return except HfHubHTTPError: pass raise @validate_hf_hub_args def delete_branch(self, repo_id: str, *, branch: str, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL branch = quote(branch, safe='') branch_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/branch/{branch}' headers = self._build_hf_headers(token=token) response = get_session().delete(url=branch_url, headers=headers) hf_raise_for_status(response) @validate_hf_hub_args def create_tag(self, repo_id: str, *, tag: str, tag_message: Optional[str]=None, revision: Optional[str]=None, token: Union[bool, str, None]=None, repo_type: Optional[str]=None, exist_ok: bool=False) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL revision = quote(revision, safe='') if revision is not None else constants.DEFAULT_REVISION tag_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/tag/{revision}' headers = self._build_hf_headers(token=token) payload = {'tag': tag} if tag_message is not None: payload['message'] = tag_message response = get_session().post(url=tag_url, headers=headers, json=payload) try: hf_raise_for_status(response) except HfHubHTTPError as e: if not (e.response.status_code == 409 and exist_ok): raise @validate_hf_hub_args def delete_tag(self, repo_id: str, *, tag: str, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> None: if repo_type is None: repo_type = constants.REPO_TYPE_MODEL tag = quote(tag, safe='') tag_url = f'{self.endpoint}/api/{repo_type}s/{repo_id}/tag/{tag}' headers = self._build_hf_headers(token=token) response = get_session().delete(url=tag_url, headers=headers) hf_raise_for_status(response) @validate_hf_hub_args def get_full_repo_name(self, model_id: str, *, organization: Optional[str]=None, token: Union[bool, str, None]=None): if organization is None: if '/' in model_id: username = model_id.split('/')[0] else: username = self.whoami(token=token)['name'] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' @validate_hf_hub_args def get_repo_discussions(self, repo_id: str, *, author: Optional[str]=None, discussion_type: Optional[constants.DiscussionTypeFilter]=None, discussion_status: Optional[constants.DiscussionStatusFilter]=None, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> Iterator[Discussion]: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL if discussion_type is not None and 
discussion_type not in constants.DISCUSSION_TYPES: raise ValueError(f'Invalid discussion_type, must be one of {constants.DISCUSSION_TYPES}') if discussion_status is not None and discussion_status not in constants.DISCUSSION_STATUS: raise ValueError(f'Invalid discussion_status, must be one of {constants.DISCUSSION_STATUS}') headers = self._build_hf_headers(token=token) path = f'{self.endpoint}/api/{repo_type}s/{repo_id}/discussions' params: Dict[str, Union[str, int]] = {} if discussion_type is not None: params['type'] = discussion_type if discussion_status is not None: params['status'] = discussion_status if author is not None: params['author'] = author def _fetch_discussion_page(page_index: int): params['p'] = page_index resp = get_session().get(path, headers=headers, params=params) hf_raise_for_status(resp) paginated_discussions = resp.json() total = paginated_discussions['count'] start = paginated_discussions['start'] discussions = paginated_discussions['discussions'] has_next = start + len(discussions) < total return (discussions, has_next) (has_next, page_index) = (True, 0) while has_next: (discussions, has_next) = _fetch_discussion_page(page_index=page_index) for discussion in discussions: yield Discussion(title=discussion['title'], num=discussion['num'], author=discussion.get('author', {}).get('name', 'deleted'), created_at=parse_datetime(discussion['createdAt']), status=discussion['status'], repo_id=discussion['repo']['name'], repo_type=discussion['repo']['type'], is_pull_request=discussion['isPullRequest'], endpoint=self.endpoint) page_index = page_index + 1 @validate_hf_hub_args def get_discussion_details(self, repo_id: str, discussion_num: int, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> DiscussionWithDetails: if not isinstance(discussion_num, int) or discussion_num <= 0: raise ValueError('Invalid discussion_num, must be a positive integer') if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL path = f'{self.endpoint}/api/{repo_type}s/{repo_id}/discussions/{discussion_num}' headers = self._build_hf_headers(token=token) resp = get_session().get(path, params={'diff': '1'}, headers=headers) hf_raise_for_status(resp) discussion_details = resp.json() is_pull_request = discussion_details['isPullRequest'] target_branch = discussion_details['changes']['base'] if is_pull_request else None conflicting_files = discussion_details['filesWithConflicts'] if is_pull_request else None merge_commit_oid = discussion_details['changes'].get('mergeCommitId', None) if is_pull_request else None return DiscussionWithDetails(title=discussion_details['title'], num=discussion_details['num'], author=discussion_details.get('author', {}).get('name', 'deleted'), created_at=parse_datetime(discussion_details['createdAt']), status=discussion_details['status'], repo_id=discussion_details['repo']['name'], repo_type=discussion_details['repo']['type'], is_pull_request=discussion_details['isPullRequest'], events=[deserialize_event(evt) for evt in discussion_details['events']], conflicting_files=conflicting_files, target_branch=target_branch, merge_commit_oid=merge_commit_oid, diff=discussion_details.get('diff'), endpoint=self.endpoint) @validate_hf_hub_args def create_discussion(self, repo_id: str, title: str, *, token: Union[bool, str, None]=None, description: Optional[str]=None, repo_type: Optional[str]=None, pull_request: bool=False) -> DiscussionWithDetails: if 
repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL if description is not None: description = description.strip() description = description if description else f"{('Pull Request' if pull_request else 'Discussion')} opened with the [huggingface_hub Python library](https://huggingface.co/docs/huggingface_hub)" headers = self._build_hf_headers(token=token) resp = get_session().post(f'{self.endpoint}/api/{repo_type}s/{repo_id}/discussions', json={'title': title.strip(), 'description': description, 'pullRequest': pull_request}, headers=headers) hf_raise_for_status(resp) num = resp.json()['num'] return self.get_discussion_details(repo_id=repo_id, repo_type=repo_type, discussion_num=num, token=token) @validate_hf_hub_args def create_pull_request(self, repo_id: str, title: str, *, token: Union[bool, str, None]=None, description: Optional[str]=None, repo_type: Optional[str]=None) -> DiscussionWithDetails: return self.create_discussion(repo_id=repo_id, title=title, token=token, description=description, repo_type=repo_type, pull_request=True) def _post_discussion_changes(self, *, repo_id: str, discussion_num: int, resource: str, body: Optional[dict]=None, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> requests.Response: if not isinstance(discussion_num, int) or discussion_num <= 0: raise ValueError('Invalid discussion_num, must be a positive integer') if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL repo_id = f'{repo_type}s/{repo_id}' path = f'{self.endpoint}/api/{repo_id}/discussions/{discussion_num}/{resource}' headers = self._build_hf_headers(token=token) resp = get_session().post(path, headers=headers, json=body) hf_raise_for_status(resp) return resp @validate_hf_hub_args def comment_discussion(self, repo_id: str, discussion_num: int, comment: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> DiscussionComment: resp = self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource='comment', body={'comment': comment}) return deserialize_event(resp.json()['newMessage']) @validate_hf_hub_args def rename_discussion(self, repo_id: str, discussion_num: int, new_title: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> DiscussionTitleChange: resp = self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource='title', body={'title': new_title}) return deserialize_event(resp.json()['newTitle']) @validate_hf_hub_args def change_discussion_status(self, repo_id: str, discussion_num: int, new_status: Literal['open', 'closed'], *, token: Union[bool, str, None]=None, comment: Optional[str]=None, repo_type: Optional[str]=None) -> DiscussionStatusChange: if new_status not in ['open', 'closed']: raise ValueError("Invalid status, valid statuses are: 'open' and 'closed'") body: Dict[str, str] = {'status': new_status} if comment and comment.strip(): body['comment'] = comment.strip() resp = self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource='status', body=body) return deserialize_event(resp.json()['newStatus']) @validate_hf_hub_args def merge_pull_request(self, repo_id: str, discussion_num: int, *, token: 
Union[bool, str, None]=None, comment: Optional[str]=None, repo_type: Optional[str]=None): self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource='merge', body={'comment': comment.strip()} if comment and comment.strip() else None) @validate_hf_hub_args def edit_discussion_comment(self, repo_id: str, discussion_num: int, comment_id: str, new_content: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> DiscussionComment: resp = self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource=f'comment/{comment_id.lower()}/edit', body={'content': new_content}) return deserialize_event(resp.json()['updatedComment']) @validate_hf_hub_args def hide_discussion_comment(self, repo_id: str, discussion_num: int, comment_id: str, *, token: Union[bool, str, None]=None, repo_type: Optional[str]=None) -> DiscussionComment: warnings.warn("Hidden comments' content cannot be retrieved anymore. Hiding a comment is irreversible.", UserWarning) resp = self._post_discussion_changes(repo_id=repo_id, repo_type=repo_type, discussion_num=discussion_num, token=token, resource=f'comment/{comment_id.lower()}/hide') return deserialize_event(resp.json()['updatedComment']) @validate_hf_hub_args def add_space_secret(self, repo_id: str, key: str, value: str, *, description: Optional[str]=None, token: Union[bool, str, None]=None) -> None: payload = {'key': key, 'value': value} if description is not None: payload['description'] = description r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/secrets', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(r) @validate_hf_hub_args def delete_space_secret(self, repo_id: str, key: str, *, token: Union[bool, str, None]=None) -> None: r = get_session().delete(f'{self.endpoint}/api/spaces/{repo_id}/secrets', headers=self._build_hf_headers(token=token), json={'key': key}) hf_raise_for_status(r) @validate_hf_hub_args def get_space_variables(self, repo_id: str, *, token: Union[bool, str, None]=None) -> Dict[str, SpaceVariable]: r = get_session().get(f'{self.endpoint}/api/spaces/{repo_id}/variables', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return {k: SpaceVariable(k, v) for (k, v) in r.json().items()} @validate_hf_hub_args def add_space_variable(self, repo_id: str, key: str, value: str, *, description: Optional[str]=None, token: Union[bool, str, None]=None) -> Dict[str, SpaceVariable]: payload = {'key': key, 'value': value} if description is not None: payload['description'] = description r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/variables', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(r) return {k: SpaceVariable(k, v) for (k, v) in r.json().items()} @validate_hf_hub_args def delete_space_variable(self, repo_id: str, key: str, *, token: Union[bool, str, None]=None) -> Dict[str, SpaceVariable]: r = get_session().delete(f'{self.endpoint}/api/spaces/{repo_id}/variables', headers=self._build_hf_headers(token=token), json={'key': key}) hf_raise_for_status(r) return {k: SpaceVariable(k, v) for (k, v) in r.json().items()} @validate_hf_hub_args def get_space_runtime(self, repo_id: str, *, token: Union[bool, str, None]=None) -> SpaceRuntime: r = get_session().get(f'{self.endpoint}/api/spaces/{repo_id}/runtime', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return SpaceRuntime(r.json())
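# --- Usage sketch (illustrative): configuring a hypothetical Space
# 'user/my-space' with the secret/variable/runtime helpers above:
#
#     api = HfApi()
#     api.add_space_secret("user/my-space", key="API_KEY", value="...", description="backend key")
#     api.add_space_variable("user/my-space", key="MODEL_NAME", value="gpt2")
#     runtime = api.get_space_runtime("user/my-space")  # SpaceRuntime; e.g. runtime.stage
# ---
@validate_hf_hub_args def 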
request_space_hardware(self, repo_id: str, hardware: SpaceHardware, *, token: Union[bool, str, None]=None, sleep_time: Optional[int]=None) -> SpaceRuntime: if sleep_time is not None and hardware == SpaceHardware.CPU_BASIC: warnings.warn("If your Space runs on the default 'cpu-basic' hardware, it will go to sleep if inactive for more than 48 hours. This value is not configurable. If you don't want your Space to deactivate or if you want to set a custom sleep time, you need to upgrade to a paid Hardware.", UserWarning) payload: Dict[str, Any] = {'flavor': hardware} if sleep_time is not None: payload['sleepTimeSeconds'] = sleep_time r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/hardware', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(r) return SpaceRuntime(r.json()) @validate_hf_hub_args def set_space_sleep_time(self, repo_id: str, sleep_time: int, *, token: Union[bool, str, None]=None) -> SpaceRuntime: r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/sleeptime', headers=self._build_hf_headers(token=token), json={'seconds': sleep_time}) hf_raise_for_status(r) runtime = SpaceRuntime(r.json()) hardware = runtime.requested_hardware or runtime.hardware if hardware == SpaceHardware.CPU_BASIC: warnings.warn("If your Space runs on the default 'cpu-basic' hardware, it will go to sleep if inactive for more than 48 hours. This value is not configurable. If you don't want your Space to deactivate or if you want to set a custom sleep time, you need to upgrade to a paid Hardware.", UserWarning) return runtime @validate_hf_hub_args def pause_space(self, repo_id: str, *, token: Union[bool, str, None]=None) -> SpaceRuntime: r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/pause', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return SpaceRuntime(r.json()) @validate_hf_hub_args def restart_space(self, repo_id: str, *, token: Union[bool, str, None]=None, factory_reboot: bool=False) -> SpaceRuntime: params = {} if factory_reboot: params['factory'] = 'true' r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/restart', headers=self._build_hf_headers(token=token), params=params) hf_raise_for_status(r) return SpaceRuntime(r.json()) @validate_hf_hub_args def duplicate_space(self, from_id: str, to_id: Optional[str]=None, *, private: Optional[bool]=None, token: Union[bool, str, None]=None, exist_ok: bool=False, hardware: Optional[SpaceHardware]=None, storage: Optional[SpaceStorage]=None, sleep_time: Optional[int]=None, secrets: Optional[List[Dict[str, str]]]=None, variables: Optional[List[Dict[str, str]]]=None) -> RepoUrl: parsed_to_id = RepoUrl(to_id) if to_id is not None else None to_namespace = parsed_to_id.namespace if parsed_to_id is not None and parsed_to_id.namespace is not None else self.whoami(token)['name'] to_repo_name = parsed_to_id.repo_name if to_id is not None else RepoUrl(from_id).repo_name payload: Dict[str, Any] = {'repository': f'{to_namespace}/{to_repo_name}'} keys = ['private', 'hardware', 'storageTier', 'sleepTimeSeconds', 'secrets', 'variables'] values = [private, hardware, storage, sleep_time, secrets, variables] payload.update({k: v for (k, v) in zip(keys, values) if v is not None}) if sleep_time is not None and hardware == SpaceHardware.CPU_BASIC: warnings.warn("If your Space runs on the default 'cpu-basic' hardware, it will go to sleep if inactive for more than 48 hours. This value is not configurable. 
If you don't want your Space to deactivate or if you want to set a custom sleep time, you need to upgrade to a paid Hardware.", UserWarning) r = get_session().post(f'{self.endpoint}/api/spaces/{from_id}/duplicate', headers=self._build_hf_headers(token=token), json=payload) try: hf_raise_for_status(r) except HTTPError as err: if exist_ok and err.response.status_code == 409: pass else: raise return RepoUrl(r.json()['url'], endpoint=self.endpoint) @validate_hf_hub_args def request_space_storage(self, repo_id: str, storage: SpaceStorage, *, token: Union[bool, str, None]=None) -> SpaceRuntime: payload: Dict[str, SpaceStorage] = {'tier': storage} r = get_session().post(f'{self.endpoint}/api/spaces/{repo_id}/storage', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(r) return SpaceRuntime(r.json()) @validate_hf_hub_args def delete_space_storage(self, repo_id: str, *, token: Union[bool, str, None]=None) -> SpaceRuntime: r = get_session().delete(f'{self.endpoint}/api/spaces/{repo_id}/storage', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return SpaceRuntime(r.json()) def list_inference_endpoints(self, namespace: Optional[str]=None, *, token: Union[bool, str, None]=None) -> List[InferenceEndpoint]: if namespace == '*': user = self.whoami(token=token) endpoints: List[InferenceEndpoint] = self.list_inference_endpoints(namespace=self._get_namespace(token=token), token=token) for org in user.get('orgs', []): try: endpoints += self.list_inference_endpoints(namespace=org['name'], token=token) except HfHubHTTPError as error: if error.response.status_code == 401: logger.debug("Cannot list Inference Endpoints for org '%s': %s", org['name'], error) return endpoints namespace = namespace or self._get_namespace(token=token) response = get_session().get(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) return [InferenceEndpoint.from_raw(endpoint, namespace=namespace, token=token) for endpoint in response.json()['items']] def create_inference_endpoint(self, name: str, *, repository: str, framework: str, accelerator: str, instance_size: str, instance_type: str, region: str, vendor: str, account_id: Optional[str]=None, min_replica: int=0, max_replica: int=1, scale_to_zero_timeout: int=15, revision: Optional[str]=None, task: Optional[str]=None, custom_image: Optional[Dict]=None, secrets: Optional[Dict[str, str]]=None, type: InferenceEndpointType=InferenceEndpointType.PROTECTED, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) image = {'custom': custom_image} if custom_image is not None else {'huggingface': {}} payload: Dict = {'accountId': account_id, 'compute': {'accelerator': accelerator, 'instanceSize': instance_size, 'instanceType': instance_type, 'scaling': {'maxReplica': max_replica, 'minReplica': min_replica, 'scaleToZeroTimeout': scale_to_zero_timeout}}, 'model': {'framework': framework, 'repository': repository, 'revision': revision, 'task': task, 'image': image}, 'name': name, 'provider': {'region': region, 'vendor': vendor}, 'type': type} if secrets: payload['model']['secrets'] = secrets response = get_session().post(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(response) return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token)
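# --- Usage sketch (illustrative): creating an endpoint with the method above.
# The endpoint name and the compute values (accelerator, instance_size,
# instance_type, region, vendor) are placeholders; valid values depend on the
# chosen cloud provider:
#
#     api = HfApi()
#     endpoint = api.create_inference_endpoint(
#         "my-endpoint", repository="gpt2", framework="pytorch", task="text-generation",
#         accelerator="cpu", instance_size="x2", instance_type="intel-icl",
#         region="us-east-1", vendor="aws")
#     endpoint.wait()  # poll until the endpoint is deployed
# ---
def get_inference_endpoint(self, 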
name: str, *, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) response = get_session().get(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token) def update_inference_endpoint(self, name: str, *, accelerator: Optional[str]=None, instance_size: Optional[str]=None, instance_type: Optional[str]=None, min_replica: Optional[int]=None, max_replica: Optional[int]=None, scale_to_zero_timeout: Optional[int]=None, repository: Optional[str]=None, framework: Optional[str]=None, revision: Optional[str]=None, task: Optional[str]=None, custom_image: Optional[Dict]=None, secrets: Optional[Dict[str, str]]=None, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) payload: Dict = defaultdict(lambda : defaultdict(dict)) if accelerator is not None: payload['compute']['accelerator'] = accelerator if instance_size is not None: payload['compute']['instanceSize'] = instance_size if instance_type is not None: payload['compute']['instanceType'] = instance_type if max_replica is not None: payload['compute']['scaling']['maxReplica'] = max_replica if min_replica is not None: payload['compute']['scaling']['minReplica'] = min_replica if scale_to_zero_timeout is not None: payload['compute']['scaling']['scaleToZeroTimeout'] = scale_to_zero_timeout if repository is not None: payload['model']['repository'] = repository if framework is not None: payload['model']['framework'] = framework if revision is not None: payload['model']['revision'] = revision if task is not None: payload['model']['task'] = task if custom_image is not None: payload['model']['image'] = {'custom': custom_image} if secrets is not None: payload['model']['secrets'] = secrets response = get_session().put(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}', headers=self._build_hf_headers(token=token), json=payload) hf_raise_for_status(response) return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token) def delete_inference_endpoint(self, name: str, *, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> None: namespace = namespace or self._get_namespace(token=token) response = get_session().delete(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) def pause_inference_endpoint(self, name: str, *, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) response = get_session().post(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}/pause', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token) def resume_inference_endpoint(self, name: str, *, namespace: Optional[str]=None, running_ok: bool=True, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) response = get_session().post(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}/resume', headers=self._build_hf_headers(token=token)) try: hf_raise_for_status(response) except HfHubHTTPError as error: 
if running_ok and error.response.status_code == 400 and ('already running' in error.response.text): return self.get_inference_endpoint(name, namespace=namespace, token=token) raise return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token) def scale_to_zero_inference_endpoint(self, name: str, *, namespace: Optional[str]=None, token: Union[bool, str, None]=None) -> InferenceEndpoint: namespace = namespace or self._get_namespace(token=token) response = get_session().post(f'{constants.INFERENCE_ENDPOINTS_ENDPOINT}/endpoint/{namespace}/{name}/scale-to-zero', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) return InferenceEndpoint.from_raw(response.json(), namespace=namespace, token=token) def _get_namespace(self, token: Union[bool, str, None]=None) -> str: me = self.whoami(token=token) if me['type'] == 'user': return me['name'] else: raise ValueError("Cannot determine default namespace. You must provide a 'namespace' as input or be logged in as a user.") @validate_hf_hub_args def list_collections(self, *, owner: Union[List[str], str, None]=None, item: Union[List[str], str, None]=None, sort: Optional[Literal['lastModified', 'trending', 'upvotes']]=None, limit: Optional[int]=None, token: Union[bool, str, None]=None) -> Iterable[Collection]: path = f'{self.endpoint}/api/collections' headers = self._build_hf_headers(token=token) params: Dict = {} if owner is not None: params.update({'owner': owner}) if item is not None: params.update({'item': item}) if sort is not None: params.update({'sort': sort}) if limit is not None: params.update({'limit': limit}) items = paginate(path, headers=headers, params=params) if limit is not None: items = islice(items, limit) for (position, collection_data) in enumerate(items): yield Collection(position=position, **collection_data) def get_collection(self, collection_slug: str, *, token: Union[bool, str, None]=None) -> Collection: r = get_session().get(f'{self.endpoint}/api/collections/{collection_slug}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return Collection(**{**r.json(), 'endpoint': self.endpoint}) def create_collection(self, title: str, *, namespace: Optional[str]=None, description: Optional[str]=None, private: bool=False, exists_ok: bool=False, token: Union[bool, str, None]=None) -> Collection: if namespace is None: namespace = self.whoami(token)['name'] payload = {'title': title, 'namespace': namespace, 'private': private} if description is not None: payload['description'] = description r = get_session().post(f'{self.endpoint}/api/collections', headers=self._build_hf_headers(token=token), json=payload) try: hf_raise_for_status(r) except HTTPError as err: if exists_ok and err.response.status_code == 409: slug = r.json()['slug'] return self.get_collection(slug, token=token) else: raise return Collection(**{**r.json(), 'endpoint': self.endpoint})
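# --- Usage sketch (illustrative): building a collection with the helpers
# above ('add_collection_item' is defined further below). The collection
# title is arbitrary:
#
#     api = HfApi()
#     collection = api.create_collection(title="Favorite text models")
#     collection = api.add_collection_item(collection.slug, item_id="gpt2", item_type="model")
# ---
def update_collection_metadata(self, collection_slug: str, *, title: Optional[str]=None, description: Optional[str]=None, position: Optional[int]=None, private: Optional[bool]=None, theme: Optional[str]=None, token: Union[bool, str, None]=None) -> Collection: payload = {'position': position, 'private': private, 'theme': theme, 'title': title, 'description': description} r = get_session().patch(f'{self.endpoint}/api/collections/{collection_slug}', headers=self._build_hf_headers(token=token), json={key: value for (key, value) in payload.items() if value is not None}) hf_raise_for_status(r) return Collection(**{**r.json()['data'], 'endpoint': 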
self.endpoint}) def delete_collection(self, collection_slug: str, *, missing_ok: bool=False, token: Union[bool, str, None]=None) -> None: r = get_session().delete(f'{self.endpoint}/api/collections/{collection_slug}', headers=self._build_hf_headers(token=token)) try: hf_raise_for_status(r) except HTTPError as err: if missing_ok and err.response.status_code == 404: return else: raise def add_collection_item(self, collection_slug: str, item_id: str, item_type: CollectionItemType_T, *, note: Optional[str]=None, exists_ok: bool=False, token: Union[bool, str, None]=None) -> Collection: payload: Dict[str, Any] = {'item': {'id': item_id, 'type': item_type}} if note is not None: payload['note'] = note r = get_session().post(f'{self.endpoint}/api/collections/{collection_slug}/items', headers=self._build_hf_headers(token=token), json=payload) try: hf_raise_for_status(r) except HTTPError as err: if exists_ok and err.response.status_code == 409: return self.get_collection(collection_slug, token=token) else: raise return Collection(**{**r.json(), 'endpoint': self.endpoint}) def update_collection_item(self, collection_slug: str, item_object_id: str, *, note: Optional[str]=None, position: Optional[int]=None, token: Union[bool, str, None]=None) -> None: payload = {'position': position, 'note': note} r = get_session().patch(f'{self.endpoint}/api/collections/{collection_slug}/items/{item_object_id}', headers=self._build_hf_headers(token=token), json={key: value for (key, value) in payload.items() if value is not None}) hf_raise_for_status(r) def delete_collection_item(self, collection_slug: str, item_object_id: str, *, missing_ok: bool=False, token: Union[bool, str, None]=None) -> None: r = get_session().delete(f'{self.endpoint}/api/collections/{collection_slug}/items/{item_object_id}', headers=self._build_hf_headers(token=token)) try: hf_raise_for_status(r) except HTTPError as err: if missing_ok and err.response.status_code == 404: return else: raise @validate_hf_hub_args def list_pending_access_requests(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> List[AccessRequest]: return self._list_access_requests(repo_id, 'pending', repo_type=repo_type, token=token) @validate_hf_hub_args def list_accepted_access_requests(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> List[AccessRequest]: return self._list_access_requests(repo_id, 'accepted', repo_type=repo_type, token=token) @validate_hf_hub_args def list_rejected_access_requests(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> List[AccessRequest]: return self._list_access_requests(repo_id, 'rejected', repo_type=repo_type, token=token) def _list_access_requests(self, repo_id: str, status: Literal['accepted', 'rejected', 'pending'], repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> List[AccessRequest]: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL response = get_session().get(f'{constants.ENDPOINT}/api/{repo_type}s/{repo_id}/user-access-request/{status}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) return [AccessRequest(username=request['user']['user'], fullname=request['user']['fullname'], email=request['user'].get('email'), status=request['status'], timestamp=parse_datetime(request['timestamp']), fields=request.get('fields')) for request in 
response.json()] @validate_hf_hub_args def cancel_access_request(self, repo_id: str, user: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: self._handle_access_request(repo_id, user, 'pending', repo_type=repo_type, token=token) @validate_hf_hub_args def accept_access_request(self, repo_id: str, user: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: self._handle_access_request(repo_id, user, 'accepted', repo_type=repo_type, token=token) @validate_hf_hub_args def reject_access_request(self, repo_id: str, user: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: self._handle_access_request(repo_id, user, 'rejected', repo_type=repo_type, token=token) @validate_hf_hub_args def _handle_access_request(self, repo_id: str, user: str, status: Literal['accepted', 'rejected', 'pending'], repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL response = get_session().post(f'{constants.ENDPOINT}/api/{repo_type}s/{repo_id}/user-access-request/handle', headers=self._build_hf_headers(token=token), json={'user': user, 'status': status}) hf_raise_for_status(response) @validate_hf_hub_args def grant_access(self, repo_id: str, user: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') if repo_type is None: repo_type = constants.REPO_TYPE_MODEL response = get_session().post(f'{constants.ENDPOINT}/api/{repo_type}s/{repo_id}/user-access-request/grant', headers=self._build_hf_headers(token=token), json={'user': user}) hf_raise_for_status(response) @validate_hf_hub_args def get_webhook(self, webhook_id: str, *, token: Union[bool, str, None]=None) -> WebhookInfo: response = get_session().get(f'{constants.ENDPOINT}/api/settings/webhooks/{webhook_id}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhook_data = response.json()['webhook'] watched_items = [WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook_data['watched']] webhook = WebhookInfo(id=webhook_data['id'], url=webhook_data['url'], watched=watched_items, domains=webhook_data['domains'], secret=webhook_data.get('secret'), disabled=webhook_data['disabled']) return webhook @validate_hf_hub_args def list_webhooks(self, *, token: Union[bool, str, None]=None) -> List[WebhookInfo]: response = get_session().get(f'{constants.ENDPOINT}/api/settings/webhooks', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhooks_data = response.json() return [WebhookInfo(id=webhook['id'], url=webhook['url'], watched=[WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook['watched']], domains=webhook['domains'], secret=webhook.get('secret'), disabled=webhook['disabled']) for webhook in webhooks_data]
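# --- Usage sketch (illustrative): moderating a hypothetical gated repo
# 'user/my-model' and inspecting account webhooks with the helpers above:
#
#     api = HfApi()
#     for request in api.list_pending_access_requests("user/my-model"):
#         api.accept_access_request("user/my-model", user=request.username)
#     for webhook in api.list_webhooks():
#         print(webhook.id, webhook.url, webhook.disabled)
# ---
@validate_hf_hub_args def create_webhook(self, *, url: str, watched: List[Union[Dict, WebhookWatchedItem]], domains: Optional[List[constants.WEBHOOK_DOMAIN_T]]=None, secret: Optional[str]=None, token: Union[bool, str, None]=None) -> WebhookInfo: watched_dicts = [asdict(item) if isinstance(item, WebhookWatchedItem) else item for item in watched] response = get_session().post(f'{constants.ENDPOINT}/api/settings/webhooks', 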
json={'watched': watched_dicts, 'url': url, 'domains': domains, 'secret': secret}, headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhook_data = response.json()['webhook'] watched_items = [WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook_data['watched']] webhook = WebhookInfo(id=webhook_data['id'], url=webhook_data['url'], watched=watched_items, domains=webhook_data['domains'], secret=webhook_data.get('secret'), disabled=webhook_data['disabled']) return webhook @validate_hf_hub_args def update_webhook(self, webhook_id: str, *, url: Optional[str]=None, watched: Optional[List[Union[Dict, WebhookWatchedItem]]]=None, domains: Optional[List[constants.WEBHOOK_DOMAIN_T]]=None, secret: Optional[str]=None, token: Union[bool, str, None]=None) -> WebhookInfo: if watched is None: watched = [] watched_dicts = [asdict(item) if isinstance(item, WebhookWatchedItem) else item for item in watched] response = get_session().post(f'{constants.ENDPOINT}/api/settings/webhooks/{webhook_id}', json={'watched': watched_dicts, 'url': url, 'domains': domains, 'secret': secret}, headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhook_data = response.json()['webhook'] watched_items = [WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook_data['watched']] webhook = WebhookInfo(id=webhook_data['id'], url=webhook_data['url'], watched=watched_items, domains=webhook_data['domains'], secret=webhook_data.get('secret'), disabled=webhook_data['disabled']) return webhook @validate_hf_hub_args def enable_webhook(self, webhook_id: str, *, token: Union[bool, str, None]=None) -> WebhookInfo: response = get_session().post(f'{constants.ENDPOINT}/api/settings/webhooks/{webhook_id}/enable', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhook_data = response.json()['webhook'] watched_items = [WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook_data['watched']] webhook = WebhookInfo(id=webhook_data['id'], url=webhook_data['url'], watched=watched_items, domains=webhook_data['domains'], secret=webhook_data.get('secret'), disabled=webhook_data['disabled']) return webhook @validate_hf_hub_args def disable_webhook(self, webhook_id: str, *, token: Union[bool, str, None]=None) -> WebhookInfo: response = get_session().post(f'{constants.ENDPOINT}/api/settings/webhooks/{webhook_id}/disable', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) webhook_data = response.json()['webhook'] watched_items = [WebhookWatchedItem(type=item['type'], name=item['name']) for item in webhook_data['watched']] webhook = WebhookInfo(id=webhook_data['id'], url=webhook_data['url'], watched=watched_items, domains=webhook_data['domains'], secret=webhook_data.get('secret'), disabled=webhook_data['disabled']) return webhook @validate_hf_hub_args def delete_webhook(self, webhook_id: str, *, token: Union[bool, str, None]=None) -> None: response = get_session().delete(f'{constants.ENDPOINT}/api/settings/webhooks/{webhook_id}', headers=self._build_hf_headers(token=token)) hf_raise_for_status(response) def _build_hf_headers(self, token: Union[bool, str, None]=None, is_write_action: bool=False, library_name: Optional[str]=None, library_version: Optional[str]=None, user_agent: Union[Dict, str, None]=None) -> Dict[str, str]: if token is None: token = self.token return build_hf_headers(token=token, is_write_action=is_write_action, library_name=library_name or self.library_name, 
library_version=library_version or self.library_version, user_agent=user_agent or self.user_agent, headers=self.headers) def _prepare_folder_deletions(self, repo_id: str, repo_type: Optional[str], revision: Optional[str], path_in_repo: str, delete_patterns: Optional[Union[List[str], str]], token: Union[bool, str, None]=None) -> List[CommitOperationDelete]: if delete_patterns is None: return [] filenames = self.list_repo_files(repo_id=repo_id, revision=revision, repo_type=repo_type, token=token) if path_in_repo and path_in_repo not in ('.', './'): path_in_repo = path_in_repo.strip('/') + '/' relpath_to_abspath = {file[len(path_in_repo):]: file for file in filenames if file.startswith(path_in_repo)} else: relpath_to_abspath = {file: file for file in filenames} return [CommitOperationDelete(path_in_repo=relpath_to_abspath[relpath], is_folder=False) for relpath in filter_repo_objects(relpath_to_abspath.keys(), allow_patterns=delete_patterns) if relpath_to_abspath[relpath] != '.gitattributes'] def _prepare_upload_folder_additions(self, folder_path: Union[str, Path], path_in_repo: str, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> List[CommitOperationAdd]: folder_path = Path(folder_path).expanduser().resolve() if not folder_path.is_dir(): raise ValueError(f"Provided path: '{folder_path}' is not a directory") relpath_to_abspath = {path.relative_to(folder_path).as_posix(): path for path in sorted(folder_path.glob('**/*')) if path.is_file()} filtered_repo_objects = list(filter_repo_objects(relpath_to_abspath.keys(), allow_patterns=allow_patterns, ignore_patterns=ignore_patterns)) prefix = f"{path_in_repo.strip('/')}/" if path_in_repo else '' if 'README.md' in filtered_repo_objects: self._validate_yaml(content=relpath_to_abspath['README.md'].read_text(), repo_type=repo_type, token=token) return [CommitOperationAdd(path_or_fileobj=relpath_to_abspath[relpath], path_in_repo=prefix + relpath) for relpath in filtered_repo_objects] def _validate_yaml(self, content: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None): repo_type = repo_type if repo_type is not None else constants.REPO_TYPE_MODEL headers = self._build_hf_headers(token=token) response = get_session().post(f'{self.endpoint}/api/validate-yaml', json={'content': content, 'repoType': repo_type}, headers=headers) response_content = response.json() message = '\n'.join([f"- {warning.get('message')}" for warning in response_content.get('warnings', [])]) if message: warnings.warn(f'Warnings while validating metadata in README.md:\n{message}') try: hf_raise_for_status(response) except BadRequestError as e: errors = response_content.get('errors', []) message = '\n'.join([f"- {error.get('message')}" for error in errors]) raise ValueError(f'Invalid metadata in README.md.\n{message}') from e def get_user_overview(self, username: str, token: Union[bool, str, None]=None) -> User: r = get_session().get(f'{constants.ENDPOINT}/api/users/{username}/overview', headers=self._build_hf_headers(token=token)) hf_raise_for_status(r) return User(**r.json()) def list_organization_members(self, organization: str, token: Union[bool, str, None]=None) -> Iterable[User]: for member in paginate(path=f'{constants.ENDPOINT}/api/organizations/{organization}/members', params={}, headers=self._build_hf_headers(token=token)): yield User(**member) def list_user_followers(self, username: str, token: Union[bool, str, None]=None) -> 
Iterable[User]: for follower in paginate(path=f'{constants.ENDPOINT}/api/users/{username}/followers', params={}, headers=self._build_hf_headers(token=token)): yield User(**follower) def list_user_following(self, username: str, token: Union[bool, str, None]=None) -> Iterable[User]: for followed_user in paginate(path=f'{constants.ENDPOINT}/api/users/{username}/following', params={}, headers=self._build_hf_headers(token=token)): yield User(**followed_user) def auth_check(self, repo_id: str, *, repo_type: Optional[str]=None, token: Union[bool, str, None]=None) -> None: headers = self._build_hf_headers(token=token) if repo_type is None: repo_type = constants.REPO_TYPE_MODEL if repo_type not in constants.REPO_TYPES: raise ValueError(f'Invalid repo type, must be one of {constants.REPO_TYPES}') path = f'{self.endpoint}/api/{repo_type}s/{repo_id}/auth-check' r = get_session().get(path, headers=headers) hf_raise_for_status(r) def _parse_revision_from_pr_url(pr_url: str) -> str: re_match = re.match(_REGEX_DISCUSSION_URL, pr_url) if re_match is None: raise RuntimeError(f"Unexpected response from the hub, expected a Pull Request URL but got: '{pr_url}'") return f'refs/pr/{re_match[1]}' api = HfApi() whoami = api.whoami auth_check = api.auth_check get_token_permission = api.get_token_permission list_models = api.list_models model_info = api.model_info list_datasets = api.list_datasets dataset_info = api.dataset_info list_spaces = api.list_spaces space_info = api.space_info repo_exists = api.repo_exists revision_exists = api.revision_exists file_exists = api.file_exists repo_info = api.repo_info list_repo_files = api.list_repo_files list_repo_refs = api.list_repo_refs list_repo_commits = api.list_repo_commits list_repo_tree = api.list_repo_tree get_paths_info = api.get_paths_info list_metrics = api.list_metrics get_model_tags = api.get_model_tags get_dataset_tags = api.get_dataset_tags create_commit = api.create_commit create_repo = api.create_repo delete_repo = api.delete_repo update_repo_visibility = api.update_repo_visibility update_repo_settings = api.update_repo_settings super_squash_history = api.super_squash_history move_repo = api.move_repo upload_file = api.upload_file upload_folder = api.upload_folder delete_file = api.delete_file delete_folder = api.delete_folder delete_files = api.delete_files create_commits_on_pr = api.create_commits_on_pr upload_large_folder = api.upload_large_folder preupload_lfs_files = api.preupload_lfs_files create_branch = api.create_branch delete_branch = api.delete_branch create_tag = api.create_tag delete_tag = api.delete_tag get_full_repo_name = api.get_full_repo_name get_safetensors_metadata = api.get_safetensors_metadata parse_safetensors_file_metadata = api.parse_safetensors_file_metadata run_as_future = api.run_as_future list_liked_repos = api.list_liked_repos list_repo_likers = api.list_repo_likers like = api.like unlike = api.unlike get_discussion_details = api.get_discussion_details get_repo_discussions = api.get_repo_discussions create_discussion = api.create_discussion create_pull_request = api.create_pull_request change_discussion_status = api.change_discussion_status comment_discussion = api.comment_discussion edit_discussion_comment = api.edit_discussion_comment rename_discussion = api.rename_discussion merge_pull_request = api.merge_pull_request add_space_secret = api.add_space_secret delete_space_secret = api.delete_space_secret get_space_variables = api.get_space_variables add_space_variable = api.add_space_variable delete_space_variable = 
api.delete_space_variable get_space_runtime = api.get_space_runtime request_space_hardware = api.request_space_hardware set_space_sleep_time = api.set_space_sleep_time pause_space = api.pause_space restart_space = api.restart_space duplicate_space = api.duplicate_space request_space_storage = api.request_space_storage delete_space_storage = api.delete_space_storage list_inference_endpoints = api.list_inference_endpoints create_inference_endpoint = api.create_inference_endpoint get_inference_endpoint = api.get_inference_endpoint update_inference_endpoint = api.update_inference_endpoint delete_inference_endpoint = api.delete_inference_endpoint pause_inference_endpoint = api.pause_inference_endpoint resume_inference_endpoint = api.resume_inference_endpoint scale_to_zero_inference_endpoint = api.scale_to_zero_inference_endpoint get_collection = api.get_collection list_collections = api.list_collections create_collection = api.create_collection update_collection_metadata = api.update_collection_metadata delete_collection = api.delete_collection add_collection_item = api.add_collection_item update_collection_item = api.update_collection_item delete_collection_item = api.delete_collection_item list_pending_access_requests = api.list_pending_access_requests list_accepted_access_requests = api.list_accepted_access_requests list_rejected_access_requests = api.list_rejected_access_requests cancel_access_request = api.cancel_access_request accept_access_request = api.accept_access_request reject_access_request = api.reject_access_request grant_access = api.grant_access create_webhook = api.create_webhook disable_webhook = api.disable_webhook delete_webhook = api.delete_webhook enable_webhook = api.enable_webhook get_webhook = api.get_webhook list_webhooks = api.list_webhooks update_webhook = api.update_webhook get_user_overview = api.get_user_overview list_organization_members = api.list_organization_members list_user_followers = api.list_user_followers list_user_following = api.list_user_following
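# The assignments above re-export the bound methods of the default `api = HfApi()`
# client as module-level helpers, so the two call styles below are equivalent.
# Note that the helpers always go through that default client; instantiate
# HfApi(endpoint=..., token=...) directly to customize it. Illustrative sketch:
#
#     import huggingface_hub
#     huggingface_hub.model_info("gpt2")           # module-level helper
#     huggingface_hub.HfApi().model_info("gpt2")   # explicit client, same result
# File: huggingface_hub-main/src/huggingface_hub/hf_file_system.py import inspect import os import re import tempfile from collections import deque from dataclasses import dataclass, field from datetime import datetime from itertools import chain from pathlib import Path from typing import Any, Dict, List, NoReturn, Optional, Tuple, Union from urllib.parse import quote, unquote import fsspec from fsspec.callbacks import _DEFAULT_CALLBACK, NoOpCallback, TqdmCallback from fsspec.utils import isfilelike from requests import Response from . 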
import constants from ._commit_api import CommitOperationCopy, CommitOperationDelete from .errors import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from .file_download import hf_hub_url, http_get from .hf_api import HfApi, LastCommitInfo, RepoFile from .utils import HFValidationError, hf_raise_for_status, http_backoff SPECIAL_REFS_REVISION_REGEX = re.compile('\n (^refs\\/convert\\/\\w+) # `refs/convert/parquet` revisions\n |\n (^refs\\/pr\\/\\d+) # PR revisions\n ', re.VERBOSE) @dataclass class HfFileSystemResolvedPath: repo_type: str repo_id: str revision: str path_in_repo: str _raw_revision: Optional[str] = field(default=None, repr=False) def unresolve(self) -> str: repo_path = constants.REPO_TYPES_URL_PREFIXES.get(self.repo_type, '') + self.repo_id if self._raw_revision: return f'{repo_path}@{self._raw_revision}/{self.path_in_repo}'.rstrip('/') elif self.revision != constants.DEFAULT_REVISION: return f'{repo_path}@{safe_revision(self.revision)}/{self.path_in_repo}'.rstrip('/') else: return f'{repo_path}/{self.path_in_repo}'.rstrip('/') class HfFileSystem(fsspec.AbstractFileSystem): root_marker = '' protocol = 'hf' def __init__(self, *args, endpoint: Optional[str]=None, token: Union[bool, str, None]=None, **storage_options): super().__init__(*args, **storage_options) self.endpoint = endpoint or constants.ENDPOINT self.token = token self._api = HfApi(endpoint=endpoint, token=token) self._repo_and_revision_exists_cache: Dict[Tuple[str, str, Optional[str]], Tuple[bool, Optional[Exception]]] = {} def _repo_and_revision_exist(self, repo_type: str, repo_id: str, revision: Optional[str]) -> Tuple[bool, Optional[Exception]]: if (repo_type, repo_id, revision) not in self._repo_and_revision_exists_cache: try: self._api.repo_info(repo_id, revision=revision, repo_type=repo_type, timeout=constants.HF_HUB_ETAG_TIMEOUT) except (RepositoryNotFoundError, HFValidationError) as e: self._repo_and_revision_exists_cache[repo_type, repo_id, revision] = (False, e) self._repo_and_revision_exists_cache[repo_type, repo_id, None] = (False, e) except RevisionNotFoundError as e: self._repo_and_revision_exists_cache[repo_type, repo_id, revision] = (False, e) self._repo_and_revision_exists_cache[repo_type, repo_id, None] = (True, None) else: self._repo_and_revision_exists_cache[repo_type, repo_id, revision] = (True, None) self._repo_and_revision_exists_cache[repo_type, repo_id, None] = (True, None) return self._repo_and_revision_exists_cache[repo_type, repo_id, revision] def resolve_path(self, path: str, revision: Optional[str]=None) -> HfFileSystemResolvedPath: def _align_revision_in_path_with_revision(revision_in_path: Optional[str], revision: Optional[str]) -> Optional[str]: if revision is not None: if revision_in_path is not None and revision_in_path != revision: raise ValueError(f'Revision specified in path ("{revision_in_path}") and in `revision` argument ("{revision}") are not the same.') else: revision = revision_in_path return revision path = self._strip_protocol(path) if not path: raise NotImplementedError('Access to repositories lists is not implemented.') elif path.split('/')[0] + '/' in constants.REPO_TYPES_URL_PREFIXES.values(): if '/' not in path: raise NotImplementedError('Access to repositories lists is not implemented.') (repo_type, path) = path.split('/', 1) repo_type = constants.REPO_TYPES_MAPPING[repo_type] else: repo_type = constants.REPO_TYPE_MODEL if path.count('/') > 0: if '@' in path: (repo_id, revision_in_path) = path.split('@', 1) if '/' in revision_in_path: match = 
SPECIAL_REFS_REVISION_REGEX.search(revision_in_path) if match is not None and revision in (None, match.group()): path_in_repo = SPECIAL_REFS_REVISION_REGEX.sub('', revision_in_path).lstrip('/') revision_in_path = match.group() else: (revision_in_path, path_in_repo) = revision_in_path.split('/', 1) else: path_in_repo = '' revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision) (repo_and_revision_exist, err) = self._repo_and_revision_exist(repo_type, repo_id, revision) if not repo_and_revision_exist: _raise_file_not_found(path, err) else: revision_in_path = None repo_id_with_namespace = '/'.join(path.split('/')[:2]) path_in_repo_with_namespace = '/'.join(path.split('/')[2:]) repo_id_without_namespace = path.split('/')[0] path_in_repo_without_namespace = '/'.join(path.split('/')[1:]) repo_id = repo_id_with_namespace path_in_repo = path_in_repo_with_namespace (repo_and_revision_exist, err) = self._repo_and_revision_exist(repo_type, repo_id, revision) if not repo_and_revision_exist: if isinstance(err, (RepositoryNotFoundError, HFValidationError)): repo_id = repo_id_without_namespace path_in_repo = path_in_repo_without_namespace (repo_and_revision_exist, _) = self._repo_and_revision_exist(repo_type, repo_id, revision) if not repo_and_revision_exist: _raise_file_not_found(path, err) else: _raise_file_not_found(path, err) else: repo_id = path path_in_repo = '' if '@' in path: (repo_id, revision_in_path) = path.split('@', 1) revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision) else: revision_in_path = None (repo_and_revision_exist, _) = self._repo_and_revision_exist(repo_type, repo_id, revision) if not repo_and_revision_exist: raise NotImplementedError('Access to repositories lists is not implemented.') revision = revision if revision is not None else constants.DEFAULT_REVISION return HfFileSystemResolvedPath(repo_type, repo_id, revision, path_in_repo, _raw_revision=revision_in_path) def invalidate_cache(self, path: Optional[str]=None) -> None: if not path: self.dircache.clear() self._repo_and_revision_exists_cache.clear() else: path = self.resolve_path(path).unresolve() while path: self.dircache.pop(path, None) path = self._parent(path) def _open(self, path: str, mode: str='rb', revision: Optional[str]=None, block_size: Optional[int]=None, **kwargs) -> 'HfFileSystemFile': if 'a' in mode: raise NotImplementedError('Appending to remote files is not yet supported.') if block_size == 0: return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs) else: return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs) def _rm(self, path: str, revision: Optional[str]=None, **kwargs) -> None: resolved_path = self.resolve_path(path, revision=revision) self._api.delete_file(path_in_repo=resolved_path.path_in_repo, repo_id=resolved_path.repo_id, token=self.token, repo_type=resolved_path.repo_type, revision=resolved_path.revision, commit_message=kwargs.get('commit_message'), commit_description=kwargs.get('commit_description')) self.invalidate_cache(path=resolved_path.unresolve())
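# --- Path convention sketch (illustrative): `resolve_path` above maps an
# fsspec-style path such as 'datasets/user/my-dataset@main/data/train.csv'
# (repo id hypothetical) to HfFileSystemResolvedPath(repo_type='dataset',
# repo_id='user/my-dataset', revision='main', path_in_repo='data/train.csv').
# Typical fsspec-level usage:
#
#     fs = HfFileSystem()
#     fs.ls("datasets/user/my-dataset", detail=False)
#     with fs.open("datasets/user/my-dataset/data/train.csv", "rb") as f:
#         header = f.readline()
# ---
def rm(self, path: str, recursive: bool=False, maxdepth: Optional[int]=None, revision: Optional[str]=None, **kwargs) -> None: resolved_path = self.resolve_path(path, revision=revision) paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth, revision=revision) paths_in_repo = [self.resolve_path(path).path_in_repo for path in paths if not self.isdir(path)] operations = 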
[CommitOperationDelete(path_in_repo=path_in_repo) for path_in_repo in paths_in_repo] commit_message = f'Delete {path} ' commit_message += 'recursively ' if recursive else '' commit_message += f'up to depth {maxdepth} ' if maxdepth is not None else '' self._api.create_commit(repo_id=resolved_path.repo_id, repo_type=resolved_path.repo_type, token=self.token, operations=operations, revision=resolved_path.revision, commit_message=kwargs.get('commit_message', commit_message), commit_description=kwargs.get('commit_description')) self.invalidate_cache(path=resolved_path.unresolve()) def ls(self, path: str, detail: bool=True, refresh: bool=False, revision: Optional[str]=None, **kwargs) -> List[Union[str, Dict[str, Any]]]: resolved_path = self.resolve_path(path, revision=revision) path = resolved_path.unresolve() kwargs = {'expand_info': detail, **kwargs} try: out = self._ls_tree(path, refresh=refresh, revision=revision, **kwargs) except EntryNotFoundError: if not resolved_path.path_in_repo: _raise_file_not_found(path, None) out = self._ls_tree(self._parent(path), refresh=refresh, revision=revision, **kwargs) out = [o for o in out if o['name'] == path] if len(out) == 0: _raise_file_not_found(path, None) return out if detail else [o['name'] for o in out] def _ls_tree(self, path: str, recursive: bool=False, refresh: bool=False, revision: Optional[str]=None, expand_info: bool=True): resolved_path = self.resolve_path(path, revision=revision) path = resolved_path.unresolve() root_path = HfFileSystemResolvedPath(resolved_path.repo_type, resolved_path.repo_id, resolved_path.revision, path_in_repo='', _raw_revision=resolved_path._raw_revision).unresolve() out = [] if path in self.dircache and (not refresh): cached_path_infos = self.dircache[path] out.extend(cached_path_infos) dirs_not_in_dircache = [] if recursive: dirs_to_visit = deque([path_info for path_info in cached_path_infos if path_info['type'] == 'directory']) while dirs_to_visit: dir_info = dirs_to_visit.popleft() if dir_info['name'] not in self.dircache: dirs_not_in_dircache.append(dir_info['name']) else: cached_path_infos = self.dircache[dir_info['name']] out.extend(cached_path_infos) dirs_to_visit.extend([path_info for path_info in cached_path_infos if path_info['type'] == 'directory']) dirs_not_expanded = [] if expand_info: dirs_not_expanded = [self._parent(o['name']) for o in out if o['last_commit'] is None] if recursive and dirs_not_in_dircache or (expand_info and dirs_not_expanded): common_prefix = os.path.commonprefix(dirs_not_in_dircache + dirs_not_expanded) common_path = common_prefix.rstrip('/') if common_prefix.endswith('/') or common_prefix == root_path or common_prefix in chain(dirs_not_in_dircache, dirs_not_expanded) else self._parent(common_prefix) out = [o for o in out if not o['name'].startswith(common_path + '/')] for cached_path in self.dircache: if cached_path.startswith(common_path + '/'): self.dircache.pop(cached_path, None) self.dircache.pop(common_path, None) out.extend(self._ls_tree(common_path, recursive=recursive, refresh=True, revision=revision, expand_info=expand_info)) else: tree = self._api.list_repo_tree(resolved_path.repo_id, resolved_path.path_in_repo, recursive=recursive, expand=expand_info, revision=resolved_path.revision, repo_type=resolved_path.repo_type) for path_info in tree: if isinstance(path_info, RepoFile): cache_path_info = {'name': root_path + '/' + path_info.path, 'size': path_info.size, 'type': 'file', 'blob_id': path_info.blob_id, 'lfs': path_info.lfs, 'last_commit': path_info.last_commit, 
'security': path_info.security} else: cache_path_info = {'name': root_path + '/' + path_info.path, 'size': 0, 'type': 'directory', 'tree_id': path_info.tree_id, 'last_commit': path_info.last_commit} parent_path = self._parent(cache_path_info['name']) self.dircache.setdefault(parent_path, []).append(cache_path_info) out.append(cache_path_info) return out def walk(self, path, *args, **kwargs): kwargs = {'expand_info': kwargs.get('detail', False), **kwargs} path = self.resolve_path(path, revision=kwargs.get('revision')).unresolve() yield from super().walk(path, *args, **kwargs) def glob(self, path, **kwargs): kwargs = {'expand_info': kwargs.get('detail', False), **kwargs} path = self.resolve_path(path, revision=kwargs.get('revision')).unresolve() return super().glob(path, **kwargs) def find(self, path: str, maxdepth: Optional[int]=None, withdirs: bool=False, detail: bool=False, refresh: bool=False, revision: Optional[str]=None, **kwargs) -> Union[List[str], Dict[str, Dict[str, Any]]]: if maxdepth: return super().find(path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, refresh=refresh, revision=revision, **kwargs) resolved_path = self.resolve_path(path, revision=revision) path = resolved_path.unresolve() kwargs = {'expand_info': detail, **kwargs} try: out = self._ls_tree(path, recursive=True, refresh=refresh, revision=resolved_path.revision, **kwargs) except EntryNotFoundError: if self.info(path, revision=revision, **kwargs)['type'] == 'file': out = {path: {}} else: out = {} else: if not withdirs: out = [o for o in out if o['type'] != 'directory'] else: path_info = self.info(path, revision=resolved_path.revision, **kwargs) out = [path_info] + out if path_info['type'] == 'directory' else out out = {o['name']: o for o in out} names = sorted(out) if not detail: return names else: return {name: out[name] for name in names} def cp_file(self, path1: str, path2: str, revision: Optional[str]=None, **kwargs) -> None: resolved_path1 = self.resolve_path(path1, revision=revision) resolved_path2 = self.resolve_path(path2, revision=revision) same_repo = resolved_path1.repo_type == resolved_path2.repo_type and resolved_path1.repo_id == resolved_path2.repo_id if same_repo: commit_message = f'Copy {path1} to {path2}' self._api.create_commit(repo_id=resolved_path1.repo_id, repo_type=resolved_path1.repo_type, revision=resolved_path2.revision, commit_message=kwargs.get('commit_message', commit_message), commit_description=kwargs.get('commit_description', ''), operations=[CommitOperationCopy(src_path_in_repo=resolved_path1.path_in_repo, path_in_repo=resolved_path2.path_in_repo, src_revision=resolved_path1.revision)]) else: with self.open(path1, 'rb', revision=resolved_path1.revision) as f: content = f.read() commit_message = f'Copy {path1} to {path2}' self._api.upload_file(path_or_fileobj=content, path_in_repo=resolved_path2.path_in_repo, repo_id=resolved_path2.repo_id, token=self.token, repo_type=resolved_path2.repo_type, revision=resolved_path2.revision, commit_message=kwargs.get('commit_message', commit_message), commit_description=kwargs.get('commit_description')) self.invalidate_cache(path=resolved_path1.unresolve()) self.invalidate_cache(path=resolved_path2.unresolve()) def modified(self, path: str, **kwargs) -> datetime: info = self.info(path, **kwargs) return info['last_commit']['date'] def info(self, path: str, refresh: bool=False, revision: Optional[str]=None, **kwargs) -> Dict[str, Any]: resolved_path = self.resolve_path(path, revision=revision) path = resolved_path.unresolve() expand_info = 
kwargs.get('expand_info', True) if not resolved_path.path_in_repo: out = {'name': path, 'size': 0, 'type': 'directory'} if expand_info: last_commit = self._api.list_repo_commits(resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision)[-1] out = {**out, 'tree_id': None, 'last_commit': LastCommitInfo(oid=last_commit.commit_id, title=last_commit.title, date=last_commit.created_at)} else: out = None parent_path = self._parent(path) if not expand_info and parent_path not in self.dircache: self.ls(parent_path, expand_info=False) if parent_path in self.dircache: out1 = [o for o in self.dircache[parent_path] if o['name'] == path] if not out1: _raise_file_not_found(path, None) out = out1[0] if refresh or out is None or (expand_info and out and (out['last_commit'] is None)): paths_info = self._api.get_paths_info(resolved_path.repo_id, resolved_path.path_in_repo, expand=expand_info, revision=resolved_path.revision, repo_type=resolved_path.repo_type) if not paths_info: _raise_file_not_found(path, None) path_info = paths_info[0] root_path = HfFileSystemResolvedPath(resolved_path.repo_type, resolved_path.repo_id, resolved_path.revision, path_in_repo='', _raw_revision=resolved_path._raw_revision).unresolve() if isinstance(path_info, RepoFile): out = {'name': root_path + '/' + path_info.path, 'size': path_info.size, 'type': 'file', 'blob_id': path_info.blob_id, 'lfs': path_info.lfs, 'last_commit': path_info.last_commit, 'security': path_info.security} else: out = {'name': root_path + '/' + path_info.path, 'size': 0, 'type': 'directory', 'tree_id': path_info.tree_id, 'last_commit': path_info.last_commit} if not expand_info: out = {k: out[k] for k in ['name', 'size', 'type']} assert out is not None return out def exists(self, path, **kwargs): try: self.info(path, **{**kwargs, 'expand_info': False}) return True except: return False def isdir(self, path): try: return self.info(path, expand_info=False)['type'] == 'directory' except OSError: return False def isfile(self, path): try: return self.info(path, expand_info=False)['type'] == 'file' except: return False def url(self, path: str) -> str: resolved_path = self.resolve_path(path) url = hf_hub_url(resolved_path.repo_id, resolved_path.path_in_repo, repo_type=resolved_path.repo_type, revision=resolved_path.revision, endpoint=self.endpoint) if self.isdir(path): url = url.replace('/resolve/', '/tree/', 1) return url def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, outfile=None, **kwargs) -> None: revision = kwargs.get('revision') unhandled_kwargs = set(kwargs.keys()) - {'revision'} if not isinstance(callback, (NoOpCallback, TqdmCallback)) or len(unhandled_kwargs) > 0: return super().get_file(rpath, lpath, callback=callback, outfile=outfile, **kwargs) if isfilelike(lpath): outfile = lpath elif self.isdir(rpath): os.makedirs(lpath, exist_ok=True) return None if isinstance(lpath, (str, Path)): os.makedirs(os.path.dirname(lpath), exist_ok=True) close_file = False if outfile is None: outfile = open(lpath, 'wb') close_file = True initial_pos = outfile.tell() resolve_remote_path = self.resolve_path(rpath, revision=revision) expected_size = self.info(rpath, revision=revision)['size'] callback.set_size(expected_size) try: http_get(url=hf_hub_url(repo_id=resolve_remote_path.repo_id, revision=resolve_remote_path.revision, filename=resolve_remote_path.path_in_repo, repo_type=resolve_remote_path.repo_type, endpoint=self.endpoint), temp_file=outfile, displayed_filename=rpath, expected_size=expected_size, resume_size=0, 
headers=self._api._build_hf_headers(), _tqdm_bar=callback.tqdm if isinstance(callback, TqdmCallback) else None) outfile.seek(initial_pos) finally: if close_file: outfile.close() @property def transaction(self): raise NotImplementedError('Transactional commits are not supported.') def start_transaction(self): raise NotImplementedError('Transactional commits are not supported.') class HfFileSystemFile(fsspec.spec.AbstractBufferedFile): def __init__(self, fs: HfFileSystem, path: str, revision: Optional[str]=None, **kwargs): try: self.resolved_path = fs.resolve_path(path, revision=revision) except FileNotFoundError as e: if 'w' in kwargs.get('mode', ''): raise FileNotFoundError(f'{e}.\nMake sure the repository and revision exist before writing data.') from e raise if kwargs.get('mode', 'rb') == 'rb': self.details = fs.info(self.resolved_path.unresolve(), expand_info=False) super().__init__(fs, self.resolved_path.unresolve(), **kwargs) self.fs: HfFileSystem def __del__(self): if not hasattr(self, 'resolved_path'): return return super().__del__() def _fetch_range(self, start: int, end: int) -> bytes: headers = {'range': f'bytes={start}-{end - 1}', **self.fs._api._build_hf_headers()} url = hf_hub_url(repo_id=self.resolved_path.repo_id, revision=self.resolved_path.revision, filename=self.resolved_path.path_in_repo, repo_type=self.resolved_path.repo_type, endpoint=self.fs.endpoint) r = http_backoff('GET', url, headers=headers, retry_on_status_codes=(502, 503, 504), timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT) hf_raise_for_status(r) return r.content def _initiate_upload(self) -> None: self.temp_file = tempfile.NamedTemporaryFile(prefix='hffs-', delete=False) def _upload_chunk(self, final: bool=False) -> None: self.buffer.seek(0) block = self.buffer.read() self.temp_file.write(block) if final: self.temp_file.close() self.fs._api.upload_file(path_or_fileobj=self.temp_file.name, path_in_repo=self.resolved_path.path_in_repo, repo_id=self.resolved_path.repo_id, token=self.fs.token, repo_type=self.resolved_path.repo_type, revision=self.resolved_path.revision, commit_message=self.kwargs.get('commit_message'), commit_description=self.kwargs.get('commit_description')) os.remove(self.temp_file.name) self.fs.invalidate_cache(path=self.resolved_path.unresolve()) def read(self, length=-1): if self.mode == 'rb' and (length is None or length == -1) and (self.loc == 0): with self.fs.open(self.path, 'rb', block_size=0) as f: return f.read() return super().read(length) def url(self) -> str: return self.fs.url(self.path) class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile): def __init__(self, fs: HfFileSystem, path: str, mode: str='rb', revision: Optional[str]=None, block_size: int=0, cache_type: str='none', **kwargs): if block_size != 0: raise ValueError(f'HfFileSystemStreamFile only supports block_size=0 but got {block_size}') if cache_type != 'none': raise ValueError(f"HfFileSystemStreamFile only supports cache_type='none' but got {cache_type}") if 'w' in mode: raise ValueError(f"HfFileSystemStreamFile only supports reading but got mode='{mode}'") try: self.resolved_path = fs.resolve_path(path, revision=revision) except FileNotFoundError as e: if 'w' in kwargs.get('mode', ''): raise FileNotFoundError(f'{e}.\nMake sure the repository and revision exist before writing data.') from e self.details = {'name': self.resolved_path.unresolve(), 'size': None} super().__init__(fs, self.resolved_path.unresolve(), mode=mode, block_size=block_size, cache_type=cache_type, **kwargs) self.response: Optional[Response] = None 
self.fs: HfFileSystem def seek(self, loc: int, whence: int=0): if loc == 0 and whence == 1: return if loc == self.loc and whence == 0: return raise ValueError('Cannot seek streaming HF file') def read(self, length: int=-1): read_args = (length,) if length >= 0 else () if self.response is None or self.response.raw.isclosed(): url = hf_hub_url(repo_id=self.resolved_path.repo_id, revision=self.resolved_path.revision, filename=self.resolved_path.path_in_repo, repo_type=self.resolved_path.repo_type, endpoint=self.fs.endpoint) self.response = http_backoff('GET', url, headers=self.fs._api._build_hf_headers(), retry_on_status_codes=(502, 503, 504), stream=True, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT) hf_raise_for_status(self.response) try: out = self.response.raw.read(*read_args) except Exception: self.response.close() url = hf_hub_url(repo_id=self.resolved_path.repo_id, revision=self.resolved_path.revision, filename=self.resolved_path.path_in_repo, repo_type=self.resolved_path.repo_type, endpoint=self.fs.endpoint) self.response = http_backoff('GET', url, headers={'Range': 'bytes=%d-' % self.loc, **self.fs._api._build_hf_headers()}, retry_on_status_codes=(502, 503, 504), stream=True, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT) hf_raise_for_status(self.response) try: out = self.response.raw.read(*read_args) except Exception: self.response.close() raise self.loc += len(out) return out def url(self) -> str: return self.fs.url(self.path) def __del__(self): if not hasattr(self, 'resolved_path'): return return super().__del__() def __reduce__(self): return (reopen, (self.fs, self.path, self.mode, self.blocksize, self.cache.name)) def safe_revision(revision: str) -> str: return revision if SPECIAL_REFS_REVISION_REGEX.match(revision) else safe_quote(revision) def safe_quote(s: str) -> str: return quote(s, safe='') def _raise_file_not_found(path: str, err: Optional[Exception]) -> NoReturn: msg = path if isinstance(err, RepositoryNotFoundError): msg = f'{path} (repository not found)' elif isinstance(err, RevisionNotFoundError): msg = f'{path} (revision not found)' elif isinstance(err, HFValidationError): msg = f'{path} (invalid repository id)' raise FileNotFoundError(msg) from err def reopen(fs: HfFileSystem, path: str, mode: str, block_size: int, cache_type: str): return fs.open(path, mode=mode, block_size=block_size, cache_type=cache_type) for (name, function) in inspect.getmembers(HfFileSystem, predicate=inspect.isfunction): parent = getattr(fsspec.AbstractFileSystem, name, None) if parent is not None and parent.__doc__ is not None: parent_doc = parent.__doc__ parent_doc = parent_doc.replace('Parameters\n ----------\n', 'Args:\n') parent_doc = parent_doc.replace('Returns\n -------\n', 'Return:\n') function.__doc__ = f'\n_Docstring taken from [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.{name})._' + '\n\n' + parent_doc # File: huggingface_hub-main/src/huggingface_hub/hub_mixin.py import inspect import json import os import warnings from dataclasses import asdict, dataclass, is_dataclass from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union from . 
import constants from .errors import EntryNotFoundError, HfHubHTTPError from .file_download import hf_hub_download from .hf_api import HfApi from .repocard import ModelCard, ModelCardData from .utils import SoftTemporaryDirectory, is_jsonable, is_safetensors_available, is_simple_optional_type, is_torch_available, logging, unwrap_simple_optional_type, validate_hf_hub_args if TYPE_CHECKING: from _typeshed import DataclassInstance if is_torch_available(): import torch if is_safetensors_available(): import packaging.version import safetensors from safetensors.torch import load_model as load_model_as_safetensor from safetensors.torch import save_model as save_model_as_safetensor logger = logging.get_logger(__name__) T = TypeVar('T', bound='ModelHubMixin') ARGS_T = TypeVar('ARGS_T') ENCODER_T = Callable[[ARGS_T], Any] DECODER_T = Callable[[Any], ARGS_T] CODER_T = Tuple[ENCODER_T, DECODER_T] DEFAULT_MODEL_CARD = '\n---\n# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1\n# Doc / guide: https://huggingface.co/docs/hub/model-cards\n{{ card_data }}\n---\n\nThis model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:\n- Library: {{ repo_url | default("[More Information Needed]", true) }}\n- Docs: {{ docs_url | default("[More Information Needed]", true) }}\n' @dataclass class MixinInfo: model_card_template: str model_card_data: ModelCardData repo_url: Optional[str] = None docs_url: Optional[str] = None class ModelHubMixin: _hub_mixin_config: Optional[Union[dict, 'DataclassInstance']] = None _hub_mixin_info: MixinInfo _hub_mixin_inject_config: bool _hub_mixin_init_parameters: Dict[str, inspect.Parameter] _hub_mixin_jsonable_default_values: Dict[str, Any] _hub_mixin_jsonable_custom_types: Tuple[Type, ...] _hub_mixin_coders: Dict[Type, CODER_T] def __init_subclass__(cls, *, repo_url: Optional[str]=None, docs_url: Optional[str]=None, model_card_template: str=DEFAULT_MODEL_CARD, language: Optional[List[str]]=None, library_name: Optional[str]=None, license: Optional[str]=None, license_name: Optional[str]=None, license_link: Optional[str]=None, pipeline_tag: Optional[str]=None, tags: Optional[List[str]]=None, coders: Optional[Dict[Type, CODER_T]]=None, languages: Optional[List[str]]=None) -> None: super().__init_subclass__() tags = tags or [] tags.append('model_hub_mixin') info = MixinInfo(model_card_template=model_card_template, model_card_data=ModelCardData()) if hasattr(cls, '_hub_mixin_info'): if model_card_template == DEFAULT_MODEL_CARD: info.model_card_template = cls._hub_mixin_info.model_card_template info.model_card_data = ModelCardData(**cls._hub_mixin_info.model_card_data.to_dict()) info.docs_url = cls._hub_mixin_info.docs_url info.repo_url = cls._hub_mixin_info.repo_url cls._hub_mixin_info = info if languages is not None: warnings.warn('The `languages` argument is deprecated. Use `language` instead. 
This will be removed in `huggingface_hub>=0.27.0`.', DeprecationWarning) language = languages if model_card_template is not None and model_card_template != DEFAULT_MODEL_CARD: info.model_card_template = model_card_template if repo_url is not None: info.repo_url = repo_url if docs_url is not None: info.docs_url = docs_url if language is not None: info.model_card_data.language = language if library_name is not None: info.model_card_data.library_name = library_name if license is not None: info.model_card_data.license = license if license_name is not None: info.model_card_data.license_name = license_name if license_link is not None: info.model_card_data.license_link = license_link if pipeline_tag is not None: info.model_card_data.pipeline_tag = pipeline_tag if tags is not None: if info.model_card_data.tags is not None: info.model_card_data.tags.extend(tags) else: info.model_card_data.tags = tags info.model_card_data.tags = sorted(set(info.model_card_data.tags)) cls._hub_mixin_coders = coders or {} cls._hub_mixin_jsonable_custom_types = tuple(cls._hub_mixin_coders.keys()) cls._hub_mixin_init_parameters = dict(inspect.signature(cls.__init__).parameters) cls._hub_mixin_jsonable_default_values = {param.name: cls._encode_arg(param.default) for param in cls._hub_mixin_init_parameters.values() if param.default is not inspect.Parameter.empty and cls._is_jsonable(param.default)} cls._hub_mixin_inject_config = 'config' in inspect.signature(cls._from_pretrained).parameters def __new__(cls, *args, **kwargs) -> 'ModelHubMixin': instance = super().__new__(cls) if instance._hub_mixin_config is not None: return instance passed_values = {**{key: value for (key, value) in zip(list(cls._hub_mixin_init_parameters)[1:], args)}, **kwargs} if is_dataclass(passed_values.get('config')): instance._hub_mixin_config = passed_values['config'] return instance init_config = {**cls._hub_mixin_jsonable_default_values, **{key: cls._encode_arg(value) for (key, value) in passed_values.items() if instance._is_jsonable(value)}} passed_config = init_config.pop('config', {}) if isinstance(passed_config, dict): init_config.update(passed_config) if init_config != {}: instance._hub_mixin_config = init_config return instance @classmethod def _is_jsonable(cls, value: Any) -> bool: if isinstance(value, cls._hub_mixin_jsonable_custom_types): return True return is_jsonable(value) @classmethod def _encode_arg(cls, arg: Any) -> Any: for (type_, (encoder, _)) in cls._hub_mixin_coders.items(): if isinstance(arg, type_): if arg is None: return None return encoder(arg) return arg @classmethod def _decode_arg(cls, expected_type: Type[ARGS_T], value: Any) -> Optional[ARGS_T]: if is_simple_optional_type(expected_type): if value is None: return None expected_type = unwrap_simple_optional_type(expected_type) if is_dataclass(expected_type): return _load_dataclass(expected_type, value) for (type_, (_, decoder)) in cls._hub_mixin_coders.items(): if inspect.isclass(expected_type) and issubclass(expected_type, type_): return decoder(value) return value def save_pretrained(self, save_directory: Union[str, Path], *, config: Optional[Union[dict, 'DataclassInstance']]=None, repo_id: Optional[str]=None, push_to_hub: bool=False, model_card_kwargs: Optional[Dict[str, Any]]=None, **push_to_hub_kwargs) -> Optional[str]: save_directory = Path(save_directory) save_directory.mkdir(parents=True, exist_ok=True) config_path = save_directory / constants.CONFIG_NAME config_path.unlink(missing_ok=True) self._save_pretrained(save_directory) if config is None: config = 
self._hub_mixin_config if config is not None: if is_dataclass(config): config = asdict(config) if not config_path.exists(): config_str = json.dumps(config, sort_keys=True, indent=2) config_path.write_text(config_str) model_card_path = save_directory / 'README.md' model_card_kwargs = model_card_kwargs if model_card_kwargs is not None else {} if not model_card_path.exists(): self.generate_model_card(**model_card_kwargs).save(save_directory / 'README.md') if push_to_hub: kwargs = push_to_hub_kwargs.copy() if config is not None: kwargs['config'] = config if repo_id is None: repo_id = save_directory.name return self.push_to_hub(repo_id=repo_id, model_card_kwargs=model_card_kwargs, **kwargs) return None def _save_pretrained(self, save_directory: Path) -> None: raise NotImplementedError @classmethod @validate_hf_hub_args def from_pretrained(cls: Type[T], pretrained_model_name_or_path: Union[str, Path], *, force_download: bool=False, resume_download: Optional[bool]=None, proxies: Optional[Dict]=None, token: Optional[Union[str, bool]]=None, cache_dir: Optional[Union[str, Path]]=None, local_files_only: bool=False, revision: Optional[str]=None, **model_kwargs) -> T: model_id = str(pretrained_model_name_or_path) config_file: Optional[str] = None if os.path.isdir(model_id): if constants.CONFIG_NAME in os.listdir(model_id): config_file = os.path.join(model_id, constants.CONFIG_NAME) else: logger.warning(f'{constants.CONFIG_NAME} not found in {Path(model_id).resolve()}') else: try: config_file = hf_hub_download(repo_id=model_id, filename=constants.CONFIG_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only) except HfHubHTTPError as e: logger.info(f'{constants.CONFIG_NAME} not found on the HuggingFace Hub: {str(e)}') config = None if config_file is not None: with open(config_file, 'r', encoding='utf-8') as f: config = json.load(f) for (key, value) in config.items(): if key in cls._hub_mixin_init_parameters: expected_type = cls._hub_mixin_init_parameters[key].annotation if expected_type is not inspect.Parameter.empty: config[key] = cls._decode_arg(expected_type, value) for param in cls._hub_mixin_init_parameters.values(): if param.name not in model_kwargs and param.name in config: model_kwargs[param.name] = config[param.name] if 'config' in cls._hub_mixin_init_parameters and 'config' not in model_kwargs: config_annotation = cls._hub_mixin_init_parameters['config'].annotation config = cls._decode_arg(config_annotation, config) model_kwargs['config'] = config if is_dataclass(cls): for key in cls.__dataclass_fields__: if key not in model_kwargs and key in config: model_kwargs[key] = config[key] elif any((param.kind == inspect.Parameter.VAR_KEYWORD for param in cls._hub_mixin_init_parameters.values())): for (key, value) in config.items(): if key not in model_kwargs: model_kwargs[key] = value if cls._hub_mixin_inject_config and 'config' not in model_kwargs: model_kwargs['config'] = config instance = cls._from_pretrained(model_id=str(model_id), revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, **model_kwargs) if config is not None and getattr(instance, '_hub_mixin_config', None) in (None, {}): instance._hub_mixin_config = config return instance @classmethod def _from_pretrained(cls: Type[T], *, model_id: str, revision: Optional[str], cache_dir: Optional[Union[str, Path]], 
force_download: bool, proxies: Optional[Dict], resume_download: Optional[bool], local_files_only: bool, token: Optional[Union[str, bool]], **model_kwargs) -> T: raise NotImplementedError @validate_hf_hub_args def push_to_hub(self, repo_id: str, *, config: Optional[Union[dict, 'DataclassInstance']]=None, commit_message: str='Push model using huggingface_hub.', private: bool=False, token: Optional[str]=None, branch: Optional[str]=None, create_pr: Optional[bool]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, model_card_kwargs: Optional[Dict[str, Any]]=None) -> str: api = HfApi(token=token) repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id with SoftTemporaryDirectory() as tmp: saved_path = Path(tmp) / repo_id self.save_pretrained(saved_path, config=config, model_card_kwargs=model_card_kwargs) return api.upload_folder(repo_id=repo_id, repo_type='model', folder_path=saved_path, commit_message=commit_message, revision=branch, create_pr=create_pr, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, delete_patterns=delete_patterns) def generate_model_card(self, *args, **kwargs) -> ModelCard: card = ModelCard.from_template(card_data=self._hub_mixin_info.model_card_data, template_str=self._hub_mixin_info.model_card_template, repo_url=self._hub_mixin_info.repo_url, docs_url=self._hub_mixin_info.docs_url, **kwargs) return card class PyTorchModelHubMixin(ModelHubMixin): def __init_subclass__(cls, *args, tags: Optional[List[str]]=None, **kwargs) -> None: tags = tags or [] tags.append('pytorch_model_hub_mixin') kwargs['tags'] = tags return super().__init_subclass__(*args, **kwargs) def _save_pretrained(self, save_directory: Path) -> None: model_to_save = self.module if hasattr(self, 'module') else self save_model_as_safetensor(model_to_save, str(save_directory / constants.SAFETENSORS_SINGLE_FILE)) @classmethod def _from_pretrained(cls, *, model_id: str, revision: Optional[str], cache_dir: Optional[Union[str, Path]], force_download: bool, proxies: Optional[Dict], resume_download: Optional[bool], local_files_only: bool, token: Union[str, bool, None], map_location: str='cpu', strict: bool=False, **model_kwargs): model = cls(**model_kwargs) if os.path.isdir(model_id): print('Loading weights from local directory') model_file = os.path.join(model_id, constants.SAFETENSORS_SINGLE_FILE) return cls._load_as_safetensor(model, model_file, map_location, strict) else: try: model_file = hf_hub_download(repo_id=model_id, filename=constants.SAFETENSORS_SINGLE_FILE, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only) return cls._load_as_safetensor(model, model_file, map_location, strict) except EntryNotFoundError: model_file = hf_hub_download(repo_id=model_id, filename=constants.PYTORCH_WEIGHTS_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only) return cls._load_as_pickle(model, model_file, map_location, strict) @classmethod def _load_as_pickle(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: state_dict = torch.load(model_file, map_location=torch.device(map_location), weights_only=True) model.load_state_dict(state_dict, strict=strict) model.eval() return model @classmethod def 
_load_as_safetensor(cls, model: T, model_file: str, map_location: str, strict: bool) -> T: if packaging.version.parse(safetensors.__version__) < packaging.version.parse('0.4.3'): load_model_as_safetensor(model, model_file, strict=strict) if map_location != 'cpu': logger.warning("Loading model weights on other devices than 'cpu' is not supported natively in your version of safetensors. This means that the model is loaded on 'cpu' first and then copied to the device. This leads to a slower loading time. Please update safetensors to version 0.4.3 or above for improved performance.") model.to(map_location) else: safetensors.torch.load_model(model, model_file, strict=strict, device=map_location) return model def _load_dataclass(datacls: Type['DataclassInstance'], data: dict) -> 'DataclassInstance': return datacls(**{k: v for (k, v) in data.items() if k in datacls.__dataclass_fields__}) # File: huggingface_hub-main/src/huggingface_hub/inference/_client.py import base64 import logging import re import time import warnings from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Literal, Optional, Union, overload from requests import HTTPError from requests.structures import CaseInsensitiveDict from huggingface_hub.constants import ALL_INFERENCE_API_FRAMEWORKS, INFERENCE_ENDPOINT, MAIN_INFERENCE_API_FRAMEWORKS from huggingface_hub.errors import BadRequestError, InferenceTimeoutError from huggingface_hub.inference._common import TASKS_EXPECTING_IMAGES, ContentT, ModelStatus, _b64_encode, _b64_to_image, _bytes_to_dict, _bytes_to_image, _bytes_to_list, _fetch_recommended_models, _get_unsupported_text_generation_kwargs, _import_numpy, _open_as_binary, _set_unsupported_text_generation_kwargs, _stream_chat_completion_response, _stream_text_generation_response, raise_text_generation_error from huggingface_hub.inference._generated.types import AudioClassificationOutputElement, AudioToAudioOutputElement, AutomaticSpeechRecognitionOutput, ChatCompletionInputGrammarType, ChatCompletionInputTool, ChatCompletionInputToolTypeClass, ChatCompletionOutput, ChatCompletionStreamOutput, DocumentQuestionAnsweringOutputElement, FillMaskOutputElement, ImageClassificationOutputElement, ImageSegmentationOutputElement, ImageToTextOutput, ObjectDetectionOutputElement, QuestionAnsweringOutputElement, SummarizationOutput, TableQuestionAnsweringOutputElement, TextClassificationOutputElement, TextGenerationInputGrammarType, TextGenerationOutput, TextGenerationStreamOutput, TokenClassificationOutputElement, TranslationOutput, VisualQuestionAnsweringOutputElement, ZeroShotClassificationOutputElement, ZeroShotImageClassificationOutputElement from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status from huggingface_hub.utils._deprecation import _deprecate_positional_args if TYPE_CHECKING: import numpy as np from PIL.Image import Image logger = logging.getLogger(__name__) MODEL_KWARGS_NOT_USED_REGEX = re.compile('The following `model_kwargs` are not used by the model: \\[(.*?)\\]') class InferenceClient: @_deprecate_positional_args(version='0.26') def __init__(self, model: Optional[str]=None, *, token: Union[str, bool, None]=None, timeout: Optional[float]=None, headers: Optional[Dict[str, str]]=None, cookies: Optional[Dict[str, str]]=None, proxies: Optional[Any]=None, base_url: Optional[str]=None, api_key: Optional[str]=None) -> None: if model is not None and base_url is not None: raise ValueError("Received both `model` and `base_url` arguments. Please provide only one of them. 
`base_url` is an alias for `model` to make the API compatible with OpenAI's client. If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url. When passing a URL as `model`, the client will not append any suffix path to it.") if token is not None and api_key is not None: raise ValueError("Received both `token` and `api_key` arguments. Please provide only one of them. `api_key` is an alias for `token` to make the API compatible with OpenAI's client. It has the exact same behavior as `token`.") self.model: Optional[str] = model self.token: Union[str, bool, None] = token if token is not None else api_key self.headers = CaseInsensitiveDict(build_hf_headers(token=self.token)) if headers is not None: self.headers.update(headers) self.cookies = cookies self.timeout = timeout self.proxies = proxies self.base_url = base_url def __repr__(self): return f"<InferenceClient(model='{self.model}', timeout={self.timeout})>" @overload def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: Literal[False]=...) -> bytes: ... @overload def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: Literal[True]=...) -> Iterable[bytes]: ... @overload def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: bool=False) -> Union[bytes, Iterable[bytes]]: ... def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: bool=False) -> Union[bytes, Iterable[bytes]]: url = self._resolve_url(model, task) if data is not None and json is not None: warnings.warn('Ignoring `json` as `data` is passed as binary.') headers = self.headers.copy() if task in TASKS_EXPECTING_IMAGES and 'Accept' not in headers: headers['Accept'] = 'image/png' t0 = time.time() timeout = self.timeout while True: with _open_as_binary(data) as data_as_binary: try: response = get_session().post(url, json=json, data=data_as_binary, headers=headers, cookies=self.cookies, timeout=self.timeout, stream=stream, proxies=self.proxies) except TimeoutError as error: raise InferenceTimeoutError(f'Inference call timed out: {url}') from error try: hf_raise_for_status(response) return response.iter_lines() if stream else response.content except HTTPError as error: if error.response.status_code == 422 and task is not None: error.args = (f"{error.args[0]}\nMake sure '{task}' task is supported by the model.",) + error.args[1:] if error.response.status_code == 503: if timeout is not None and time.time() - t0 > timeout: raise InferenceTimeoutError(f'Model not loaded on the server: {url}. 
Please retry with a higher timeout (current: {self.timeout}).', request=error.request, response=error.response) from error logger.info(f'Waiting for model to be loaded on the server: {error}') time.sleep(1) if 'X-wait-for-model' not in headers and url.startswith(INFERENCE_ENDPOINT): headers['X-wait-for-model'] = '1' if timeout is not None: timeout = max(self.timeout - (time.time() - t0), 1) continue raise def audio_classification(self, audio: ContentT, *, model: Optional[str]=None) -> List[AudioClassificationOutputElement]: response = self.post(data=audio, model=model, task='audio-classification') return AudioClassificationOutputElement.parse_obj_as_list(response) def audio_to_audio(self, audio: ContentT, *, model: Optional[str]=None) -> List[AudioToAudioOutputElement]: response = self.post(data=audio, model=model, task='audio-to-audio') audio_output = AudioToAudioOutputElement.parse_obj_as_list(response) for item in audio_output: item.blob = base64.b64decode(item.blob) return audio_output def automatic_speech_recognition(self, audio: ContentT, *, model: Optional[str]=None) -> AutomaticSpeechRecognitionOutput: response = self.post(data=audio, model=model, task='automatic-speech-recognition') return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response) @overload def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: Literal[False]=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> ChatCompletionOutput: ... @overload def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: Literal[True]=True, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> Iterable[ChatCompletionStreamOutput]: ... 
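# --- Illustrative usage (annotation, not part of the original source) ---------
# The chat_completion overloads encode that stream=False returns a single
# ChatCompletionOutput while stream=True returns an iterable of
# ChatCompletionStreamOutput chunks. A minimal sketch; the model id is a
# placeholder and must be served by an inference backend:
#
#     client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
#     messages = [{"role": "user", "content": "Hello"}]
#     out = client.chat_completion(messages, max_tokens=32)
#     print(out.choices[0].message.content)
#     for chunk in client.chat_completion(messages, max_tokens=32, stream=True):
#         print(chunk.choices[0].delta.content or "", end="")
# ------------------------------------------------------------------------------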
@overload def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: bool=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: ... def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: bool=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: model_id_or_url = self.base_url or self.model or model or self.get_recommended_model('text-generation') is_url = model_id_or_url.startswith(('http://', 'https://')) if model_id_or_url == self.base_url: model_url = model_id_or_url.rstrip('/') if not model_url.endswith('/v1'): model_url += '/v1' model_url += '/chat/completions' elif is_url: model_url = model_id_or_url else: model_url = self._resolve_url(model_id_or_url).rstrip('/') + '/v1/chat/completions' model_id = model or self.model or 'tgi' if model_id.startswith(('http://', 'https://')): model_id = 'tgi' payload = dict(model=model_id, messages=messages, frequency_penalty=frequency_penalty, logit_bias=logit_bias, logprobs=logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, response_format=response_format, seed=seed, stop=stop, temperature=temperature, tool_choice=tool_choice, tool_prompt=tool_prompt, tools=tools, top_logprobs=top_logprobs, top_p=top_p, stream=stream) payload = {key: value for (key, value) in payload.items() if value is not None} data = self.post(model=model_url, json=payload, stream=stream) if stream: return _stream_chat_completion_response(data) return ChatCompletionOutput.parse_obj_as_instance(data) def document_question_answering(self, image: ContentT, question: str, *, model: Optional[str]=None) -> List[DocumentQuestionAnsweringOutputElement]: payload: Dict[str, Any] = {'question': question, 'image': _b64_encode(image)} response = self.post(json=payload, model=model, task='document-question-answering') return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response) def feature_extraction(self, text: str, *, normalize: Optional[bool]=None, prompt_name: Optional[str]=None, truncate: Optional[bool]=None, truncation_direction: Optional[Literal['Left', 'Right']]=None, model: Optional[str]=None) -> 'np.ndarray': payload: Dict = {'inputs': text} if normalize is not None: payload['normalize'] = normalize if prompt_name is not None: payload['prompt_name'] = prompt_name if truncate 
is not None: payload['truncate'] = truncate if truncation_direction is not None: payload['truncation_direction'] = truncation_direction response = self.post(json=payload, model=model, task='feature-extraction') np = _import_numpy() return np.array(_bytes_to_dict(response), dtype='float32') def fill_mask(self, text: str, *, model: Optional[str]=None) -> List[FillMaskOutputElement]: response = self.post(json={'inputs': text}, model=model, task='fill-mask') return FillMaskOutputElement.parse_obj_as_list(response) def image_classification(self, image: ContentT, *, model: Optional[str]=None) -> List[ImageClassificationOutputElement]: response = self.post(data=image, model=model, task='image-classification') return ImageClassificationOutputElement.parse_obj_as_list(response) def image_segmentation(self, image: ContentT, *, model: Optional[str]=None) -> List[ImageSegmentationOutputElement]: response = self.post(data=image, model=model, task='image-segmentation') output = ImageSegmentationOutputElement.parse_obj_as_list(response) for item in output: item.mask = _b64_to_image(item.mask) return output def image_to_image(self, image: ContentT, prompt: Optional[str]=None, *, negative_prompt: Optional[str]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=None, guidance_scale: Optional[float]=None, model: Optional[str]=None, **kwargs) -> 'Image': parameters = {'prompt': prompt, 'negative_prompt': negative_prompt, 'height': height, 'width': width, 'num_inference_steps': num_inference_steps, 'guidance_scale': guidance_scale, **kwargs} if all((parameter is None for parameter in parameters.values())): data = image payload: Optional[Dict[str, Any]] = None else: data = None payload = {'inputs': _b64_encode(image)} for (key, value) in parameters.items(): if value is not None: payload.setdefault('parameters', {})[key] = value response = self.post(json=payload, data=data, model=model, task='image-to-image') return _bytes_to_image(response) def image_to_text(self, image: ContentT, *, model: Optional[str]=None) -> ImageToTextOutput: response = self.post(data=image, model=model, task='image-to-text') output = ImageToTextOutput.parse_obj(response) return output[0] if isinstance(output, list) else output def list_deployed_models(self, frameworks: Union[None, str, Literal['all'], List[str]]=None) -> Dict[str, List[str]]: if frameworks is None: frameworks = MAIN_INFERENCE_API_FRAMEWORKS elif frameworks == 'all': frameworks = ALL_INFERENCE_API_FRAMEWORKS elif isinstance(frameworks, str): frameworks = [frameworks] frameworks = list(set(frameworks)) models_by_task: Dict[str, List[str]] = {} def _unpack_response(framework: str, items: List[Dict]) -> None: for model in items: if framework == 'sentence-transformers': models_by_task.setdefault('feature-extraction', []).append(model['model_id']) models_by_task.setdefault('sentence-similarity', []).append(model['model_id']) else: models_by_task.setdefault(model['task'], []).append(model['model_id']) for framework in frameworks: response = get_session().get(f'{INFERENCE_ENDPOINT}/framework/{framework}', headers=self.headers) hf_raise_for_status(response) _unpack_response(framework, response.json()) for (task, models) in models_by_task.items(): models_by_task[task] = sorted(set(models), key=lambda x: x.lower()) return models_by_task def object_detection(self, image: ContentT, *, model: Optional[str]=None) -> List[ObjectDetectionOutputElement]: response = self.post(data=image, model=model, task='object-detection') return 
ObjectDetectionOutputElement.parse_obj_as_list(response) def question_answering(self, question: str, context: str, *, model: Optional[str]=None) -> QuestionAnsweringOutputElement: payload: Dict[str, Any] = {'question': question, 'context': context} response = self.post(json=payload, model=model, task='question-answering') return QuestionAnsweringOutputElement.parse_obj_as_instance(response) def sentence_similarity(self, sentence: str, other_sentences: List[str], *, model: Optional[str]=None) -> List[float]: response = self.post(json={'inputs': {'source_sentence': sentence, 'sentences': other_sentences}}, model=model, task='sentence-similarity') return _bytes_to_list(response) def summarization(self, text: str, *, parameters: Optional[Dict[str, Any]]=None, model: Optional[str]=None) -> SummarizationOutput: payload: Dict[str, Any] = {'inputs': text} if parameters is not None: payload['parameters'] = parameters response = self.post(json=payload, model=model, task='summarization') return SummarizationOutput.parse_obj_as_list(response)[0] def table_question_answering(self, table: Dict[str, Any], query: str, *, model: Optional[str]=None) -> TableQuestionAnsweringOutputElement: response = self.post(json={'query': query, 'table': table}, model=model, task='table-question-answering') return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str]=None) -> List[str]: response = self.post(json={'table': table}, model=model, task='tabular-classification') return _bytes_to_list(response) def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str]=None) -> List[float]: response = self.post(json={'table': table}, model=model, task='tabular-regression') return _bytes_to_list(response) def text_classification(self, text: str, *, model: Optional[str]=None) -> List[TextClassificationOutputElement]: response = self.post(json={'inputs': text}, model=model, task='text-classification') return TextClassificationOutputElement.parse_obj_as_list(response)[0] @overload def text_generation(self, prompt: str, *, details: Literal[False]=..., stream: Literal[False]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> str: ... 
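# (annotation, not part of the original source) The text_generation overloads
# here and below encode a 2x2 return-type matrix:
#   details=False, stream=False -> str (generated text only)
#   details=True,  stream=False -> TextGenerationOutput
#   details=False, stream=True  -> Iterable[str]
#   details=True,  stream=True  -> Iterable[TextGenerationStreamOutput]
# e.g. `client.text_generation("Hello", max_new_tokens=5)` returns a plain str.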
@overload def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: Literal[False]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> TextGenerationOutput: ... @overload def text_generation(self, prompt: str, *, details: Literal[False]=..., stream: Literal[True]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Iterable[str]: ... @overload def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: Literal[True]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Iterable[TextGenerationStreamOutput]: ... @overload def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: bool=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Union[TextGenerationOutput, Iterable[TextGenerationStreamOutput]]: ... 
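# (annotation, not part of the original source) The implementation below builds
# the text-generation-inference (TGI) request shape, roughly:
#   {"inputs": "<prompt>", "parameters": {"max_new_tokens": ..., "stop": [], ...}, "stream": false}
# dropping parameters that are None. For endpoints not served via TGI, it strips
# the parameters the server reported as unused (remembered per model via
# _set_unsupported_text_generation_kwargs) and retries once without them.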
def text_generation(self, prompt: str, *, details: bool=False, stream: bool=False, model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]: if decoder_input_details and (not details): warnings.warn('`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that the output from the server will be truncated.') decoder_input_details = False if stop_sequences is not None: warnings.warn("`stop_sequences` is a deprecated argument for `text_generation` task and will be removed in version '0.28.0'. Use `stop` instead.", FutureWarning) if stop is None: stop = stop_sequences parameters = {'adapter_id': adapter_id, 'best_of': best_of, 'decoder_input_details': decoder_input_details, 'details': details, 'do_sample': do_sample, 'frequency_penalty': frequency_penalty, 'grammar': grammar, 'max_new_tokens': max_new_tokens, 'repetition_penalty': repetition_penalty, 'return_full_text': return_full_text, 'seed': seed, 'stop': stop if stop is not None else [], 'temperature': temperature, 'top_k': top_k, 'top_n_tokens': top_n_tokens, 'top_p': top_p, 'truncate': truncate, 'typical_p': typical_p, 'watermark': watermark} parameters = {k: v for (k, v) in parameters.items() if v is not None} payload = {'inputs': prompt, 'parameters': parameters, 'stream': stream} unsupported_kwargs = _get_unsupported_text_generation_kwargs(model) if len(unsupported_kwargs) > 0: ignored_parameters = [] for key in unsupported_kwargs: if parameters.get(key): ignored_parameters.append(key) parameters.pop(key, None) if len(ignored_parameters) > 0: warnings.warn(f"API endpoint/model for text-generation is not served via TGI. Ignoring following parameters: {', '.join(ignored_parameters)}.", UserWarning) if details: warnings.warn('API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will be ignored meaning only the generated text will be returned.', UserWarning) details = False if stream: raise ValueError('API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream. 
Please pass `stream=False` as input.') try: bytes_output = self.post(json=payload, model=model, task='text-generation', stream=stream) except HTTPError as e: match = MODEL_KWARGS_NOT_USED_REGEX.search(str(e)) if isinstance(e, BadRequestError) and match: unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(',')] _set_unsupported_text_generation_kwargs(model, unused_params) return self.text_generation(prompt=prompt, details=details, stream=stream, model=model, adapter_id=adapter_id, best_of=best_of, decoder_input_details=decoder_input_details, do_sample=do_sample, frequency_penalty=frequency_penalty, grammar=grammar, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop, temperature=temperature, top_k=top_k, top_n_tokens=top_n_tokens, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark) raise_text_generation_error(e) if stream: return _stream_text_generation_response(bytes_output, details) data = _bytes_to_dict(bytes_output) if isinstance(data, list): data = data[0] return TextGenerationOutput.parse_obj_as_instance(data) if details else data['generated_text'] def text_to_image(self, prompt: str, *, negative_prompt: Optional[str]=None, height: Optional[float]=None, width: Optional[float]=None, num_inference_steps: Optional[float]=None, guidance_scale: Optional[float]=None, model: Optional[str]=None, **kwargs) -> 'Image': payload = {'inputs': prompt} parameters = {'negative_prompt': negative_prompt, 'height': height, 'width': width, 'num_inference_steps': num_inference_steps, 'guidance_scale': guidance_scale, **kwargs} for (key, value) in parameters.items(): if value is not None: payload.setdefault('parameters', {})[key] = value response = self.post(json=payload, model=model, task='text-to-image') return _bytes_to_image(response) def text_to_speech(self, text: str, *, model: Optional[str]=None) -> bytes: return self.post(json={'inputs': text}, model=model, task='text-to-speech') def token_classification(self, text: str, *, model: Optional[str]=None) -> List[TokenClassificationOutputElement]: payload: Dict[str, Any] = {'inputs': text} response = self.post(json=payload, model=model, task='token-classification') return TokenClassificationOutputElement.parse_obj_as_list(response) def translation(self, text: str, *, model: Optional[str]=None, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None) -> TranslationOutput: if src_lang is not None and tgt_lang is None: raise ValueError('You cannot specify `src_lang` without specifying `tgt_lang`.') if src_lang is None and tgt_lang is not None: raise ValueError('You cannot specify `tgt_lang` without specifying `src_lang`.') payload: Dict = {'inputs': text} if src_lang and tgt_lang: payload['parameters'] = {'src_lang': src_lang, 'tgt_lang': tgt_lang} response = self.post(json=payload, model=model, task='translation') return TranslationOutput.parse_obj_as_list(response)[0] def visual_question_answering(self, image: ContentT, question: str, *, model: Optional[str]=None) -> List[VisualQuestionAnsweringOutputElement]: payload: Dict[str, Any] = {'question': question, 'image': _b64_encode(image)} response = self.post(json=payload, model=model, task='visual-question-answering') return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response) def zero_shot_classification(self, text: str, labels: List[str], *, multi_label: bool=False, hypothesis_template: Optional[str]=None, model: Optional[str]=None) -> List[ZeroShotClassificationOutputElement]: 
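# (annotation, not part of the original source) The body below posts
# {"inputs": text, "parameters": {"candidate_labels": labels, "multi_label": multi_label}}
# (plus "hypothesis_template" when given) and zips the parallel "labels" and
# "scores" arrays of the response into ZeroShotClassificationOutputElement
# instances.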
parameters = {'candidate_labels': labels, 'multi_label': multi_label} if hypothesis_template is not None: parameters['hypothesis_template'] = hypothesis_template response = self.post(json={'inputs': text, 'parameters': parameters}, task='zero-shot-classification', model=model) output = _bytes_to_dict(response) return [ZeroShotClassificationOutputElement.parse_obj_as_instance({'label': label, 'score': score}) for (label, score) in zip(output['labels'], output['scores'])] def zero_shot_image_classification(self, image: ContentT, labels: List[str], *, model: Optional[str]=None) -> List[ZeroShotImageClassificationOutputElement]: if len(labels) < 2: raise ValueError('You must specify at least 2 classes to compare.') response = self.post(json={'image': _b64_encode(image), 'parameters': {'candidate_labels': ','.join(labels)}}, model=model, task='zero-shot-image-classification') return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response) def _resolve_url(self, model: Optional[str]=None, task: Optional[str]=None) -> str: model = model or self.model or self.base_url if model is not None and (model.startswith('http://') or model.startswith('https://')): return model if model is None: if task is None: raise ValueError('You must specify at least a model (repo_id or URL) or a task, either when instantiating `InferenceClient` or when making a request.') model = self.get_recommended_model(task) logger.info(f"Using recommended model {model} for task {task}. Note that it is encouraged to explicitly set `model='{model}'` as the recommended models list might get updated without prior notice.") return f'{INFERENCE_ENDPOINT}/pipeline/{task}/{model}' if task in ('feature-extraction', 'sentence-similarity') else f'{INFERENCE_ENDPOINT}/models/{model}' @staticmethod def get_recommended_model(task: str) -> str: model = _fetch_recommended_models().get(task) if model is None: raise ValueError(f'Task {task} has no recommended model. Please specify a model explicitly. Visit https://huggingface.co/tasks for more info.') return model def get_endpoint_info(self, *, model: Optional[str]=None) -> Dict[str, Any]: model = model or self.model if model is None: raise ValueError('Model id not provided.') if model.startswith(('http://', 'https://')): url = model.rstrip('/') + '/info' else: url = f'{INFERENCE_ENDPOINT}/models/{model}/info' response = get_session().get(url, headers=self.headers) hf_raise_for_status(response) return response.json() def health_check(self, model: Optional[str]=None) -> bool: model = model or self.model if model is None: raise ValueError('Model id not provided.') if not model.startswith(('http://', 'https://')): raise ValueError('Model must be an Inference Endpoint URL. 
For serverless Inference API, please use `InferenceClient.get_model_status`.') url = model.rstrip('/') + '/health' response = get_session().get(url, headers=self.headers) return response.status_code == 200 def get_model_status(self, model: Optional[str]=None) -> ModelStatus: model = model or self.model if model is None: raise ValueError('Model id not provided.') if model.startswith('https://'): raise NotImplementedError('Model status is only available for Inference API endpoints.') url = f'{INFERENCE_ENDPOINT}/status/{model}' response = get_session().get(url, headers=self.headers) hf_raise_for_status(response) response_data = response.json() if 'error' in response_data: raise ValueError(response_data['error']) return ModelStatus(loaded=response_data['loaded'], state=response_data['state'], compute_type=response_data['compute_type'], framework=response_data['framework']) @property def chat(self) -> 'ProxyClientChat': return ProxyClientChat(self) class _ProxyClient: def __init__(self, client: InferenceClient): self._client = client class ProxyClientChat(_ProxyClient): @property def completions(self) -> 'ProxyClientChatCompletions': return ProxyClientChatCompletions(self._client) class ProxyClientChatCompletions(_ProxyClient): @property def create(self): return self._client.chat_completion # File: huggingface_hub-main/src/huggingface_hub/inference/_common.py """""" import base64 import io import json import logging from contextlib import contextmanager from dataclasses import dataclass from pathlib import Path from typing import TYPE_CHECKING, Any, AsyncIterable, BinaryIO, ContextManager, Dict, Generator, Iterable, List, Literal, NoReturn, Optional, Union, overload from requests import HTTPError from huggingface_hub.errors import GenerationError, IncompleteGenerationError, OverloadedError, TextGenerationError, UnknownError, ValidationError from ..constants import ENDPOINT from ..utils import build_hf_headers, get_session, hf_raise_for_status, is_aiohttp_available, is_numpy_available, is_pillow_available from ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput if TYPE_CHECKING: from aiohttp import ClientResponse, ClientSession from PIL.Image import Image UrlT = str PathT = Union[str, Path] BinaryT = Union[bytes, BinaryIO] ContentT = Union[BinaryT, PathT, UrlT] TASKS_EXPECTING_IMAGES = {'text-to-image', 'image-to-image'} logger = logging.getLogger(__name__) @dataclass class ModelStatus: loaded: bool state: str compute_type: Dict framework: str def _import_aiohttp(): if not is_aiohttp_available(): raise ImportError('Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).') import aiohttp return aiohttp def _import_numpy(): if not is_numpy_available(): raise ImportError('Please install numpy to deal with embeddings (`pip install numpy`).') import numpy return numpy def _import_pil_image(): if not is_pillow_available(): raise ImportError("Please install Pillow to deal with images (`pip install Pillow`). 
If you don't want the image to be post-processed, use `client.post(...)` and get the raw response from the server.") from PIL import Image return Image _RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None def _fetch_recommended_models() -> Dict[str, Optional[str]]: global _RECOMMENDED_MODELS if _RECOMMENDED_MODELS is None: response = get_session().get(f'{ENDPOINT}/api/tasks', headers=build_hf_headers()) hf_raise_for_status(response) _RECOMMENDED_MODELS = {task: _first_or_none(details['widgetModels']) for (task, details) in response.json().items()} return _RECOMMENDED_MODELS def _first_or_none(items: List[Any]) -> Optional[Any]: try: return items[0] or None except IndexError: return None @overload def _open_as_binary(content: ContentT) -> ContextManager[BinaryT]: ... @overload def _open_as_binary(content: Literal[None]) -> ContextManager[Literal[None]]: ... @contextmanager def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]: if isinstance(content, str): if content.startswith('https://') or content.startswith('http://'): logger.debug(f'Downloading content from {content}') yield get_session().get(content).content return content = Path(content) if not content.exists(): raise FileNotFoundError(f'File not found at {content}. If `data` is a string, it must either be a URL or a path to a local file. To pass raw content, please encode it as bytes first.') if isinstance(content, Path): logger.debug(f'Opening content from {content}') with content.open('rb') as f: yield f else: yield content def _b64_encode(content: ContentT) -> str: with _open_as_binary(content) as data: data_as_bytes = data if isinstance(data, bytes) else data.read() return base64.b64encode(data_as_bytes).decode() def _b64_to_image(encoded_image: str) -> 'Image': Image = _import_pil_image() return Image.open(io.BytesIO(base64.b64decode(encoded_image))) def _bytes_to_list(content: bytes) -> List: return json.loads(content.decode()) def _bytes_to_dict(content: bytes) -> Dict: return json.loads(content.decode()) def _bytes_to_image(content: bytes) -> 'Image': Image = _import_pil_image() return Image.open(io.BytesIO(content)) def _stream_text_generation_response(bytes_output_as_lines: Iterable[bytes], details: bool) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]: for byte_payload in bytes_output_as_lines: try: output = _format_text_generation_stream_output(byte_payload, details) except StopIteration: break if output is not None: yield output async def _async_stream_text_generation_response(bytes_output_as_lines: AsyncIterable[bytes], details: bool) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]: async for byte_payload in bytes_output_as_lines: try: output = _format_text_generation_stream_output(byte_payload, details) except StopIteration: break if output is not None: yield output def _format_text_generation_stream_output(byte_payload: bytes, details: bool) -> Optional[Union[str, TextGenerationStreamOutput]]: if not byte_payload.startswith(b'data:'): return None if byte_payload.strip() == b'data: [DONE]': raise StopIteration('[DONE] signal received.') payload = byte_payload.decode('utf-8') json_payload = json.loads(payload.lstrip('data:').rstrip('\n')) if json_payload.get('error') is not None: raise _parse_text_generation_error(json_payload['error'], json_payload.get('error_type')) output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload) return output.token.text if not details else output def 
_stream_chat_completion_response(bytes_lines: Iterable[bytes]) -> Iterable[ChatCompletionStreamOutput]: for item in bytes_lines: try: output = _format_chat_completion_stream_output(item) except StopIteration: break if output is not None: yield output async def _async_stream_chat_completion_response(bytes_lines: AsyncIterable[bytes]) -> AsyncIterable[ChatCompletionStreamOutput]: async for item in bytes_lines: try: output = _format_chat_completion_stream_output(item) except StopIteration: break if output is not None: yield output def _format_chat_completion_stream_output(byte_payload: bytes) -> Optional[ChatCompletionStreamOutput]: if not byte_payload.startswith(b'data:'): return None if byte_payload.strip() == b'data: [DONE]': raise StopIteration('[DONE] signal received.') payload = byte_payload.decode('utf-8') json_payload = json.loads(payload.lstrip('data:').rstrip('\n')) return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload) async def _async_yield_from(client: 'ClientSession', response: 'ClientResponse') -> AsyncIterable[bytes]: async for byte_payload in response.content: yield byte_payload.strip() await client.close() _UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {} def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None: _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs) def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]: return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, []) def raise_text_generation_error(http_error: HTTPError) -> NoReturn: try: payload = getattr(http_error, 'response_error_payload', None) or http_error.response.json() error = payload.get('error') error_type = payload.get('error_type') except Exception: raise http_error if error_type is not None: exception = _parse_text_generation_error(error, error_type) raise exception from http_error raise http_error def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError: if error_type == 'generation': return GenerationError(error) if error_type == 'incomplete_generation': return IncompleteGenerationError(error) if error_type == 'overloaded': return OverloadedError(error) if error_type == 'validation': return ValidationError(error) return UnknownError(error)
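# --- Illustrative sketch (not part of the original source) --------------------
# The stream helpers above consume text-generation-inference's server-sent
# events: each payload line looks like b'data: {...json...}' and the stream
# terminates with b'data: [DONE]'. A minimal standalone parser under those
# assumptions (aliased import to avoid shadowing the module's own `json`):
import json as _json_sketch

def parse_sse_line_sketch(byte_payload: bytes):
    if not byte_payload.startswith(b"data:"):
        return None  # keep-alive or comment line: ignore
    if byte_payload.strip() == b"data: [DONE]":
        raise StopIteration("[DONE] signal received.")
    # Drop the "data:" prefix, then parse the JSON body.
    return _json_sketch.loads(byte_payload.decode("utf-8")[len("data:"):].strip())

# parse_sse_line_sketch(b'data: {"token": {"text": "hi"}}') == {"token": {"text": "hi"}}
# ------------------------------------------------------------------------------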
ChatCompletionStreamOutput, DocumentQuestionAnsweringOutputElement, FillMaskOutputElement, ImageClassificationOutputElement, ImageSegmentationOutputElement, ImageToTextOutput, ObjectDetectionOutputElement, QuestionAnsweringOutputElement, SummarizationOutput, TableQuestionAnsweringOutputElement, TextClassificationOutputElement, TextGenerationInputGrammarType, TextGenerationOutput, TextGenerationStreamOutput, TokenClassificationOutputElement, TranslationOutput, VisualQuestionAnsweringOutputElement, ZeroShotClassificationOutputElement, ZeroShotImageClassificationOutputElement from huggingface_hub.utils import build_hf_headers from huggingface_hub.utils._deprecation import _deprecate_positional_args from .._common import _async_yield_from, _import_aiohttp if TYPE_CHECKING: import numpy as np from aiohttp import ClientResponse, ClientSession from PIL.Image import Image logger = logging.getLogger(__name__) MODEL_KWARGS_NOT_USED_REGEX = re.compile('The following `model_kwargs` are not used by the model: \\[(.*?)\\]') class AsyncInferenceClient: @_deprecate_positional_args(version='0.26') def __init__(self, model: Optional[str]=None, *, token: Union[str, bool, None]=None, timeout: Optional[float]=None, headers: Optional[Dict[str, str]]=None, cookies: Optional[Dict[str, str]]=None, trust_env: bool=False, proxies: Optional[Any]=None, base_url: Optional[str]=None, api_key: Optional[str]=None) -> None: if model is not None and base_url is not None: raise ValueError("Received both `model` and `base_url` arguments. Please provide only one of them. `base_url` is an alias for `model` to make the API compatible with OpenAI's client. If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url. When passing a URL as `model`, the client will not append any suffix path to it.") if token is not None and api_key is not None: raise ValueError("Received both `token` and `api_key` arguments. Please provide only one of them. `api_key` is an alias for `token` to make the API compatible with OpenAI's client. It has the exact same behavior as `token`.") self.model: Optional[str] = model self.token: Union[str, bool, None] = token if token is not None else api_key self.headers = CaseInsensitiveDict(build_hf_headers(token=self.token)) if headers is not None: self.headers.update(headers) self.cookies = cookies self.timeout = timeout self.trust_env = trust_env self.proxies = proxies self.base_url = base_url self._sessions: Dict['ClientSession', Set['ClientResponse']] = dict() def __repr__(self): return f"<AsyncInferenceClient(model='{self.model}', timeout={self.timeout})>" @overload async def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: Literal[False]=...) -> bytes: ... @overload async def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: Literal[True]=...) -> AsyncIterable[bytes]: ... @overload async def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: bool=False) -> Union[bytes, AsyncIterable[bytes]]: ... 
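# Editor's note (illustrative usage, not part of the library source): `post()` is the low-level call every task method below funnels through. It resolves the target URL, retries while a cold model answers HTTP 503, and returns raw bytes, or an async iterable of bytes when `stream=True`. A minimal sketch, assuming a model id and a valid token in the environment:
#
#     import asyncio
#     from huggingface_hub import AsyncInferenceClient
#
#     async def main() -> None:
#         async with AsyncInferenceClient(model="gpt2") as client:
#             raw = await client.post(json={"inputs": "Hello"})  # raw bytes from the server
#             print(raw[:80])
#
#     asyncio.run(main())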
async def post(self, *, json: Optional[Union[str, Dict, List]]=None, data: Optional[ContentT]=None, model: Optional[str]=None, task: Optional[str]=None, stream: bool=False) -> Union[bytes, AsyncIterable[bytes]]: aiohttp = _import_aiohttp() url = self._resolve_url(model, task) if data is not None and json is not None: warnings.warn('Ignoring `json` as `data` is passed as binary.') headers = dict() if task in TASKS_EXPECTING_IMAGES and 'Accept' not in headers: headers['Accept'] = 'image/png' t0 = time.time() timeout = self.timeout while True: with _open_as_binary(data) as data_as_binary: session = self._get_client_session(headers=headers) try: response = await session.post(url, json=json, data=data_as_binary, proxy=self.proxies) response_error_payload = None if response.status != 200: try: response_error_payload = await response.json() except Exception: pass response.raise_for_status() if stream: return _async_yield_from(session, response) else: content = await response.read() await session.close() return content except asyncio.TimeoutError as error: await session.close() raise InferenceTimeoutError(f'Inference call timed out: {url}') from error except aiohttp.ClientResponseError as error: error.response_error_payload = response_error_payload await session.close() if response.status == 422 and task is not None: error.message += f". Make sure '{task}' task is supported by the model." if response.status == 503: if timeout is not None and time.time() - t0 > timeout: raise InferenceTimeoutError(f'Model not loaded on the server: {url}. Please retry with a higher timeout (current: {self.timeout}).', request=error.request, response=error.response) from error logger.info(f'Waiting for model to be loaded on the server: {error}') if 'X-wait-for-model' not in headers and url.startswith(INFERENCE_ENDPOINT): headers['X-wait-for-model'] = '1' await asyncio.sleep(1) if timeout is not None: timeout = max(self.timeout - (time.time() - t0), 1) continue raise error except Exception: await session.close() raise async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_value, traceback): await self.close() def __del__(self): if len(self._sessions) > 0: warnings.warn("Deleting 'AsyncInferenceClient' client but some sessions are still open. This can happen if you've stopped streaming data from the server before the stream was complete. To close the client properly, you must call `await client.close()` or use an async context (e.g. 
`async with AsyncInferenceClient(): ...`.") async def close(self): await asyncio.gather(*[session.close() for session in self._sessions.keys()]) async def audio_classification(self, audio: ContentT, *, model: Optional[str]=None) -> List[AudioClassificationOutputElement]: response = await self.post(data=audio, model=model, task='audio-classification') return AudioClassificationOutputElement.parse_obj_as_list(response) async def audio_to_audio(self, audio: ContentT, *, model: Optional[str]=None) -> List[AudioToAudioOutputElement]: response = await self.post(data=audio, model=model, task='audio-to-audio') audio_output = AudioToAudioOutputElement.parse_obj_as_list(response) for item in audio_output: item.blob = base64.b64decode(item.blob) return audio_output async def automatic_speech_recognition(self, audio: ContentT, *, model: Optional[str]=None) -> AutomaticSpeechRecognitionOutput: response = await self.post(data=audio, model=model, task='automatic-speech-recognition') return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response) @overload async def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: Literal[False]=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> ChatCompletionOutput: ... @overload async def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: Literal[True]=True, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> AsyncIterable[ChatCompletionStreamOutput]: ... @overload async def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: bool=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: ... 
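# Editor's note (illustrative; the model id is an assumption): `chat_completion` follows the OpenAI chat schema. With `stream=False` it returns a `ChatCompletionOutput`; with `stream=True` it returns an async iterable of `ChatCompletionStreamOutput` chunks:
#
#     client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
#     out = await client.chat_completion(messages=[{"role": "user", "content": "Say hi"}], max_tokens=16)
#     print(out.choices[0].message.content)
#
#     # Streaming variant: awaiting the coroutine yields the async iterator.
#     async for chunk in await client.chat_completion(messages=[{"role": "user", "content": "Say hi"}], stream=True, max_tokens=16):
#         print(chunk.choices[0].delta.content or "", end="")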
async def chat_completion(self, messages: List[Dict[str, str]], *, model: Optional[str]=None, stream: bool=False, frequency_penalty: Optional[float]=None, logit_bias: Optional[List[float]]=None, logprobs: Optional[bool]=None, max_tokens: Optional[int]=None, n: Optional[int]=None, presence_penalty: Optional[float]=None, response_format: Optional[ChatCompletionInputGrammarType]=None, seed: Optional[int]=None, stop: Optional[List[str]]=None, temperature: Optional[float]=None, tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]]=None, tool_prompt: Optional[str]=None, tools: Optional[List[ChatCompletionInputTool]]=None, top_logprobs: Optional[int]=None, top_p: Optional[float]=None) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: model_id_or_url = self.base_url or self.model or model or self.get_recommended_model('text-generation') is_url = model_id_or_url.startswith(('http://', 'https://')) if model_id_or_url == self.base_url: model_url = model_id_or_url.rstrip('/') if not model_url.endswith('/v1'): model_url += '/v1' model_url += '/chat/completions' elif is_url: model_url = model_id_or_url else: model_url = self._resolve_url(model_id_or_url).rstrip('/') + '/v1/chat/completions' model_id = model or self.model or 'tgi' if model_id.startswith(('http://', 'https://')): model_id = 'tgi' payload = dict(model=model_id, messages=messages, frequency_penalty=frequency_penalty, logit_bias=logit_bias, logprobs=logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, response_format=response_format, seed=seed, stop=stop, temperature=temperature, tool_choice=tool_choice, tool_prompt=tool_prompt, tools=tools, top_logprobs=top_logprobs, top_p=top_p, stream=stream) payload = {key: value for (key, value) in payload.items() if value is not None} data = await self.post(model=model_url, json=payload, stream=stream) if stream: return _async_stream_chat_completion_response(data) return ChatCompletionOutput.parse_obj_as_instance(data) async def document_question_answering(self, image: ContentT, question: str, *, model: Optional[str]=None) -> List[DocumentQuestionAnsweringOutputElement]: payload: Dict[str, Any] = {'question': question, 'image': _b64_encode(image)} response = await self.post(json=payload, model=model, task='document-question-answering') return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response) async def feature_extraction(self, text: str, *, normalize: Optional[bool]=None, prompt_name: Optional[str]=None, truncate: Optional[bool]=None, truncation_direction: Optional[Literal['Left', 'Right']]=None, model: Optional[str]=None) -> 'np.ndarray': payload: Dict = {'inputs': text} if normalize is not None: payload['normalize'] = normalize if prompt_name is not None: payload['prompt_name'] = prompt_name if truncate is not None: payload['truncate'] = truncate if truncation_direction is not None: payload['truncation_direction'] = truncation_direction response = await self.post(json=payload, model=model, task='feature-extraction') np = _import_numpy() return np.array(_bytes_to_dict(response), dtype='float32') async def fill_mask(self, text: str, *, model: Optional[str]=None) -> List[FillMaskOutputElement]: response = await self.post(json={'inputs': text}, model=model, task='fill-mask') return FillMaskOutputElement.parse_obj_as_list(response) async def image_classification(self, image: ContentT, *, model: Optional[str]=None) -> List[ImageClassificationOutputElement]: response = await self.post(data=image, model=model, 
task='image-classification') return ImageClassificationOutputElement.parse_obj_as_list(response) async def image_segmentation(self, image: ContentT, *, model: Optional[str]=None) -> List[ImageSegmentationOutputElement]: response = await self.post(data=image, model=model, task='image-segmentation') output = ImageSegmentationOutputElement.parse_obj_as_list(response) for item in output: item.mask = _b64_to_image(item.mask) return output async def image_to_image(self, image: ContentT, prompt: Optional[str]=None, *, negative_prompt: Optional[str]=None, height: Optional[int]=None, width: Optional[int]=None, num_inference_steps: Optional[int]=None, guidance_scale: Optional[float]=None, model: Optional[str]=None, **kwargs) -> 'Image': parameters = {'prompt': prompt, 'negative_prompt': negative_prompt, 'height': height, 'width': width, 'num_inference_steps': num_inference_steps, 'guidance_scale': guidance_scale, **kwargs} if all((parameter is None for parameter in parameters.values())): data = image payload: Optional[Dict[str, Any]] = None else: data = None payload = {'inputs': _b64_encode(image)} for (key, value) in parameters.items(): if value is not None: payload.setdefault('parameters', {})[key] = value response = await self.post(json=payload, data=data, model=model, task='image-to-image') return _bytes_to_image(response) async def image_to_text(self, image: ContentT, *, model: Optional[str]=None) -> ImageToTextOutput: response = await self.post(data=image, model=model, task='image-to-text') output = ImageToTextOutput.parse_obj(response) return output[0] if isinstance(output, list) else output async def list_deployed_models(self, frameworks: Union[None, str, Literal['all'], List[str]]=None) -> Dict[str, List[str]]: if frameworks is None: frameworks = MAIN_INFERENCE_API_FRAMEWORKS elif frameworks == 'all': frameworks = ALL_INFERENCE_API_FRAMEWORKS elif isinstance(frameworks, str): frameworks = [frameworks] frameworks = list(set(frameworks)) models_by_task: Dict[str, List[str]] = {} def _unpack_response(framework: str, items: List[Dict]) -> None: for model in items: if framework == 'sentence-transformers': models_by_task.setdefault('feature-extraction', []).append(model['model_id']) models_by_task.setdefault('sentence-similarity', []).append(model['model_id']) else: models_by_task.setdefault(model['task'], []).append(model['model_id']) async def _fetch_framework(framework: str) -> None: async with self._get_client_session() as client: response = await client.get(f'{INFERENCE_ENDPOINT}/framework/{framework}', proxy=self.proxies) response.raise_for_status() _unpack_response(framework, await response.json()) import asyncio await asyncio.gather(*[_fetch_framework(framework) for framework in frameworks]) for (task, models) in models_by_task.items(): models_by_task[task] = sorted(set(models), key=lambda x: x.lower()) return models_by_task async def object_detection(self, image: ContentT, *, model: Optional[str]=None) -> List[ObjectDetectionOutputElement]: response = await self.post(data=image, model=model, task='object-detection') return ObjectDetectionOutputElement.parse_obj_as_list(response) async def question_answering(self, question: str, context: str, *, model: Optional[str]=None) -> QuestionAnsweringOutputElement: payload: Dict[str, Any] = {'question': question, 'context': context} response = await self.post(json=payload, model=model, task='question-answering') return QuestionAnsweringOutputElement.parse_obj_as_instance(response) async def sentence_similarity(self, sentence: str, other_sentences: 
List[str], *, model: Optional[str]=None) -> List[float]: response = await self.post(json={'inputs': {'source_sentence': sentence, 'sentences': other_sentences}}, model=model, task='sentence-similarity') return _bytes_to_list(response) async def summarization(self, text: str, *, parameters: Optional[Dict[str, Any]]=None, model: Optional[str]=None) -> SummarizationOutput: payload: Dict[str, Any] = {'inputs': text} if parameters is not None: payload['parameters'] = parameters response = await self.post(json=payload, model=model, task='summarization') return SummarizationOutput.parse_obj_as_list(response)[0] async def table_question_answering(self, table: Dict[str, Any], query: str, *, model: Optional[str]=None) -> TableQuestionAnsweringOutputElement: response = await self.post(json={'query': query, 'table': table}, model=model, task='table-question-answering') return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) async def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str]=None) -> List[str]: response = await self.post(json={'table': table}, model=model, task='tabular-classification') return _bytes_to_list(response) async def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str]=None) -> List[float]: response = await self.post(json={'table': table}, model=model, task='tabular-regression') return _bytes_to_list(response) async def text_classification(self, text: str, *, model: Optional[str]=None) -> List[TextClassificationOutputElement]: response = await self.post(json={'inputs': text}, model=model, task='text-classification') return TextClassificationOutputElement.parse_obj_as_list(response)[0] @overload async def text_generation(self, prompt: str, *, details: Literal[False]=..., stream: Literal[False]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> str: ... @overload async def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: Literal[False]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> TextGenerationOutput: ... 
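# Editor's note: the `text_generation` overloads above and below encode its return-type matrix:
#
#     details=False, stream=False -> str (generated text only)
#     details=True,  stream=False -> TextGenerationOutput
#     details=False, stream=True  -> AsyncIterable[str]
#     details=True,  stream=True  -> AsyncIterable[TextGenerationStreamOutput]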
@overload async def text_generation(self, prompt: str, *, details: Literal[False]=..., stream: Literal[True]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> AsyncIterable[str]: ... @overload async def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: Literal[True]=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> AsyncIterable[TextGenerationStreamOutput]: ... @overload async def text_generation(self, prompt: str, *, details: Literal[True]=..., stream: bool=..., model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Union[TextGenerationOutput, AsyncIterable[TextGenerationStreamOutput]]: ... 
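# Illustrative usage (prompt and parameters are arbitrary): the non-streaming call awaits the full string, while the streaming call yields token texts as they arrive:
#
#     text = await client.text_generation("The capital of France is", max_new_tokens=5)
#
#     async for token_text in await client.text_generation("Once upon a time", stream=True, max_new_tokens=20):
#         print(token_text, end="")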
async def text_generation(self, prompt: str, *, details: bool=False, stream: bool=False, model: Optional[str]=None, adapter_id: Optional[str]=None, best_of: Optional[int]=None, decoder_input_details: Optional[bool]=None, do_sample: Optional[bool]=False, frequency_penalty: Optional[float]=None, grammar: Optional[TextGenerationInputGrammarType]=None, max_new_tokens: Optional[int]=None, repetition_penalty: Optional[float]=None, return_full_text: Optional[bool]=False, seed: Optional[int]=None, stop: Optional[List[str]]=None, stop_sequences: Optional[List[str]]=None, temperature: Optional[float]=None, top_k: Optional[int]=None, top_n_tokens: Optional[int]=None, top_p: Optional[float]=None, truncate: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=None) -> Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]: if decoder_input_details and (not details): warnings.warn('`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that the output from the server will be truncated.') decoder_input_details = False if stop_sequences is not None: warnings.warn("`stop_sequences` is a deprecated argument for `text_generation` task and will be removed in version '0.28.0'. Use `stop` instead.", FutureWarning) if stop is None: stop = stop_sequences parameters = {'adapter_id': adapter_id, 'best_of': best_of, 'decoder_input_details': decoder_input_details, 'details': details, 'do_sample': do_sample, 'frequency_penalty': frequency_penalty, 'grammar': grammar, 'max_new_tokens': max_new_tokens, 'repetition_penalty': repetition_penalty, 'return_full_text': return_full_text, 'seed': seed, 'stop': stop if stop is not None else [], 'temperature': temperature, 'top_k': top_k, 'top_n_tokens': top_n_tokens, 'top_p': top_p, 'truncate': truncate, 'typical_p': typical_p, 'watermark': watermark} parameters = {k: v for (k, v) in parameters.items() if v is not None} payload = {'inputs': prompt, 'parameters': parameters, 'stream': stream} unsupported_kwargs = _get_unsupported_text_generation_kwargs(model) if len(unsupported_kwargs) > 0: ignored_parameters = [] for key in unsupported_kwargs: if parameters.get(key): ignored_parameters.append(key) parameters.pop(key, None) if len(ignored_parameters) > 0: warnings.warn(f"API endpoint/model for text-generation is not served via TGI. Ignoring following parameters: {', '.join(ignored_parameters)}.", UserWarning) if details: warnings.warn('API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will be ignored meaning only the generated text will be returned.', UserWarning) details = False if stream: raise ValueError('API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream. 
Please pass `stream=False` as input.') try: bytes_output = await self.post(json=payload, model=model, task='text-generation', stream=stream) except _import_aiohttp().ClientResponseError as e: match = MODEL_KWARGS_NOT_USED_REGEX.search(e.response_error_payload['error']) if e.status == 400 and match: unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(',')] _set_unsupported_text_generation_kwargs(model, unused_params) return await self.text_generation(prompt=prompt, details=details, stream=stream, model=model, adapter_id=adapter_id, best_of=best_of, decoder_input_details=decoder_input_details, do_sample=do_sample, frequency_penalty=frequency_penalty, grammar=grammar, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop, temperature=temperature, top_k=top_k, top_n_tokens=top_n_tokens, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark) raise_text_generation_error(e) if stream: return _async_stream_text_generation_response(bytes_output, details) data = _bytes_to_dict(bytes_output) if isinstance(data, list): data = data[0] return TextGenerationOutput.parse_obj_as_instance(data) if details else data['generated_text'] async def text_to_image(self, prompt: str, *, negative_prompt: Optional[str]=None, height: Optional[float]=None, width: Optional[float]=None, num_inference_steps: Optional[float]=None, guidance_scale: Optional[float]=None, model: Optional[str]=None, **kwargs) -> 'Image': payload = {'inputs': prompt} parameters = {'negative_prompt': negative_prompt, 'height': height, 'width': width, 'num_inference_steps': num_inference_steps, 'guidance_scale': guidance_scale, **kwargs} for (key, value) in parameters.items(): if value is not None: payload.setdefault('parameters', {})[key] = value response = await self.post(json=payload, model=model, task='text-to-image') return _bytes_to_image(response) async def text_to_speech(self, text: str, *, model: Optional[str]=None) -> bytes: return await self.post(json={'inputs': text}, model=model, task='text-to-speech') async def token_classification(self, text: str, *, model: Optional[str]=None) -> List[TokenClassificationOutputElement]: payload: Dict[str, Any] = {'inputs': text} response = await self.post(json=payload, model=model, task='token-classification') return TokenClassificationOutputElement.parse_obj_as_list(response) async def translation(self, text: str, *, model: Optional[str]=None, src_lang: Optional[str]=None, tgt_lang: Optional[str]=None) -> TranslationOutput: if src_lang is not None and tgt_lang is None: raise ValueError('You cannot specify `src_lang` without specifying `tgt_lang`.') if src_lang is None and tgt_lang is not None: raise ValueError('You cannot specify `tgt_lang` without specifying `src_lang`.') payload: Dict = {'inputs': text} if src_lang and tgt_lang: payload['parameters'] = {'src_lang': src_lang, 'tgt_lang': tgt_lang} response = await self.post(json=payload, model=model, task='translation') return TranslationOutput.parse_obj_as_list(response)[0] async def visual_question_answering(self, image: ContentT, question: str, *, model: Optional[str]=None) -> List[VisualQuestionAnsweringOutputElement]: payload: Dict[str, Any] = {'question': question, 'image': _b64_encode(image)} response = await self.post(json=payload, model=model, task='visual-question-answering') return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response) async def zero_shot_classification(self, text: str, labels: List[str], *, multi_label: 
bool=False, hypothesis_template: Optional[str]=None, model: Optional[str]=None) -> List[ZeroShotClassificationOutputElement]: parameters = {'candidate_labels': labels, 'multi_label': multi_label} if hypothesis_template is not None: parameters['hypothesis_template'] = hypothesis_template response = await self.post(json={'inputs': text, 'parameters': parameters}, task='zero-shot-classification', model=model) output = _bytes_to_dict(response) return [ZeroShotClassificationOutputElement.parse_obj_as_instance({'label': label, 'score': score}) for (label, score) in zip(output['labels'], output['scores'])] async def zero_shot_image_classification(self, image: ContentT, labels: List[str], *, model: Optional[str]=None) -> List[ZeroShotImageClassificationOutputElement]: if len(labels) < 2: raise ValueError('You must specify at least 2 classes to compare.') response = await self.post(json={'image': _b64_encode(image), 'parameters': {'candidate_labels': ','.join(labels)}}, model=model, task='zero-shot-image-classification') return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response) def _get_client_session(self, headers: Optional[Dict]=None) -> 'ClientSession': aiohttp = _import_aiohttp() client_headers = self.headers.copy() if headers is not None: client_headers.update(headers) session = aiohttp.ClientSession(headers=client_headers, cookies=self.cookies, timeout=aiohttp.ClientTimeout(self.timeout), trust_env=self.trust_env) self._sessions[session] = set() session._wrapped_request = session._request async def _request(method, url, **kwargs): response = await session._wrapped_request(method, url, **kwargs) self._sessions[session].add(response) return response session._request = _request session._close = session.close async def close_session(): for response in self._sessions[session]: response.close() await session._close() self._sessions.pop(session, None) session.close = close_session return session def _resolve_url(self, model: Optional[str]=None, task: Optional[str]=None) -> str: model = model or self.model or self.base_url if model is not None and (model.startswith('http://') or model.startswith('https://')): return model if model is None: if task is None: raise ValueError('You must specify at least a model (repo_id or URL) or a task, either when instantiating `InferenceClient` or when making a request.') model = self.get_recommended_model(task) logger.info(f"Using recommended model {model} for task {task}. Note that it is encouraged to explicitly set `model='{model}'` as the recommended models list might get updated without prior notice.") return f'{INFERENCE_ENDPOINT}/pipeline/{task}/{model}' if task in ('feature-extraction', 'sentence-similarity') else f'{INFERENCE_ENDPOINT}/models/{model}' @staticmethod def get_recommended_model(task: str) -> str: model = _fetch_recommended_models().get(task) if model is None: raise ValueError(f'Task {task} has no recommended model. Please specify a model explicitly. 
Visit https://huggingface.co/tasks for more info.') return model async def get_endpoint_info(self, *, model: Optional[str]=None) -> Dict[str, Any]: model = model or self.model if model is None: raise ValueError('Model id not provided.') if model.startswith(('http://', 'https://')): url = model.rstrip('/') + '/info' else: url = f'{INFERENCE_ENDPOINT}/models/{model}/info' async with self._get_client_session() as client: response = await client.get(url, proxy=self.proxies) response.raise_for_status() return await response.json() async def health_check(self, model: Optional[str]=None) -> bool: model = model or self.model if model is None: raise ValueError('Model id not provided.') if not model.startswith(('http://', 'https://')): raise ValueError('Model must be an Inference Endpoint URL. For serverless Inference API, please use `InferenceClient.get_model_status`.') url = model.rstrip('/') + '/health' async with self._get_client_session() as client: response = await client.get(url, proxy=self.proxies) return response.status == 200 async def get_model_status(self, model: Optional[str]=None) -> ModelStatus: model = model or self.model if model is None: raise ValueError('Model id not provided.') if model.startswith('https://'): raise NotImplementedError('Model status is only available for Inference API endpoints.') url = f'{INFERENCE_ENDPOINT}/status/{model}' async with self._get_client_session() as client: response = await client.get(url, proxy=self.proxies) response.raise_for_status() response_data = await response.json() if 'error' in response_data: raise ValueError(response_data['error']) return ModelStatus(loaded=response_data['loaded'], state=response_data['state'], compute_type=response_data['compute_type'], framework=response_data['framework']) @property def chat(self) -> 'ProxyClientChat': return ProxyClientChat(self) class _ProxyClient: def __init__(self, client: AsyncInferenceClient): self._client = client class ProxyClientChat(_ProxyClient): @property def completions(self) -> 'ProxyClientChatCompletions': return ProxyClientChatCompletions(self._client) class ProxyClientChatCompletions(_ProxyClient): @property def create(self): return self._client.chat_completion # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/__init__.py from .audio_classification import AudioClassificationInput, AudioClassificationOutputElement, AudioClassificationParameters from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement from .automatic_speech_recognition import AutomaticSpeechRecognitionGenerationParameters, AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput, AutomaticSpeechRecognitionOutputChunk, AutomaticSpeechRecognitionParameters from .base import BaseInferenceType from .chat_completion import ChatCompletionInput, ChatCompletionInputFunctionDefinition, ChatCompletionInputFunctionName, ChatCompletionInputGrammarType, ChatCompletionInputMessage, ChatCompletionInputMessageChunk, ChatCompletionInputTool, ChatCompletionInputToolTypeClass, ChatCompletionInputURL, ChatCompletionOutput, ChatCompletionOutputComplete, ChatCompletionOutputFunctionDefinition, ChatCompletionOutputLogprob, ChatCompletionOutputLogprobs, ChatCompletionOutputMessage, ChatCompletionOutputToolCall, ChatCompletionOutputTopLogprob, ChatCompletionOutputUsage, ChatCompletionStreamOutput, ChatCompletionStreamOutputChoice, ChatCompletionStreamOutputDelta, ChatCompletionStreamOutputDeltaToolCall, ChatCompletionStreamOutputFunction, ChatCompletionStreamOutputLogprob, 
ChatCompletionStreamOutputLogprobs, ChatCompletionStreamOutputTopLogprob from .depth_estimation import DepthEstimationInput, DepthEstimationOutput from .document_question_answering import DocumentQuestionAnsweringInput, DocumentQuestionAnsweringInputData, DocumentQuestionAnsweringOutputElement, DocumentQuestionAnsweringParameters from .feature_extraction import FeatureExtractionInput from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters from .image_classification import ImageClassificationInput, ImageClassificationOutputElement, ImageClassificationParameters from .image_segmentation import ImageSegmentationInput, ImageSegmentationOutputElement, ImageSegmentationParameters from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize from .image_to_text import ImageToTextGenerationParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters from .object_detection import ObjectDetectionBoundingBox, ObjectDetectionInput, ObjectDetectionOutputElement, ObjectDetectionParameters from .question_answering import QuestionAnsweringInput, QuestionAnsweringInputData, QuestionAnsweringOutputElement, QuestionAnsweringParameters from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData from .summarization import SummarizationGenerationParameters, SummarizationInput, SummarizationOutput from .table_question_answering import TableQuestionAnsweringInput, TableQuestionAnsweringInputData, TableQuestionAnsweringOutputElement from .text2text_generation import Text2TextGenerationInput, Text2TextGenerationOutput, Text2TextGenerationParameters from .text_classification import TextClassificationInput, TextClassificationOutputElement, TextClassificationParameters from .text_generation import TextGenerationInput, TextGenerationInputGenerateParameters, TextGenerationInputGrammarType, TextGenerationOutput, TextGenerationOutputBestOfSequence, TextGenerationOutputDetails, TextGenerationOutputPrefillToken, TextGenerationOutputToken, TextGenerationStreamOutput, TextGenerationStreamOutputStreamDetails, TextGenerationStreamOutputToken from .text_to_audio import TextToAudioGenerationParameters, TextToAudioInput, TextToAudioOutput, TextToAudioParameters from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters, TextToImageTargetSize from .token_classification import TokenClassificationInput, TokenClassificationOutputElement, TokenClassificationParameters from .translation import TranslationGenerationParameters, TranslationInput, TranslationOutput from .video_classification import VideoClassificationInput, VideoClassificationOutputElement, VideoClassificationParameters from .visual_question_answering import VisualQuestionAnsweringInput, VisualQuestionAnsweringInputData, VisualQuestionAnsweringOutputElement, VisualQuestionAnsweringParameters from .zero_shot_classification import ZeroShotClassificationInput, ZeroShotClassificationInputData, ZeroShotClassificationOutputElement, ZeroShotClassificationParameters from .zero_shot_image_classification import ZeroShotImageClassificationInput, ZeroShotImageClassificationInputData, ZeroShotImageClassificationOutputElement, ZeroShotImageClassificationParameters from .zero_shot_object_detection import ZeroShotObjectDetectionBoundingBox, ZeroShotObjectDetectionInput, ZeroShotObjectDetectionInputData, ZeroShotObjectDetectionOutputElement # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/audio_classification.py from 
dataclasses import dataclass from typing import Any, Literal, Optional from .base import BaseInferenceType ClassificationOutputTransform = Literal['sigmoid', 'softmax', 'none'] @dataclass class AudioClassificationParameters(BaseInferenceType): function_to_apply: Optional['ClassificationOutputTransform'] = None top_k: Optional[int] = None '' @dataclass class AudioClassificationInput(BaseInferenceType): inputs: Any '' parameters: Optional[AudioClassificationParameters] = None '' @dataclass class AudioClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/audio_to_audio.py from dataclasses import dataclass from typing import Any from .base import BaseInferenceType @dataclass class AudioToAudioInput(BaseInferenceType): inputs: Any '' @dataclass class AudioToAudioOutputElement(BaseInferenceType): blob: Any '' content_type: str '' label: str '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py from dataclasses import dataclass from typing import Any, List, Literal, Optional, Union from .base import BaseInferenceType EarlyStoppingEnum = Literal['never'] @dataclass class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType): do_sample: Optional[bool] = None '' early_stopping: Optional[Union[bool, 'EarlyStoppingEnum']] = None '' epsilon_cutoff: Optional[float] = None '' eta_cutoff: Optional[float] = None '' max_length: Optional[int] = None '' max_new_tokens: Optional[int] = None '' min_length: Optional[int] = None '' min_new_tokens: Optional[int] = None '' num_beam_groups: Optional[int] = None '' num_beams: Optional[int] = None '' penalty_alpha: Optional[float] = None '' temperature: Optional[float] = None '' top_k: Optional[int] = None '' top_p: Optional[float] = None '' typical_p: Optional[float] = None '' use_cache: Optional[bool] = None '' @dataclass class AutomaticSpeechRecognitionParameters(BaseInferenceType): generate: Optional[AutomaticSpeechRecognitionGenerationParameters] = None '' return_timestamps: Optional[bool] = None '' @dataclass class AutomaticSpeechRecognitionInput(BaseInferenceType): inputs: Any '' parameters: Optional[AutomaticSpeechRecognitionParameters] = None '' @dataclass class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType): text: str '' timestamps: List[float] '' @dataclass class AutomaticSpeechRecognitionOutput(BaseInferenceType): text: str '' chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/base.py """""" import inspect import json from dataclasses import asdict, dataclass from typing import Any, Dict, List, Type, TypeVar, Union, get_args T = TypeVar('T', bound='BaseInferenceType') @dataclass class BaseInferenceType(dict): @classmethod def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]: output = cls.parse_obj(data) if not isinstance(output, list): raise ValueError(f'Invalid input data for {cls}. Expected a list, but got {type(output)}.') return output @classmethod def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T: output = cls.parse_obj(data) if isinstance(output, list): raise ValueError(f'Invalid input data for {cls}. 
Expected a single instance, but got a list.') return output @classmethod def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]: if isinstance(data, bytes): data = data.decode() if isinstance(data, str): data = json.loads(data) if isinstance(data, List): return [cls.parse_obj(d) for d in data] if not isinstance(data, dict): raise ValueError(f'Invalid data type: {type(data)}') init_values = {} other_values = {} for (key, value) in data.items(): key = normalize_key(key) if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init: if isinstance(value, dict) or isinstance(value, list): field_type = cls.__dataclass_fields__[key].type if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType): value = field_type.parse_obj(value) else: expected_types = get_args(field_type) for expected_type in expected_types: if getattr(expected_type, '_name', None) == 'List': expected_type = get_args(expected_type)[0] if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType): value = expected_type.parse_obj(value) break init_values[key] = value else: other_values[key] = value for key in cls.__dataclass_fields__: if key not in init_values: init_values[key] = None item = cls(**init_values) item.update(other_values) return item def __post_init__(self): self.update(asdict(self)) def __setitem__(self, __key: Any, __value: Any) -> None: super().__setitem__(__key, __value) if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value: self.__setattr__(__key, __value) return def __setattr__(self, __name: str, __value: Any) -> None: super().__setattr__(__name, __value) if self.get(__name) != __value: self[__name] = __value return def normalize_key(key: str) -> str: return key.replace('-', '_').replace(' ', '_').lower() # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/chat_completion.py from dataclasses import dataclass from typing import Any, List, Literal, Optional, Union from .base import BaseInferenceType @dataclass class ChatCompletionInputURL(BaseInferenceType): url: str ChatCompletionInputMessageChunkType = Literal['text', 'image_url'] @dataclass class ChatCompletionInputMessageChunk(BaseInferenceType): type: 'ChatCompletionInputMessageChunkType' image_url: Optional[ChatCompletionInputURL] = None text: Optional[str] = None @dataclass class ChatCompletionInputMessage(BaseInferenceType): content: Union[List[ChatCompletionInputMessageChunk], str] role: str name: Optional[str] = None ChatCompletionInputGrammarTypeType = Literal['json', 'regex'] @dataclass class ChatCompletionInputGrammarType(BaseInferenceType): type: 'ChatCompletionInputGrammarTypeType' value: Any '' @dataclass class ChatCompletionInputFunctionName(BaseInferenceType): name: str @dataclass class ChatCompletionInputToolTypeClass(BaseInferenceType): function: Optional[ChatCompletionInputFunctionName] = None @dataclass class ChatCompletionInputFunctionDefinition(BaseInferenceType): arguments: Any name: str description: Optional[str] = None @dataclass class ChatCompletionInputTool(BaseInferenceType): function: ChatCompletionInputFunctionDefinition type: str @dataclass class ChatCompletionInput(BaseInferenceType): messages: List[ChatCompletionInputMessage] '' frequency_penalty: Optional[float] = None '' logit_bias: Optional[List[float]] = None '' logprobs: Optional[bool] = None '' max_tokens: Optional[int] = None '' model: Optional[str] = None '' n: Optional[int] = None '' presence_penalty: Optional[float] = None '' 
response_format: Optional[ChatCompletionInputGrammarType] = None seed: Optional[int] = None stop: Optional[List[str]] = None '' stream: Optional[bool] = None temperature: Optional[float] = None '' tool_choice: Optional[Union[ChatCompletionInputToolTypeClass, str]] = None tool_prompt: Optional[str] = None '' tools: Optional[List[ChatCompletionInputTool]] = None '' top_logprobs: Optional[int] = None '' top_p: Optional[float] = None '' @dataclass class ChatCompletionOutputTopLogprob(BaseInferenceType): logprob: float token: str @dataclass class ChatCompletionOutputLogprob(BaseInferenceType): logprob: float token: str top_logprobs: List[ChatCompletionOutputTopLogprob] @dataclass class ChatCompletionOutputLogprobs(BaseInferenceType): content: List[ChatCompletionOutputLogprob] @dataclass class ChatCompletionOutputFunctionDefinition(BaseInferenceType): arguments: Any name: str description: Optional[str] = None @dataclass class ChatCompletionOutputToolCall(BaseInferenceType): function: ChatCompletionOutputFunctionDefinition id: str type: str @dataclass class ChatCompletionOutputMessage(BaseInferenceType): role: str content: Optional[str] = None tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None @dataclass class ChatCompletionOutputComplete(BaseInferenceType): finish_reason: str index: int message: ChatCompletionOutputMessage logprobs: Optional[ChatCompletionOutputLogprobs] = None @dataclass class ChatCompletionOutputUsage(BaseInferenceType): completion_tokens: int prompt_tokens: int total_tokens: int @dataclass class ChatCompletionOutput(BaseInferenceType): choices: List[ChatCompletionOutputComplete] created: int id: str model: str system_fingerprint: str usage: ChatCompletionOutputUsage @dataclass class ChatCompletionStreamOutputFunction(BaseInferenceType): arguments: str name: Optional[str] = None @dataclass class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType): function: ChatCompletionStreamOutputFunction id: str index: int type: str @dataclass class ChatCompletionStreamOutputDelta(BaseInferenceType): role: str content: Optional[str] = None tool_calls: Optional[ChatCompletionStreamOutputDeltaToolCall] = None @dataclass class ChatCompletionStreamOutputTopLogprob(BaseInferenceType): logprob: float token: str @dataclass class ChatCompletionStreamOutputLogprob(BaseInferenceType): logprob: float token: str top_logprobs: List[ChatCompletionStreamOutputTopLogprob] @dataclass class ChatCompletionStreamOutputLogprobs(BaseInferenceType): content: List[ChatCompletionStreamOutputLogprob] @dataclass class ChatCompletionStreamOutputChoice(BaseInferenceType): delta: ChatCompletionStreamOutputDelta index: int finish_reason: Optional[str] = None logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None @dataclass class ChatCompletionStreamOutput(BaseInferenceType): choices: List[ChatCompletionStreamOutputChoice] created: int id: str model: str system_fingerprint: str # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/depth_estimation.py from dataclasses import dataclass from typing import Any, Dict, Optional from .base import BaseInferenceType @dataclass class DepthEstimationInput(BaseInferenceType): inputs: Any '' parameters: Optional[Dict[str, Any]] = None '' @dataclass class DepthEstimationOutput(BaseInferenceType): depth: Any '' predicted_depth: Any '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/document_question_answering.py from dataclasses import dataclass from typing import Any, List, Optional, Union from .base import 
BaseInferenceType @dataclass class DocumentQuestionAnsweringInputData(BaseInferenceType): image: Any '' question: str '' @dataclass class DocumentQuestionAnsweringParameters(BaseInferenceType): doc_stride: Optional[int] = None '' handle_impossible_answer: Optional[bool] = None '' lang: Optional[str] = None '' max_answer_len: Optional[int] = None '' max_question_len: Optional[int] = None '' max_seq_len: Optional[int] = None '' top_k: Optional[int] = None '' word_boxes: Optional[List[Union[List[float], str]]] = None '' @dataclass class DocumentQuestionAnsweringInput(BaseInferenceType): inputs: DocumentQuestionAnsweringInputData '' parameters: Optional[DocumentQuestionAnsweringParameters] = None '' @dataclass class DocumentQuestionAnsweringOutputElement(BaseInferenceType): answer: str '' end: int '' score: float '' start: int '' words: List[int] '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/feature_extraction.py from dataclasses import dataclass from typing import Literal, Optional from .base import BaseInferenceType FeatureExtractionInputTruncationDirection = Literal['Left', 'Right'] @dataclass class FeatureExtractionInput(BaseInferenceType): inputs: str '' normalize: Optional[bool] = None prompt_name: Optional[str] = None '' truncate: Optional[bool] = None truncation_direction: Optional['FeatureExtractionInputTruncationDirection'] = None # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/fill_mask.py from dataclasses import dataclass from typing import Any, List, Optional from .base import BaseInferenceType @dataclass class FillMaskParameters(BaseInferenceType): targets: Optional[List[str]] = None '' top_k: Optional[int] = None '' @dataclass class FillMaskInput(BaseInferenceType): inputs: str '' parameters: Optional[FillMaskParameters] = None '' @dataclass class FillMaskOutputElement(BaseInferenceType): score: float '' sequence: str '' token: int '' token_str: Any fill_mask_output_token_str: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/image_classification.py from dataclasses import dataclass from typing import Any, Literal, Optional from .base import BaseInferenceType ClassificationOutputTransform = Literal['sigmoid', 'softmax', 'none'] @dataclass class ImageClassificationParameters(BaseInferenceType): function_to_apply: Optional['ClassificationOutputTransform'] = None top_k: Optional[int] = None '' @dataclass class ImageClassificationInput(BaseInferenceType): inputs: Any '' parameters: Optional[ImageClassificationParameters] = None '' @dataclass class ImageClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/image_segmentation.py from dataclasses import dataclass from typing import Any, Literal, Optional from .base import BaseInferenceType ImageSegmentationSubtask = Literal['instance', 'panoptic', 'semantic'] @dataclass class ImageSegmentationParameters(BaseInferenceType): mask_threshold: Optional[float] = None '' overlap_mask_area_threshold: Optional[float] = None '' subtask: Optional['ImageSegmentationSubtask'] = None '' threshold: Optional[float] = None '' @dataclass class ImageSegmentationInput(BaseInferenceType): inputs: Any '' parameters: Optional[ImageSegmentationParameters] = None '' @dataclass class ImageSegmentationOutputElement(BaseInferenceType): label: str '' mask: Any '' score: Optional[float] = None '' # File: 
huggingface_hub-main/src/huggingface_hub/inference/_generated/types/image_to_image.py from dataclasses import dataclass from typing import Any, List, Optional from .base import BaseInferenceType @dataclass class ImageToImageTargetSize(BaseInferenceType): height: int width: int @dataclass class ImageToImageParameters(BaseInferenceType): guidance_scale: Optional[float] = None '' negative_prompt: Optional[List[str]] = None '' num_inference_steps: Optional[int] = None '' target_size: Optional[ImageToImageTargetSize] = None '' @dataclass class ImageToImageInput(BaseInferenceType): inputs: Any '' parameters: Optional[ImageToImageParameters] = None '' @dataclass class ImageToImageOutput(BaseInferenceType): image: Any '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/image_to_text.py from dataclasses import dataclass from typing import Any, Literal, Optional, Union from .base import BaseInferenceType EarlyStoppingEnum = Literal['never'] @dataclass class ImageToTextGenerationParameters(BaseInferenceType): do_sample: Optional[bool] = None '' early_stopping: Optional[Union[bool, 'EarlyStoppingEnum']] = None '' epsilon_cutoff: Optional[float] = None '' eta_cutoff: Optional[float] = None '' max_length: Optional[int] = None '' max_new_tokens: Optional[int] = None '' min_length: Optional[int] = None '' min_new_tokens: Optional[int] = None '' num_beam_groups: Optional[int] = None '' num_beams: Optional[int] = None '' penalty_alpha: Optional[float] = None '' temperature: Optional[float] = None '' top_k: Optional[int] = None '' top_p: Optional[float] = None '' typical_p: Optional[float] = None '' use_cache: Optional[bool] = None '' @dataclass class ImageToTextParameters(BaseInferenceType): generate: Optional[ImageToTextGenerationParameters] = None '' max_new_tokens: Optional[int] = None '' @dataclass class ImageToTextInput(BaseInferenceType): inputs: Any '' parameters: Optional[ImageToTextParameters] = None '' @dataclass class ImageToTextOutput(BaseInferenceType): generated_text: Any image_to_text_output_generated_text: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/object_detection.py from dataclasses import dataclass from typing import Any, Optional from .base import BaseInferenceType @dataclass class ObjectDetectionParameters(BaseInferenceType): threshold: Optional[float] = None '' @dataclass class ObjectDetectionInput(BaseInferenceType): inputs: Any '' parameters: Optional[ObjectDetectionParameters] = None '' @dataclass class ObjectDetectionBoundingBox(BaseInferenceType): xmax: int xmin: int ymax: int ymin: int @dataclass class ObjectDetectionOutputElement(BaseInferenceType): box: ObjectDetectionBoundingBox '' label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/question_answering.py from dataclasses import dataclass from typing import Optional from .base import BaseInferenceType @dataclass class QuestionAnsweringInputData(BaseInferenceType): context: str '' question: str '' @dataclass class QuestionAnsweringParameters(BaseInferenceType): align_to_words: Optional[bool] = None '' doc_stride: Optional[int] = None '' handle_impossible_answer: Optional[bool] = None '' max_answer_len: Optional[int] = None '' max_question_len: Optional[int] = None '' max_seq_len: Optional[int] = None '' top_k: Optional[int] = None '' @dataclass class QuestionAnsweringInput(BaseInferenceType): inputs: QuestionAnsweringInputData '' parameters: Optional[QuestionAnsweringParameters] = None '' 
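# Editor's sketch (illustrative, not part of the generated file): `BaseInferenceType.parse_obj` builds these dataclasses straight from raw JSON, recursing into nested fields such as `inputs` below; missing optional fields default to None. Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    _example = QuestionAnsweringInput.parse_obj(
        {"inputs": {"question": "Who?", "context": "Ada Lovelace wrote the notes."}}
    )
    assert _example.inputs.question == "Who?"  # nested dict was parsed into QuestionAnsweringInputData
    assert _example.parameters is None  # absent optional field filled with None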
@dataclass class QuestionAnsweringOutputElement(BaseInferenceType): answer: str '' end: int '' score: float '' start: int '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/sentence_similarity.py from dataclasses import dataclass from typing import Any, Dict, List, Optional from .base import BaseInferenceType @dataclass class SentenceSimilarityInputData(BaseInferenceType): sentences: List[str] '' source_sentence: str '' @dataclass class SentenceSimilarityInput(BaseInferenceType): inputs: SentenceSimilarityInputData parameters: Optional[Dict[str, Any]] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/summarization.py from dataclasses import dataclass from typing import Any, Dict, Literal, Optional from .base import BaseInferenceType SummarizationGenerationTruncationStrategy = Literal['do_not_truncate', 'longest_first', 'only_first', 'only_second'] @dataclass class SummarizationGenerationParameters(BaseInferenceType): clean_up_tokenization_spaces: Optional[bool] = None '' generate_parameters: Optional[Dict[str, Any]] = None '' truncation: Optional['SummarizationGenerationTruncationStrategy'] = None '' @dataclass class SummarizationInput(BaseInferenceType): inputs: str '' parameters: Optional[SummarizationGenerationParameters] = None '' @dataclass class SummarizationOutput(BaseInferenceType): summary_text: str '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/table_question_answering.py from dataclasses import dataclass from typing import Any, Dict, List, Optional from .base import BaseInferenceType @dataclass class TableQuestionAnsweringInputData(BaseInferenceType): question: str '' table: Dict[str, List[str]] '' @dataclass class TableQuestionAnsweringInput(BaseInferenceType): inputs: TableQuestionAnsweringInputData '' parameters: Optional[Dict[str, Any]] = None '' @dataclass class TableQuestionAnsweringOutputElement(BaseInferenceType): answer: str '' cells: List[str] '' coordinates: List[List[int]] '' aggregator: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/text2text_generation.py from dataclasses import dataclass from typing import Any, Dict, Literal, Optional from .base import BaseInferenceType Text2TextGenerationTruncationStrategy = Literal['do_not_truncate', 'longest_first', 'only_first', 'only_second'] @dataclass class Text2TextGenerationParameters(BaseInferenceType): clean_up_tokenization_spaces: Optional[bool] = None '' generate_parameters: Optional[Dict[str, Any]] = None '' truncation: Optional['Text2TextGenerationTruncationStrategy'] = None '' @dataclass class Text2TextGenerationInput(BaseInferenceType): inputs: str '' parameters: Optional[Text2TextGenerationParameters] = None '' @dataclass class Text2TextGenerationOutput(BaseInferenceType): generated_text: Any text2_text_generation_output_generated_text: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/text_classification.py from dataclasses import dataclass from typing import Literal, Optional from .base import BaseInferenceType ClassificationOutputTransform = Literal['sigmoid', 'softmax', 'none'] @dataclass class TextClassificationParameters(BaseInferenceType): function_to_apply: Optional['ClassificationOutputTransform'] = None top_k: Optional[int] = None '' @dataclass class TextClassificationInput(BaseInferenceType): inputs: str '' parameters: Optional[TextClassificationParameters] = None '' @dataclass class 
TextClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/text_generation.py from dataclasses import dataclass from typing import Any, List, Literal, Optional from .base import BaseInferenceType TypeEnum = Literal['json', 'regex'] @dataclass class TextGenerationInputGrammarType(BaseInferenceType): type: 'TypeEnum' value: Any '' @dataclass class TextGenerationInputGenerateParameters(BaseInferenceType): adapter_id: Optional[str] = None '' best_of: Optional[int] = None '' decoder_input_details: Optional[bool] = None '' details: Optional[bool] = None '' do_sample: Optional[bool] = None '' frequency_penalty: Optional[float] = None '' grammar: Optional[TextGenerationInputGrammarType] = None max_new_tokens: Optional[int] = None '' repetition_penalty: Optional[float] = None '' return_full_text: Optional[bool] = None '' seed: Optional[int] = None '' stop: Optional[List[str]] = None '' temperature: Optional[float] = None '' top_k: Optional[int] = None '' top_n_tokens: Optional[int] = None '' top_p: Optional[float] = None '' truncate: Optional[int] = None '' typical_p: Optional[float] = None '' watermark: Optional[bool] = None '' @dataclass class TextGenerationInput(BaseInferenceType): inputs: str parameters: Optional[TextGenerationInputGenerateParameters] = None stream: Optional[bool] = None TextGenerationOutputFinishReason = Literal['length', 'eos_token', 'stop_sequence'] @dataclass class TextGenerationOutputPrefillToken(BaseInferenceType): id: int logprob: float text: str @dataclass class TextGenerationOutputToken(BaseInferenceType): id: int logprob: float special: bool text: str @dataclass class TextGenerationOutputBestOfSequence(BaseInferenceType): finish_reason: 'TextGenerationOutputFinishReason' generated_text: str generated_tokens: int prefill: List[TextGenerationOutputPrefillToken] tokens: List[TextGenerationOutputToken] seed: Optional[int] = None top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None @dataclass class TextGenerationOutputDetails(BaseInferenceType): finish_reason: 'TextGenerationOutputFinishReason' generated_tokens: int prefill: List[TextGenerationOutputPrefillToken] tokens: List[TextGenerationOutputToken] best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None seed: Optional[int] = None top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None @dataclass class TextGenerationOutput(BaseInferenceType): generated_text: str details: Optional[TextGenerationOutputDetails] = None @dataclass class TextGenerationStreamOutputStreamDetails(BaseInferenceType): finish_reason: 'TextGenerationOutputFinishReason' generated_tokens: int seed: Optional[int] = None @dataclass class TextGenerationStreamOutputToken(BaseInferenceType): id: int logprob: float special: bool text: str @dataclass class TextGenerationStreamOutput(BaseInferenceType): index: int token: TextGenerationStreamOutputToken details: Optional[TextGenerationStreamOutputStreamDetails] = None generated_text: Optional[str] = None top_tokens: Optional[List[TextGenerationStreamOutputToken]] = None # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/text_to_audio.py from dataclasses import dataclass from typing import Any, Literal, Optional, Union from .base import BaseInferenceType EarlyStoppingEnum = Literal['never'] @dataclass class TextToAudioGenerationParameters(BaseInferenceType): do_sample: Optional[bool] = None '' early_stopping: Optional[Union[bool, 
'EarlyStoppingEnum']] = None '' epsilon_cutoff: Optional[float] = None '' eta_cutoff: Optional[float] = None '' max_length: Optional[int] = None '' max_new_tokens: Optional[int] = None '' min_length: Optional[int] = None '' min_new_tokens: Optional[int] = None '' num_beam_groups: Optional[int] = None '' num_beams: Optional[int] = None '' penalty_alpha: Optional[float] = None '' temperature: Optional[float] = None '' top_k: Optional[int] = None '' top_p: Optional[float] = None '' typical_p: Optional[float] = None '' use_cache: Optional[bool] = None '' @dataclass class TextToAudioParameters(BaseInferenceType): generate: Optional[TextToAudioGenerationParameters] = None '' @dataclass class TextToAudioInput(BaseInferenceType): inputs: str '' parameters: Optional[TextToAudioParameters] = None '' @dataclass class TextToAudioOutput(BaseInferenceType): audio: Any '' sampling_rate: Any text_to_audio_output_sampling_rate: Optional[float] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/text_to_image.py from dataclasses import dataclass from typing import Any, List, Optional from .base import BaseInferenceType @dataclass class TextToImageTargetSize(BaseInferenceType): height: int width: int @dataclass class TextToImageParameters(BaseInferenceType): guidance_scale: Optional[float] = None '' negative_prompt: Optional[List[str]] = None '' num_inference_steps: Optional[int] = None '' scheduler: Optional[str] = None '' target_size: Optional[TextToImageTargetSize] = None '' @dataclass class TextToImageInput(BaseInferenceType): inputs: str '' parameters: Optional[TextToImageParameters] = None '' @dataclass class TextToImageOutput(BaseInferenceType): image: Any '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/token_classification.py from dataclasses import dataclass from typing import Any, List, Literal, Optional from .base import BaseInferenceType TokenClassificationAggregationStrategy = Literal['none', 'simple', 'first', 'average', 'max'] @dataclass class TokenClassificationParameters(BaseInferenceType): aggregation_strategy: Optional['TokenClassificationAggregationStrategy'] = None '' ignore_labels: Optional[List[str]] = None '' stride: Optional[int] = None '' @dataclass class TokenClassificationInput(BaseInferenceType): inputs: str '' parameters: Optional[TokenClassificationParameters] = None '' @dataclass class TokenClassificationOutputElement(BaseInferenceType): label: Any score: float '' end: Optional[int] = None '' entity_group: Optional[str] = None '' start: Optional[int] = None '' word: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/translation.py from dataclasses import dataclass from typing import Any, Dict, Literal, Optional from .base import BaseInferenceType TranslationGenerationTruncationStrategy = Literal['do_not_truncate', 'longest_first', 'only_first', 'only_second'] @dataclass class TranslationGenerationParameters(BaseInferenceType): clean_up_tokenization_spaces: Optional[bool] = None '' generate_parameters: Optional[Dict[str, Any]] = None '' truncation: Optional['TranslationGenerationTruncationStrategy'] = None '' @dataclass class TranslationInput(BaseInferenceType): inputs: str '' parameters: Optional[TranslationGenerationParameters] = None '' @dataclass class TranslationOutput(BaseInferenceType): translation_text: str '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/video_classification.py from dataclasses import dataclass from typing 
import Any, Literal, Optional from .base import BaseInferenceType ClassificationOutputTransform = Literal['sigmoid', 'softmax', 'none'] @dataclass class VideoClassificationParameters(BaseInferenceType): frame_sampling_rate: Optional[int] = None '' function_to_apply: Optional['ClassificationOutputTransform'] = None num_frames: Optional[int] = None '' top_k: Optional[int] = None '' @dataclass class VideoClassificationInput(BaseInferenceType): inputs: Any '' parameters: Optional[VideoClassificationParameters] = None '' @dataclass class VideoClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/visual_question_answering.py from dataclasses import dataclass from typing import Any, Optional from .base import BaseInferenceType @dataclass class VisualQuestionAnsweringInputData(BaseInferenceType): image: Any '' question: Any '' @dataclass class VisualQuestionAnsweringParameters(BaseInferenceType): top_k: Optional[int] = None '' @dataclass class VisualQuestionAnsweringInput(BaseInferenceType): inputs: VisualQuestionAnsweringInputData '' parameters: Optional[VisualQuestionAnsweringParameters] = None '' @dataclass class VisualQuestionAnsweringOutputElement(BaseInferenceType): label: Any score: float '' answer: Optional[str] = None '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py from dataclasses import dataclass from typing import List, Optional from .base import BaseInferenceType @dataclass class ZeroShotClassificationInputData(BaseInferenceType): candidate_labels: List[str] '' text: str '' @dataclass class ZeroShotClassificationParameters(BaseInferenceType): hypothesis_template: Optional[str] = None '' multi_label: Optional[bool] = None '' @dataclass class ZeroShotClassificationInput(BaseInferenceType): inputs: ZeroShotClassificationInputData '' parameters: Optional[ZeroShotClassificationParameters] = None '' @dataclass class ZeroShotClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py from dataclasses import dataclass from typing import Any, List, Optional from .base import BaseInferenceType @dataclass class ZeroShotImageClassificationInputData(BaseInferenceType): candidate_labels: List[str] '' image: Any '' @dataclass class ZeroShotImageClassificationParameters(BaseInferenceType): hypothesis_template: Optional[str] = None '' @dataclass class ZeroShotImageClassificationInput(BaseInferenceType): inputs: ZeroShotImageClassificationInputData '' parameters: Optional[ZeroShotImageClassificationParameters] = None '' @dataclass class ZeroShotImageClassificationOutputElement(BaseInferenceType): label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py from dataclasses import dataclass from typing import Any, Dict, List, Optional from .base import BaseInferenceType @dataclass class ZeroShotObjectDetectionInputData(BaseInferenceType): candidate_labels: List[str] '' image: Any '' @dataclass class ZeroShotObjectDetectionInput(BaseInferenceType): inputs: ZeroShotObjectDetectionInputData '' parameters: Optional[Dict[str, Any]] = None '' @dataclass class ZeroShotObjectDetectionBoundingBox(BaseInferenceType): xmax: int xmin: int ymax: int ymin: int @dataclass class ZeroShotObjectDetectionOutputElement(BaseInferenceType): box: 
ZeroShotObjectDetectionBoundingBox '' label: str '' score: float '' # File: huggingface_hub-main/src/huggingface_hub/inference/_templating.py from functools import lru_cache from typing import Callable, Dict, List, Optional, Union from ..errors import HfHubHTTPError, RepositoryNotFoundError, TemplateError from ..utils import is_minijinja_available def _import_minijinja(): if not is_minijinja_available(): raise ImportError('Cannot render template. Please install minijinja using `pip install minijinja`.') import minijinja return minijinja def render_chat_prompt(*, model_id: str, messages: List[Dict[str, str]], token: Union[str, bool, None]=None, add_generation_prompt: bool=True, **kwargs) -> str: minijinja = _import_minijinja() template = _fetch_and_compile_template(model_id=model_id, token=token) try: return template(messages=messages, add_generation_prompt=add_generation_prompt, **kwargs) except minijinja.TemplateError as e: raise TemplateError(f"Error while trying to render chat prompt for model '{model_id}': {e}") from e @lru_cache def _fetch_and_compile_template(*, model_id: str, token: Union[str, None]) -> Callable: from huggingface_hub.hf_api import HfApi minijinja = _import_minijinja() try: config = HfApi(token=token).model_info(model_id).config except RepositoryNotFoundError as e: raise TemplateError(f"Cannot render chat template: model '{model_id}' not found.") from e except HfHubHTTPError as e: raise TemplateError(f"Error while trying to fetch chat template for model '{model_id}': {e}") from e if config is None: raise TemplateError(f"Config not found for model '{model_id}'.") tokenizer_config = config.get('tokenizer_config') if tokenizer_config is None: raise TemplateError(f"Tokenizer config not found for model '{model_id}'.") if tokenizer_config.get('chat_template') is None: raise TemplateError(f"Chat template not found in tokenizer_config for model '{model_id}'.") chat_template = tokenizer_config['chat_template'] if not isinstance(chat_template, str): raise TemplateError(f"Chat template must be a string, not '{type(chat_template)}' (model: {model_id}).") special_tokens: Dict[str, Optional[str]] = {} for (key, value) in tokenizer_config.items(): if 'token' in key: if isinstance(value, str): special_tokens[key] = value elif isinstance(value, dict) and value.get('__type') == 'AddedToken': special_tokens[key] = value.get('content') env = minijinja.Environment() try: env.add_template('chat_template', chat_template) except minijinja.TemplateError as e: raise TemplateError(f"Error while trying to compile chat template for model '{model_id}': {e}") from e return lambda **kwargs: env.render_template('chat_template', **kwargs, **special_tokens)
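A usage sketch for the template helper above (editor's illustration): it assumes `minijinja` is installed, network access to the Hub, and a model whose tokenizer config declares a `chat_template`; the model id is only an example.

from huggingface_hub.inference._templating import render_chat_prompt

prompt = render_chat_prompt(
    model_id="HuggingFaceH4/zephyr-7b-beta",  # example repo; any model with a chat_template works
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is git-lfs?"},
    ],
)
# `prompt` is the rendered chat string, ready to send to a text-generation endpoint.

# File: huggingface_hub-main/src/huggingface_hub/inference_api.py import io from typing import Any, Dict, List, Optional, Union from .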
import constants from .hf_api import HfApi from .utils import build_hf_headers, get_session, is_pillow_available, logging, validate_hf_hub_args from .utils._deprecation import _deprecate_method logger = logging.get_logger(__name__) ALL_TASKS = ['text-classification', 'token-classification', 'table-question-answering', 'question-answering', 'zero-shot-classification', 'translation', 'summarization', 'conversational', 'feature-extraction', 'text-generation', 'text2text-generation', 'fill-mask', 'sentence-similarity', 'text-to-speech', 'automatic-speech-recognition', 'audio-to-audio', 'audio-classification', 'voice-activity-detection', 'image-classification', 'object-detection', 'image-segmentation', 'text-to-image', 'image-to-image', 'tabular-classification', 'tabular-regression'] class InferenceApi: @validate_hf_hub_args @_deprecate_method(version='1.0', message='`InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`. Check out this guide to learn how to convert your script to use it: https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client.') def __init__(self, repo_id: str, task: Optional[str]=None, token: Optional[str]=None, gpu: bool=False): self.options = {'wait_for_model': True, 'use_gpu': gpu} self.headers = build_hf_headers(token=token) model_info = HfApi(token=token).model_info(repo_id=repo_id) if not model_info.pipeline_tag and (not task): raise ValueError('Task not specified in the repository. Please add it to the model card using pipeline_tag (https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined)') if task and task != model_info.pipeline_tag: if task not in ALL_TASKS: raise ValueError(f"Invalid task {task}. Make sure it's valid.") logger.warning("You're using a different task than the one specified in the repository. Be sure to know what you're doing :)") self.task = task else: assert model_info.pipeline_tag is not None, 'Pipeline tag cannot be None' self.task = model_info.pipeline_tag self.api_url = f'{constants.INFERENCE_ENDPOINT}/pipeline/{self.task}/{repo_id}' def __repr__(self): return f"InferenceAPI(api_url='{self.api_url}', task='{self.task}', options={self.options})" def __call__(self, inputs: Optional[Union[str, Dict, List[str], List[List[str]]]]=None, params: Optional[Dict]=None, data: Optional[bytes]=None, raw_response: bool=False) -> Any: payload: Dict[str, Any] = {'options': self.options} if inputs: payload['inputs'] = inputs if params: payload['parameters'] = params response = get_session().post(self.api_url, headers=self.headers, json=payload, data=data) if raw_response: return response content_type = response.headers.get('Content-Type') or '' if content_type.startswith('image'): if not is_pillow_available(): raise ImportError(f"Task '{self.task}' returned as image but Pillow is not installed. Please install it (`pip install Pillow`) or pass `raw_response=True` to get the raw `Response` object and parse the image by yourself.") from PIL import Image return Image.open(io.BytesIO(response.content)) elif content_type == 'application/json': return response.json() else: raise NotImplementedError(f'{content_type} output type is not implemented yet. 
You can pass `raw_response=True` to get the raw `Response` object and parse the output by yourself.') # File: huggingface_hub-main/src/huggingface_hub/keras_mixin.py import collections.abc as collections import json import os import warnings from functools import wraps from pathlib import Path from shutil import copytree from typing import Any, Dict, List, Optional, Union from huggingface_hub import ModelHubMixin, snapshot_download from huggingface_hub.utils import get_tf_version, is_graphviz_available, is_pydot_available, is_tf_available, yaml_dump from . import constants from .hf_api import HfApi from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args from .utils._typing import CallableT logger = logging.get_logger(__name__) keras = None if is_tf_available(): try: import tf_keras as keras except ImportError: import tensorflow as tf keras = tf.keras def _requires_keras_2_model(fn: CallableT) -> CallableT: @wraps(fn) def _inner(model, *args, **kwargs): if not hasattr(model, 'history'): raise NotImplementedError(f"Cannot use '{fn.__name__}': Keras 3.x is not supported. Please save models manually and upload them using `upload_folder` or `huggingface-cli upload`.") return fn(model, *args, **kwargs) return _inner def _flatten_dict(dictionary, parent_key=''): items = [] for (key, value) in dictionary.items(): new_key = f'{parent_key}.{key}' if parent_key else key if isinstance(value, collections.MutableMapping): items.extend(_flatten_dict(value, new_key).items()) else: items.append((new_key, value)) return dict(items) def _create_hyperparameter_table(model): table = None if model.optimizer is not None: optimizer_params = model.optimizer.get_config() optimizer_params = _flatten_dict(optimizer_params) optimizer_params['training_precision'] = keras.mixed_precision.global_policy().name table = '| Hyperparameters | Value |\n| :-- | :-- |\n' for (key, value) in optimizer_params.items(): table += f'| {key} | {value} |\n' return table def _plot_network(model, save_directory): keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=False, show_dtype=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96, layer_range=None) def _create_model_card(model, repo_dir: Path, plot_model: bool=True, metadata: Optional[dict]=None): readme_path = repo_dir / 'README.md' if readme_path.exists(): return hyperparameters = _create_hyperparameter_table(model) if plot_model and is_graphviz_available() and is_pydot_available(): _plot_network(model, repo_dir) if metadata is None: metadata = {} metadata['library_name'] = 'keras' model_card: str = '---\n' model_card += yaml_dump(metadata, default_flow_style=False) model_card += '---\n' model_card += '\n## Model description\n\nMore information needed\n' model_card += '\n## Intended uses & limitations\n\nMore information needed\n' model_card += '\n## Training and evaluation data\n\nMore information needed\n' if hyperparameters is not None: model_card += '\n## Training procedure\n' model_card += '\n### Training hyperparameters\n' model_card += '\nThe following hyperparameters were used during training:\n\n' model_card += hyperparameters model_card += '\n' if plot_model and os.path.exists(f'{repo_dir}/model.png'): model_card += '\n ## Model Plot\n' model_card += '\n
<details>' model_card += '\n<summary>View Model Plot</summary>\n' path_to_plot = './model.png' model_card += f'\n![Model Image]({path_to_plot})\n' model_card += '\n</details>
' readme_path.write_text(model_card) @_requires_keras_2_model def save_pretrained_keras(model, save_directory: Union[str, Path], config: Optional[Dict[str, Any]]=None, include_optimizer: bool=False, plot_model: bool=True, tags: Optional[Union[list, str]]=None, **model_save_kwargs): if keras is None: raise ImportError('Called a Tensorflow-specific function but could not import it.') if not model.built: raise ValueError('Model should be built before trying to save') save_directory = Path(save_directory) save_directory.mkdir(parents=True, exist_ok=True) if config: if not isinstance(config, dict): raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'") with (save_directory / constants.CONFIG_NAME).open('w') as f: json.dump(config, f) metadata = {} if isinstance(tags, list): metadata['tags'] = tags elif isinstance(tags, str): metadata['tags'] = [tags] task_name = model_save_kwargs.pop('task_name', None) if task_name is not None: warnings.warn('`task_name` input argument is deprecated. Pass `tags` instead.', FutureWarning) if 'tags' in metadata: metadata['tags'].append(task_name) else: metadata['tags'] = [task_name] if model.history is not None: if model.history.history != {}: path = save_directory / 'history.json' if path.exists(): warnings.warn('`history.json` file already exists, it will be overwritten by the history of this version.', UserWarning) with path.open('w', encoding='utf-8') as f: json.dump(model.history.history, f, indent=2, sort_keys=True) _create_model_card(model, save_directory, plot_model, metadata) keras.models.save_model(model, save_directory, include_optimizer=include_optimizer, **model_save_kwargs) def from_pretrained_keras(*args, **kwargs) -> 'KerasModelHubMixin': return KerasModelHubMixin.from_pretrained(*args, **kwargs) @validate_hf_hub_args @_requires_keras_2_model def push_to_hub_keras(model, repo_id: str, *, config: Optional[dict]=None, commit_message: str='Push Keras model using huggingface_hub.', private: bool=False, api_endpoint: Optional[str]=None, token: Optional[str]=None, branch: Optional[str]=None, create_pr: Optional[bool]=None, allow_patterns: Optional[Union[List[str], str]]=None, ignore_patterns: Optional[Union[List[str], str]]=None, delete_patterns: Optional[Union[List[str], str]]=None, log_dir: Optional[str]=None, include_optimizer: bool=False, tags: Optional[Union[list, str]]=None, plot_model: bool=True, **model_save_kwargs): api = HfApi(endpoint=api_endpoint) repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id with SoftTemporaryDirectory() as tmp: saved_path = Path(tmp) / repo_id save_pretrained_keras(model, saved_path, config=config, include_optimizer=include_optimizer, tags=tags, plot_model=plot_model, **model_save_kwargs) if log_dir is not None: delete_patterns = [] if delete_patterns is None else [delete_patterns] if isinstance(delete_patterns, str) else delete_patterns delete_patterns.append('logs/*') copytree(log_dir, saved_path / 'logs') return api.upload_folder(repo_type='model', repo_id=repo_id, folder_path=saved_path, commit_message=commit_message, token=token, revision=branch, create_pr=create_pr, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, delete_patterns=delete_patterns) class KerasModelHubMixin(ModelHubMixin): def _save_pretrained(self, save_directory): save_pretrained_keras(self, save_directory) @classmethod def _from_pretrained(cls, model_id, revision, cache_dir, force_download, proxies, resume_download, local_files_only, 
token, config: Optional[Dict[str, Any]]=None, **model_kwargs): if keras is None: raise ImportError('Called a TensorFlow-specific function but could not import it.') if not os.path.isdir(model_id): storage_folder = snapshot_download(repo_id=model_id, revision=revision, cache_dir=cache_dir, library_name='keras', library_version=get_tf_version()) else: storage_folder = model_id model = keras.models.load_model(storage_folder) model.config = config return model # File: huggingface_hub-main/src/huggingface_hub/lfs.py """""" import inspect import io import os import re import warnings from contextlib import AbstractContextManager from dataclasses import dataclass from math import ceil from os.path import getsize from pathlib import Path from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional, Tuple, TypedDict from urllib.parse import unquote from huggingface_hub import constants from .utils import build_hf_headers, fix_hf_endpoint_in_url, get_session, hf_raise_for_status, http_backoff, logging, tqdm, validate_hf_hub_args from .utils.sha import sha256, sha_fileobj if TYPE_CHECKING: from ._commit_api import CommitOperationAdd logger = logging.get_logger(__name__) OID_REGEX = re.compile('^[0-9a-f]{40}$') LFS_MULTIPART_UPLOAD_COMMAND = 'lfs-multipart-upload' LFS_HEADERS = {'Accept': 'application/vnd.git-lfs+json', 'Content-Type': 'application/vnd.git-lfs+json'} @dataclass class UploadInfo: sha256: bytes size: int sample: bytes @classmethod def from_path(cls, path: str): size = getsize(path) with io.open(path, 'rb') as file: sample = file.peek(512)[:512] sha = sha_fileobj(file) return cls(size=size, sha256=sha, sample=sample) @classmethod def from_bytes(cls, data: bytes): sha = sha256(data).digest() return cls(size=len(data), sample=data[:512], sha256=sha) @classmethod def from_fileobj(cls, fileobj: BinaryIO): sample = fileobj.read(512) fileobj.seek(0, io.SEEK_SET) sha = sha_fileobj(fileobj) size = fileobj.tell() fileobj.seek(0, io.SEEK_SET) return cls(size=size, sha256=sha, sample=sample) @validate_hf_hub_args def post_lfs_batch_info(upload_infos: Iterable[UploadInfo], token: Optional[str], repo_type: str, repo_id: str, revision: Optional[str]=None, endpoint: Optional[str]=None, headers: Optional[Dict[str, str]]=None) -> Tuple[List[dict], List[dict]]: endpoint = endpoint if endpoint is not None else constants.ENDPOINT url_prefix = '' if repo_type in constants.REPO_TYPES_URL_PREFIXES: url_prefix = constants.REPO_TYPES_URL_PREFIXES[repo_type] batch_url = f'{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch' payload: Dict = {'operation': 'upload', 'transfers': ['basic', 'multipart'], 'objects': [{'oid': upload.sha256.hex(), 'size': upload.size} for upload in upload_infos], 'hash_algo': 'sha256'} if revision is not None: payload['ref'] = {'name': unquote(revision)} headers = {**LFS_HEADERS, **build_hf_headers(token=token), **(headers or {})} resp = get_session().post(batch_url, headers=headers, json=payload) hf_raise_for_status(resp) batch_info = resp.json() objects = batch_info.get('objects', None) if not isinstance(objects, list): raise ValueError('Malformed response from server') return ([_validate_batch_actions(obj) for obj in objects if 'error' not in obj], [_validate_batch_error(obj) for obj in objects if 'error' in obj]) class PayloadPartT(TypedDict): partNumber: int etag: str class CompletionPayloadT(TypedDict): oid: str parts: List[PayloadPartT] def lfs_upload(operation: 'CommitOperationAdd', lfs_batch_action: Dict, token: Optional[str]=None, headers: 
Optional[Dict[str, str]]=None, endpoint: Optional[str]=None) -> None: _validate_batch_actions(lfs_batch_action) actions = lfs_batch_action.get('actions') if actions is None: logger.debug(f'Content of file {operation.path_in_repo} is already present upstream - skipping upload') return upload_action = lfs_batch_action['actions']['upload'] _validate_lfs_action(upload_action) verify_action = lfs_batch_action['actions'].get('verify') if verify_action is not None: _validate_lfs_action(verify_action) header = upload_action.get('header', {}) chunk_size = header.get('chunk_size') upload_url = fix_hf_endpoint_in_url(upload_action['href'], endpoint=endpoint) if chunk_size is not None: try: chunk_size = int(chunk_size) except (ValueError, TypeError): raise ValueError(f"Malformed response from LFS batch endpoint: `chunk_size` should be an integer. Got '{chunk_size}'.") _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_url) else: _upload_single_part(operation=operation, upload_url=upload_url) if verify_action is not None: _validate_lfs_action(verify_action) verify_url = fix_hf_endpoint_in_url(verify_action['href'], endpoint) verify_resp = get_session().post(verify_url, headers=build_hf_headers(token=token, headers=headers), json={'oid': operation.upload_info.sha256.hex(), 'size': operation.upload_info.size}) hf_raise_for_status(verify_resp) logger.debug(f'{operation.path_in_repo}: Upload successful') def _validate_lfs_action(lfs_action: dict): if not (isinstance(lfs_action.get('href'), str) and (lfs_action.get('header') is None or isinstance(lfs_action.get('header'), dict))): raise ValueError('lfs_action is improperly formatted') return lfs_action def _validate_batch_actions(lfs_batch_actions: dict): if not (isinstance(lfs_batch_actions.get('oid'), str) and isinstance(lfs_batch_actions.get('size'), int)): raise ValueError('lfs_batch_actions is improperly formatted') upload_action = lfs_batch_actions.get('actions', {}).get('upload') verify_action = lfs_batch_actions.get('actions', {}).get('verify') if upload_action is not None: _validate_lfs_action(upload_action) if verify_action is not None: _validate_lfs_action(verify_action) return lfs_batch_actions def _validate_batch_error(lfs_batch_error: dict): if not (isinstance(lfs_batch_error.get('oid'), str) and isinstance(lfs_batch_error.get('size'), int)): raise ValueError('lfs_batch_error is improperly formatted') error_info = lfs_batch_error.get('error') if not (isinstance(error_info, dict) and isinstance(error_info.get('message'), str) and isinstance(error_info.get('code'), int)): raise ValueError('lfs_batch_error is improperly formatted') return lfs_batch_error def _upload_single_part(operation: 'CommitOperationAdd', upload_url: str) -> None: with operation.as_file(with_tqdm=True) as fileobj: response = http_backoff('PUT', upload_url, data=fileobj, retry_on_status_codes=(500, 502, 503, 504)) hf_raise_for_status(response) def _upload_multi_part(operation: 'CommitOperationAdd', header: Dict, chunk_size: int, upload_url: str) -> None: sorted_parts_urls = _get_sorted_parts_urls(header=header, upload_info=operation.upload_info, chunk_size=chunk_size) use_hf_transfer = constants.HF_HUB_ENABLE_HF_TRANSFER if constants.HF_HUB_ENABLE_HF_TRANSFER and (not isinstance(operation.path_or_fileobj, str)) and (not isinstance(operation.path_or_fileobj, Path)): warnings.warn('hf_transfer is enabled but does not support uploading from bytes or BinaryIO, falling back to regular upload') use_hf_transfer = False response_headers = 
_upload_parts_hf_transfer(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size) if use_hf_transfer else _upload_parts_iteratively(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size) completion_res = get_session().post(upload_url, json=_get_completion_payload(response_headers, operation.upload_info.sha256.hex()), headers=LFS_HEADERS) hf_raise_for_status(completion_res) def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]: sorted_part_upload_urls = [upload_url for (_, upload_url) in sorted([(int(part_num, 10), upload_url) for (part_num, upload_url) in header.items() if part_num.isdigit() and len(part_num) > 0], key=lambda t: t[0])] num_parts = len(sorted_part_upload_urls) if num_parts != ceil(upload_info.size / chunk_size): raise ValueError('Invalid server response to upload large LFS file') return sorted_part_upload_urls def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT: parts: List[PayloadPartT] = [] for (part_number, header) in enumerate(response_headers): etag = header.get('etag') if etag is None or etag == '': raise ValueError(f'Invalid etag (`{etag}`) returned for part {part_number + 1}') parts.append({'partNumber': part_number + 1, 'etag': etag}) return {'oid': oid, 'parts': parts} def _upload_parts_iteratively(operation: 'CommitOperationAdd', sorted_parts_urls: List[str], chunk_size: int) -> List[Dict]: headers = [] with operation.as_file(with_tqdm=True) as fileobj: for (part_idx, part_upload_url) in enumerate(sorted_parts_urls): with SliceFileObj(fileobj, seek_from=chunk_size * part_idx, read_limit=chunk_size) as fileobj_slice: part_upload_res = http_backoff('PUT', part_upload_url, data=fileobj_slice, retry_on_status_codes=(500, 502, 503, 504)) hf_raise_for_status(part_upload_res) headers.append(part_upload_res.headers) return headers def _upload_parts_hf_transfer(operation: 'CommitOperationAdd', sorted_parts_urls: List[str], chunk_size: int) -> List[Dict]: try: from hf_transfer import multipart_upload except ImportError: raise ValueError("Fast uploading using 'hf_transfer' is enabled (HF_HUB_ENABLE_HF_TRANSFER=1) but 'hf_transfer' package is not available in your environment. Try `pip install hf_transfer`.") supports_callback = 'callback' in inspect.signature(multipart_upload).parameters if not supports_callback: warnings.warn('You are using an outdated version of `hf_transfer`. Consider upgrading to latest version to enable progress bars using `pip install -U hf_transfer`.') total = operation.upload_info.size desc = operation.path_in_repo if len(desc) > 40: desc = f'(…){desc[-40:]}' disable = True if logger.getEffectiveLevel() == logging.NOTSET else None with tqdm(unit='B', unit_scale=True, total=total, initial=0, desc=desc, disable=disable, name='huggingface_hub.lfs_upload') as progress: try: output = multipart_upload(file_path=operation.path_or_fileobj, parts_urls=sorted_parts_urls, chunk_size=chunk_size, max_files=128, parallel_failures=127, max_retries=5, **{'callback': progress.update} if supports_callback else {}) except Exception as e: raise RuntimeError('An error occurred while uploading using `hf_transfer`. 
Consider disabling HF_HUB_ENABLE_HF_TRANSFER for better error handling.') from e if not supports_callback: progress.update(total) return output class SliceFileObj(AbstractContextManager): def __init__(self, fileobj: BinaryIO, seek_from: int, read_limit: int): self.fileobj = fileobj self.seek_from = seek_from self.read_limit = read_limit def __enter__(self): self._previous_position = self.fileobj.tell() end_of_stream = self.fileobj.seek(0, os.SEEK_END) self._len = min(self.read_limit, end_of_stream - self.seek_from) self.fileobj.seek(self.seek_from, io.SEEK_SET) return self def __exit__(self, exc_type, exc_value, traceback): self.fileobj.seek(self._previous_position, io.SEEK_SET) def read(self, n: int=-1): pos = self.tell() if pos >= self._len: return b'' remaining_amount = self._len - pos data = self.fileobj.read(remaining_amount if n < 0 else min(n, remaining_amount)) return data def tell(self) -> int: return self.fileobj.tell() - self.seek_from def seek(self, offset: int, whence: int=os.SEEK_SET) -> int: start = self.seek_from end = start + self._len if whence in (os.SEEK_SET, os.SEEK_END): offset = start + offset if whence == os.SEEK_SET else end + offset offset = max(start, min(offset, end)) whence = os.SEEK_SET elif whence == os.SEEK_CUR: cur_pos = self.fileobj.tell() offset = max(start - cur_pos, min(offset, end - cur_pos)) else: raise ValueError(f'whence value {whence} is not supported') return self.fileobj.seek(offset, whence) - self.seek_from def __iter__(self): yield self.read(n=4 * 1024 * 1024) # File: huggingface_hub-main/src/huggingface_hub/repocard.py import os import re from pathlib import Path from typing import Any, Dict, Literal, Optional, Type, Union import requests import yaml from huggingface_hub.file_download import hf_hub_download from huggingface_hub.hf_api import upload_file from huggingface_hub.repocard_data import CardData, DatasetCardData, EvalResult, ModelCardData, SpaceCardData, eval_results_to_model_index, model_index_to_eval_results from huggingface_hub.utils import get_session, is_jinja_available, yaml_dump from . import constants from .errors import EntryNotFoundError from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args logger = logging.get_logger(__name__) TEMPLATE_MODELCARD_PATH = Path(__file__).parent / 'templates' / 'modelcard_template.md' TEMPLATE_DATASETCARD_PATH = Path(__file__).parent / 'templates' / 'datasetcard_template.md' REGEX_YAML_BLOCK = re.compile('^(\\s*---[\\r\\n]+)([\\S\\s]*?)([\\r\\n]+---(\\r\\n|\\n|$))') class RepoCard: card_data_class = CardData default_template_path = TEMPLATE_MODELCARD_PATH repo_type = 'model' def __init__(self, content: str, ignore_metadata_errors: bool=False): self.ignore_metadata_errors = ignore_metadata_errors self.content = content @property def content(self): line_break = _detect_line_ending(self._content) or '\n' return f'---{line_break}{self.data.to_yaml(line_break=line_break)}{line_break}---{line_break}{self.text}' @content.setter def content(self, content: str): self._content = content match = REGEX_YAML_BLOCK.search(content) if match: yaml_block = match.group(2) self.text = content[match.end():] data_dict = yaml.safe_load(yaml_block) if data_dict is None: data_dict = {} if not isinstance(data_dict, dict): raise ValueError('repo card metadata block should be a dict') else: logger.warning('Repo card metadata block was not found. 
Setting CardData to empty.') data_dict = {} self.text = content self.data = self.card_data_class(**data_dict, ignore_metadata_errors=self.ignore_metadata_errors) def __str__(self): return self.content def save(self, filepath: Union[Path, str]): filepath = Path(filepath) filepath.parent.mkdir(parents=True, exist_ok=True) with open(filepath, mode='w', newline='', encoding='utf-8') as f: f.write(str(self)) @classmethod def load(cls, repo_id_or_path: Union[str, Path], repo_type: Optional[str]=None, token: Optional[str]=None, ignore_metadata_errors: bool=False): if Path(repo_id_or_path).exists(): card_path = Path(repo_id_or_path) elif isinstance(repo_id_or_path, str): card_path = Path(hf_hub_download(repo_id_or_path, constants.REPOCARD_NAME, repo_type=repo_type or cls.repo_type, token=token)) else: raise ValueError(f'Cannot load RepoCard: path not found on disk ({repo_id_or_path}).') with card_path.open(mode='r', newline='', encoding='utf-8') as f: return cls(f.read(), ignore_metadata_errors=ignore_metadata_errors) def validate(self, repo_type: Optional[str]=None): repo_type = repo_type or self.repo_type body = {'repoType': repo_type, 'content': str(self)} headers = {'Accept': 'text/plain'} try: r = get_session().post('https://huggingface.co/api/validate-yaml', body, headers=headers) r.raise_for_status() except requests.exceptions.HTTPError as exc: if r.status_code == 400: raise ValueError(r.text) else: raise exc def push_to_hub(self, repo_id: str, token: Optional[str]=None, repo_type: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=None, parent_commit: Optional[str]=None): repo_type = repo_type or self.repo_type self.validate(repo_type=repo_type) with SoftTemporaryDirectory() as tmpdir: tmp_path = Path(tmpdir) / constants.REPOCARD_NAME tmp_path.write_text(str(self)) url = upload_file(path_or_fileobj=str(tmp_path), path_in_repo=constants.REPOCARD_NAME, repo_id=repo_id, token=token, repo_type=repo_type, commit_message=commit_message, commit_description=commit_description, create_pr=create_pr, revision=revision, parent_commit=parent_commit) return url @classmethod def from_template(cls, card_data: CardData, template_path: Optional[str]=None, template_str: Optional[str]=None, **template_kwargs): if is_jinja_available(): import jinja2 else: raise ImportError('Using RepoCard.from_template requires Jinja2 to be installed. 
Please install it with `pip install Jinja2`.') kwargs = card_data.to_dict().copy() kwargs.update(template_kwargs) if template_path is not None: template_str = Path(template_path).read_text() if template_str is None: template_str = Path(cls.default_template_path).read_text() template = jinja2.Template(template_str) content = template.render(card_data=card_data.to_yaml(), **kwargs) return cls(content) class ModelCard(RepoCard): card_data_class = ModelCardData default_template_path = TEMPLATE_MODELCARD_PATH repo_type = 'model' @classmethod def from_template(cls, card_data: ModelCardData, template_path: Optional[str]=None, template_str: Optional[str]=None, **template_kwargs): return super().from_template(card_data, template_path, template_str, **template_kwargs) class DatasetCard(RepoCard): card_data_class = DatasetCardData default_template_path = TEMPLATE_DATASETCARD_PATH repo_type = 'dataset' @classmethod def from_template(cls, card_data: DatasetCardData, template_path: Optional[str]=None, template_str: Optional[str]=None, **template_kwargs): return super().from_template(card_data, template_path, template_str, **template_kwargs) class SpaceCard(RepoCard): card_data_class = SpaceCardData default_template_path = TEMPLATE_MODELCARD_PATH repo_type = 'space' def _detect_line_ending(content: str) -> Literal['\r', '\n', '\r\n', None]: cr = content.count('\r') lf = content.count('\n') crlf = content.count('\r\n') if cr + lf == 0: return None if crlf == cr and crlf == lf: return '\r\n' if cr > lf: return '\r' else: return '\n' def metadata_load(local_path: Union[str, Path]) -> Optional[Dict]: content = Path(local_path).read_text() match = REGEX_YAML_BLOCK.search(content) if match: yaml_block = match.group(2) data = yaml.safe_load(yaml_block) if data is None or isinstance(data, dict): return data raise ValueError('repo card metadata block should be a dict') else: return None def metadata_save(local_path: Union[str, Path], data: Dict) -> None: line_break = '\n' content = '' if os.path.exists(local_path): with open(local_path, 'r', newline='', encoding='utf8') as readme: content = readme.read() if isinstance(readme.newlines, tuple): line_break = readme.newlines[0] elif isinstance(readme.newlines, str): line_break = readme.newlines with open(local_path, 'w', newline='', encoding='utf8') as readme: data_yaml = yaml_dump(data, sort_keys=False, line_break=line_break) match = REGEX_YAML_BLOCK.search(content) if match: output = content[:match.start()] + f'---{line_break}{data_yaml}---{line_break}' + content[match.end():] else: output = f'---{line_break}{data_yaml}---{line_break}{content}' readme.write(output) readme.close() def metadata_eval_result(*, model_pretty_name: str, task_pretty_name: str, task_id: str, metrics_pretty_name: str, metrics_id: str, metrics_value: Any, dataset_pretty_name: str, dataset_id: str, metrics_config: Optional[str]=None, metrics_verified: bool=False, dataset_config: Optional[str]=None, dataset_split: Optional[str]=None, dataset_revision: Optional[str]=None, metrics_verification_token: Optional[str]=None) -> Dict: return {'model-index': eval_results_to_model_index(model_name=model_pretty_name, eval_results=[EvalResult(task_name=task_pretty_name, task_type=task_id, metric_name=metrics_pretty_name, metric_type=metrics_id, metric_value=metrics_value, dataset_name=dataset_pretty_name, dataset_type=dataset_id, metric_config=metrics_config, verified=metrics_verified, verify_token=metrics_verification_token, dataset_config=dataset_config, dataset_split=dataset_split, 
dataset_revision=dataset_revision)])} @validate_hf_hub_args def metadata_update(repo_id: str, metadata: Dict, *, repo_type: Optional[str]=None, overwrite: bool=False, token: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, revision: Optional[str]=None, create_pr: bool=False, parent_commit: Optional[str]=None) -> str: commit_message = commit_message if commit_message is not None else 'Update metadata with huggingface_hub' card_class: Type[RepoCard] if repo_type is None or repo_type == 'model': card_class = ModelCard elif repo_type == 'dataset': card_class = DatasetCard elif repo_type == 'space': card_class = RepoCard else: raise ValueError(f'Unknown repo_type: {repo_type}') try: card = card_class.load(repo_id, token=token, repo_type=repo_type) except EntryNotFoundError: if repo_type == 'space': raise ValueError("Cannot update metadata on a Space that doesn't contain a `README.md` file.") card = card_class.from_template(CardData()) for (key, value) in metadata.items(): if key == 'model-index': if 'name' not in value[0]: value[0]['name'] = getattr(card, 'model_name', repo_id) (model_name, new_results) = model_index_to_eval_results(value) if card.data.eval_results is None: card.data.eval_results = new_results card.data.model_name = model_name else: existing_results = card.data.eval_results for new_result in new_results: result_found = False for existing_result in existing_results: if new_result.is_equal_except_value(existing_result): if new_result != existing_result and (not overwrite): raise ValueError(f"You passed a new value for the existing metric 'name: {new_result.metric_name}, type: {new_result.metric_type}'. Set `overwrite=True` to overwrite existing metrics.") result_found = True existing_result.metric_value = new_result.metric_value if existing_result.verified is True: existing_result.verify_token = new_result.verify_token if not result_found: card.data.eval_results.append(new_result) elif card.data.get(key) is not None and (not overwrite) and (card.data.get(key) != value): raise ValueError(f"You passed a new value for the existing meta data field '{key}'. 
Set `overwrite=True` to overwrite existing metadata.') else: card.data[key] = value return card.push_to_hub(repo_id, token=token, repo_type=repo_type, commit_message=commit_message, commit_description=commit_description, create_pr=create_pr, revision=revision, parent_commit=parent_commit)
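A usage sketch for `metadata_update` (editor's illustration): the repo id is a placeholder, and the call creates a real commit on the Hub, so a token with write access is required.

from huggingface_hub.repocard import metadata_update

url = metadata_update(
    "my-username/my-model",  # placeholder repo id
    {"license": "mit", "tags": ["text-classification"]},
    overwrite=True,  # required when a field already exists with a different value
    commit_message="Update license and tags",
)

# File: huggingface_hub-main/src/huggingface_hub/repocard_data.py import copy from collections import defaultdict from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from huggingface_hub.utils import logging, yaml_dump logger = logging.get_logger(__name__) @dataclass class EvalResult: task_type: str dataset_type: str dataset_name: str metric_type: str metric_value: Any task_name: Optional[str] = None dataset_config: Optional[str] = None dataset_split: Optional[str] = None dataset_revision: Optional[str] = None dataset_args: Optional[Dict[str, Any]] = None metric_name: Optional[str] = None metric_config: Optional[str] = None metric_args: Optional[Dict[str, Any]] = None verified: Optional[bool] = None verify_token: Optional[str] = None source_name: Optional[str] = None source_url: Optional[str] = None @property def unique_identifier(self) -> tuple: return (self.task_type, self.dataset_type, self.dataset_config, self.dataset_split, self.dataset_revision) def is_equal_except_value(self, other: 'EvalResult') -> bool: for (key, _) in self.__dict__.items(): if key == 'metric_value': continue if key != 'verify_token' and getattr(self, key) != getattr(other, key): return False return True def __post_init__(self) -> None: if self.source_name is not None and self.source_url is None: raise ValueError('If `source_name` is provided, `source_url` must also be provided.') @dataclass class CardData: def __init__(self, ignore_metadata_errors: bool=False, **kwargs): self.__dict__.update(kwargs) def to_dict(self) -> Dict[str, Any]: data_dict = copy.deepcopy(self.__dict__) self._to_dict(data_dict) return _remove_none(data_dict) def _to_dict(self, data_dict): pass def to_yaml(self, line_break=None) -> str: return yaml_dump(self.to_dict(), sort_keys=False, line_break=line_break).strip() def __repr__(self): return repr(self.__dict__) def __str__(self): return self.to_yaml() def get(self, key: str, default: Any=None) -> Any: return self.__dict__.get(key, default) def pop(self, key: str, default: Any=None) -> Any: return self.__dict__.pop(key, default) def __getitem__(self, key: str) -> Any: return self.__dict__[key] def __setitem__(self, key: str, value: Any) -> None: self.__dict__[key] = value def __contains__(self, key: str) -> bool: return key in self.__dict__ def __len__(self) -> int: return len(self.__dict__) class ModelCardData(CardData): def __init__(self, *, base_model: Optional[Union[str, List[str]]]=None, datasets: Optional[List[str]]=None, eval_results: Optional[List[EvalResult]]=None, language: Optional[Union[str, List[str]]]=None, library_name: Optional[str]=None, license: Optional[str]=None, license_name: Optional[str]=None, license_link: Optional[str]=None, metrics: Optional[List[str]]=None, model_name: Optional[str]=None, pipeline_tag: Optional[str]=None, tags: Optional[List[str]]=None, ignore_metadata_errors: bool=False, **kwargs): self.base_model = base_model self.datasets = datasets self.eval_results = eval_results self.language = language self.library_name = library_name self.license = license self.license_name = license_name self.license_link = license_link self.metrics = metrics self.model_name = model_name self.pipeline_tag = pipeline_tag self.tags =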
_to_unique_list(tags) model_index = kwargs.pop('model-index', None) if model_index: try: (model_name, eval_results) = model_index_to_eval_results(model_index) self.model_name = model_name self.eval_results = eval_results except (KeyError, TypeError) as error: if ignore_metadata_errors: logger.warning('Invalid model-index. Not loading eval results into CardData.') else: raise ValueError(f'Invalid `model_index` in metadata cannot be parsed: {error.__class__} {error}. Pass `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning: some information will be lost. Use it at your own risk.') super().__init__(**kwargs) if self.eval_results: if isinstance(self.eval_results, EvalResult): self.eval_results = [self.eval_results] if self.model_name is None: raise ValueError('Passing `eval_results` requires `model_name` to be set.') def _to_dict(self, data_dict): if self.eval_results is not None: data_dict['model-index'] = eval_results_to_model_index(self.model_name, self.eval_results) del data_dict['eval_results'], data_dict['model_name'] class DatasetCardData(CardData): def __init__(self, *, language: Optional[Union[str, List[str]]]=None, license: Optional[Union[str, List[str]]]=None, annotations_creators: Optional[Union[str, List[str]]]=None, language_creators: Optional[Union[str, List[str]]]=None, multilinguality: Optional[Union[str, List[str]]]=None, size_categories: Optional[Union[str, List[str]]]=None, source_datasets: Optional[List[str]]=None, task_categories: Optional[Union[str, List[str]]]=None, task_ids: Optional[Union[str, List[str]]]=None, paperswithcode_id: Optional[str]=None, pretty_name: Optional[str]=None, train_eval_index: Optional[Dict]=None, config_names: Optional[Union[str, List[str]]]=None, ignore_metadata_errors: bool=False, **kwargs): self.annotations_creators = annotations_creators self.language_creators = language_creators self.language = language self.license = license self.multilinguality = multilinguality self.size_categories = size_categories self.source_datasets = source_datasets self.task_categories = task_categories self.task_ids = task_ids self.paperswithcode_id = paperswithcode_id self.pretty_name = pretty_name self.config_names = config_names self.train_eval_index = train_eval_index or kwargs.pop('train-eval-index', None) super().__init__(**kwargs) def _to_dict(self, data_dict): data_dict['train-eval-index'] = data_dict.pop('train_eval_index') class SpaceCardData(CardData): def __init__(self, *, title: Optional[str]=None, sdk: Optional[str]=None, sdk_version: Optional[str]=None, python_version: Optional[str]=None, app_file: Optional[str]=None, app_port: Optional[int]=None, license: Optional[str]=None, duplicated_from: Optional[str]=None, models: Optional[List[str]]=None, datasets: Optional[List[str]]=None, tags: Optional[List[str]]=None, ignore_metadata_errors: bool=False, **kwargs): self.title = title self.sdk = sdk self.sdk_version = sdk_version self.python_version = python_version self.app_file = app_file self.app_port = app_port self.license = license self.duplicated_from = duplicated_from self.models = models self.datasets = datasets self.tags = _to_unique_list(tags) super().__init__(**kwargs) def model_index_to_eval_results(model_index: List[Dict[str, Any]]) -> Tuple[str, List[EvalResult]]: eval_results = [] for elem in model_index: name = elem['name'] results = elem['results'] for result in results: task_type = result['task']['type'] task_name = result['task'].get('name') dataset_type = result['dataset']['type'] dataset_name = 
result['dataset']['name'] dataset_config = result['dataset'].get('config') dataset_split = result['dataset'].get('split') dataset_revision = result['dataset'].get('revision') dataset_args = result['dataset'].get('args') source_name = result.get('source', {}).get('name') source_url = result.get('source', {}).get('url') for metric in result['metrics']: metric_type = metric['type'] metric_value = metric['value'] metric_name = metric.get('name') metric_args = metric.get('args') metric_config = metric.get('config') verified = metric.get('verified') verify_token = metric.get('verifyToken') eval_result = EvalResult(task_type=task_type, dataset_type=dataset_type, dataset_name=dataset_name, metric_type=metric_type, metric_value=metric_value, task_name=task_name, dataset_config=dataset_config, dataset_split=dataset_split, dataset_revision=dataset_revision, dataset_args=dataset_args, metric_name=metric_name, metric_args=metric_args, metric_config=metric_config, verified=verified, verify_token=verify_token, source_name=source_name, source_url=source_url) eval_results.append(eval_result) return (name, eval_results) def _remove_none(obj): if isinstance(obj, (list, tuple, set)): return type(obj)((_remove_none(x) for x in obj if x is not None)) elif isinstance(obj, dict): return type(obj)(((_remove_none(k), _remove_none(v)) for (k, v) in obj.items() if k is not None and v is not None)) else: return obj def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult]) -> List[Dict[str, Any]]: task_and_ds_types_map: Dict[Any, List[EvalResult]] = defaultdict(list) for eval_result in eval_results: task_and_ds_types_map[eval_result.unique_identifier].append(eval_result) model_index_data = [] for results in task_and_ds_types_map.values(): sample_result = results[0] data = {'task': {'type': sample_result.task_type, 'name': sample_result.task_name}, 'dataset': {'name': sample_result.dataset_name, 'type': sample_result.dataset_type, 'config': sample_result.dataset_config, 'split': sample_result.dataset_split, 'revision': sample_result.dataset_revision, 'args': sample_result.dataset_args}, 'metrics': [{'type': result.metric_type, 'value': result.metric_value, 'name': result.metric_name, 'config': result.metric_config, 'args': result.metric_args, 'verified': result.verified, 'verifyToken': result.verify_token} for result in results]} if sample_result.source_url is not None: source = {'url': sample_result.source_url} if sample_result.source_name is not None: source['name'] = sample_result.source_name data['source'] = source model_index_data.append(data) model_index = [{'name': model_name, 'results': model_index_data}] return _remove_none(model_index) def _to_unique_list(tags: Optional[List[str]]) -> Optional[List[str]]: if tags is None: return tags unique_tags = [] for tag in tags: if tag not in unique_tags: unique_tags.append(tag) return unique_tags
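A round-trip sketch for the eval-result helpers above (editor's illustration; the names and numbers are invented):

result = EvalResult(
    task_type="text-classification",
    dataset_type="imdb",
    dataset_name="IMDb",
    metric_type="accuracy",
    metric_value=0.93,
)
model_index = eval_results_to_model_index("my-model", [result])
# `_remove_none` strips unset fields, leaving e.g.
# [{'name': 'my-model', 'results': [{'task': {'type': 'text-classification'},
#   'dataset': {'name': 'IMDb', 'type': 'imdb'},
#   'metrics': [{'type': 'accuracy', 'value': 0.93}]}]}]
name, results = model_index_to_eval_results(model_index)
assert name == "my-model" and results[0].metric_value == 0.93

# File: huggingface_hub-main/src/huggingface_hub/repository.py import atexit import os import re import subprocess import threading import time from contextlib import contextmanager from pathlib import Path from typing import Callable, Dict, Iterator, List, Optional, Tuple, TypedDict, Union from urllib.parse import urlparse from huggingface_hub import constants from huggingface_hub.repocard import metadata_load, metadata_save from .hf_api import HfApi, repo_type_and_id_from_hf_id from .lfs import LFS_MULTIPART_UPLOAD_COMMAND from .utils import SoftTemporaryDirectory, get_token, logging, run_subprocess, tqdm, validate_hf_hub_args from .utils._deprecation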
import _deprecate_method logger = logging.get_logger(__name__) class CommandInProgress: def __init__(self, title: str, is_done_method: Callable, status_method: Callable, process: subprocess.Popen, post_method: Optional[Callable]=None): self.title = title self._is_done = is_done_method self._status = status_method self._process = process self._stderr = '' self._stdout = '' self._post_method = post_method @property def is_done(self) -> bool: result = self._is_done() if result and self._post_method is not None: self._post_method() self._post_method = None return result @property def status(self) -> int: return self._status() @property def failed(self) -> bool: return self.status > 0 @property def stderr(self) -> str: if self._process.stderr is not None: self._stderr += self._process.stderr.read() return self._stderr @property def stdout(self) -> str: if self._process.stdout is not None: self._stdout += self._process.stdout.read() return self._stdout def __repr__(self): status = self.status if status == -1: status = 'running' return f"[{self.title} command, status code: {status}, {('in progress.' if not self.is_done else 'finished.')} PID: {self._process.pid}]" def is_git_repo(folder: Union[str, Path]) -> bool: folder_exists = os.path.exists(os.path.join(folder, '.git')) git_branch = subprocess.run('git branch'.split(), cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return folder_exists and git_branch.returncode == 0 def is_local_clone(folder: Union[str, Path], remote_url: str) -> bool: if not is_git_repo(folder): return False remotes = run_subprocess('git remote -v', folder).stdout remote_url = re.sub('https://.*@', 'https://', remote_url) remotes = [re.sub('https://.*@', 'https://', remote) for remote in remotes.split()] return remote_url in remotes def is_tracked_with_lfs(filename: Union[str, Path]) -> bool: folder = Path(filename).parent filename = Path(filename).name try: p = run_subprocess('git check-attr -a'.split() + [filename], folder) attributes = p.stdout.strip() except subprocess.CalledProcessError as exc: if not is_git_repo(folder): return False else: raise OSError(exc.stderr) if len(attributes) == 0: return False found_lfs_tag = {'diff': False, 'merge': False, 'filter': False} for attribute in attributes.split('\n'): for tag in found_lfs_tag.keys(): if tag in attribute and 'lfs' in attribute: found_lfs_tag[tag] = True return all(found_lfs_tag.values()) def is_git_ignored(filename: Union[str, Path]) -> bool: folder = Path(filename).parent filename = Path(filename).name try: p = run_subprocess('git check-ignore'.split() + [filename], folder, check=False) is_ignored = not bool(p.returncode) except subprocess.CalledProcessError as exc: raise OSError(exc.stderr) return is_ignored def is_binary_file(filename: Union[str, Path]) -> bool: try: with open(filename, 'rb') as f: content = f.read(10 * 1024 ** 2) text_chars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(32, 256)) - {127}) return bool(content.translate(None, text_chars)) except UnicodeDecodeError: return True def files_to_be_staged(pattern: str='.', folder: Union[str, Path, None]=None) -> List[str]: try: p = run_subprocess('git ls-files --exclude-standard -mo'.split() + [pattern], folder) if len(p.stdout.strip()): files = p.stdout.strip().split('\n') else: files = [] except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return files def is_tracked_upstream(folder: Union[str, Path]) -> bool: try: run_subprocess('git rev-parse --symbolic-full-name --abbrev-ref @{u}', folder) return True 
except subprocess.CalledProcessError as exc: if 'HEAD' in exc.stderr: raise OSError('No branch checked out') return False def commits_to_push(folder: Union[str, Path], upstream: Optional[str]=None) -> int: try: result = run_subprocess(f"git cherry -v {upstream or ''}", folder) return len(result.stdout.split('\n')) - 1 except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) class PbarT(TypedDict): bar: tqdm past_bytes: int @contextmanager def _lfs_log_progress(): if logger.getEffectiveLevel() >= logging.ERROR: try: yield except Exception: pass return def output_progress(stopping_event: threading.Event): pbars: Dict[Tuple[str, str], PbarT] = {} def close_pbars(): for pbar in pbars.values(): pbar['bar'].update(pbar['bar'].total - pbar['past_bytes']) pbar['bar'].refresh() pbar['bar'].close() def tail_file(filename) -> Iterator[str]: with open(filename, 'r') as file: current_line = '' while True: if stopping_event.is_set(): close_pbars() break line_bit = file.readline() if line_bit is not None and (not len(line_bit.strip()) == 0): current_line += line_bit if current_line.endswith('\n'): yield current_line current_line = '' else: time.sleep(1) while not os.path.exists(os.environ['GIT_LFS_PROGRESS']): if stopping_event.is_set(): close_pbars() return time.sleep(2) for line in tail_file(os.environ['GIT_LFS_PROGRESS']): try: (state, file_progress, byte_progress, filename) = line.split() except ValueError as error: raise ValueError(f'Cannot unpack LFS progress line:\n{line}') from error description = f'{state.capitalize()} file {filename}' (current_bytes, total_bytes) = byte_progress.split('/') current_bytes_int = int(current_bytes) total_bytes_int = int(total_bytes) pbar = pbars.get((state, filename)) if pbar is None: pbars[state, filename] = {'bar': tqdm(desc=description, initial=current_bytes_int, total=total_bytes_int, unit='B', unit_scale=True, unit_divisor=1024, name='huggingface_hub.lfs_upload'), 'past_bytes': int(current_bytes)} else: pbar['bar'].update(current_bytes_int - pbar['past_bytes']) pbar['past_bytes'] = current_bytes_int current_lfs_progress_value = os.environ.get('GIT_LFS_PROGRESS', '') with SoftTemporaryDirectory() as tmpdir: os.environ['GIT_LFS_PROGRESS'] = os.path.join(tmpdir, 'lfs_progress') logger.debug(f"Following progress in {os.environ['GIT_LFS_PROGRESS']}") exit_event = threading.Event() x = threading.Thread(target=output_progress, args=(exit_event,), daemon=True) x.start() try: yield finally: exit_event.set() x.join() os.environ['GIT_LFS_PROGRESS'] = current_lfs_progress_value class Repository: command_queue: List[CommandInProgress] @validate_hf_hub_args @_deprecate_method(version='1.0', message='Please prefer the http-based alternatives instead. 
Given its large adoption in legacy code, the complete removal is only planned on next major release.\nFor more details, please read https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http.') def __init__(self, local_dir: Union[str, Path], clone_from: Optional[str]=None, repo_type: Optional[str]=None, token: Union[bool, str]=True, git_user: Optional[str]=None, git_email: Optional[str]=None, revision: Optional[str]=None, skip_lfs_files: bool=False, client: Optional[HfApi]=None): if isinstance(local_dir, Path): local_dir = str(local_dir) os.makedirs(local_dir, exist_ok=True) self.local_dir = os.path.join(os.getcwd(), local_dir) self._repo_type = repo_type self.command_queue = [] self.skip_lfs_files = skip_lfs_files self.client = client if client is not None else HfApi() self.check_git_versions() if isinstance(token, str): self.huggingface_token: Optional[str] = token elif token is False: self.huggingface_token = None else: self.huggingface_token = get_token() if clone_from is not None: self.clone_from(repo_url=clone_from) elif is_git_repo(self.local_dir): logger.debug('[Repository] is a valid git repo') else: raise ValueError('If not specifying `clone_from`, you need to pass Repository a valid git clone.') if self.huggingface_token is not None and (git_email is None or git_user is None): user = self.client.whoami(self.huggingface_token) if git_email is None: git_email = user.get('email') if git_user is None: git_user = user.get('fullname') if git_user is not None or git_email is not None: self.git_config_username_and_email(git_user, git_email) self.lfs_enable_largefiles() self.git_credential_helper_store() if revision is not None: self.git_checkout(revision, create_branch_ok=True) atexit.register(self.wait_for_commands) @property def current_branch(self) -> str: try: result = run_subprocess('git rev-parse --abbrev-ref HEAD', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return result def check_git_versions(self): try: git_version = run_subprocess('git --version', self.local_dir).stdout.strip() except FileNotFoundError: raise EnvironmentError('Looks like you do not have git installed, please install.') try: lfs_version = run_subprocess('git-lfs --version', self.local_dir).stdout.strip() except FileNotFoundError: raise EnvironmentError('Looks like you do not have git-lfs installed, please install. You can install from https://git-lfs.github.com/. 
Then run `git lfs install` (you only have to do this once).') logger.info(git_version + '\n' + lfs_version) @validate_hf_hub_args def clone_from(self, repo_url: str, token: Union[bool, str, None]=None): token = token if isinstance(token, str) else None if token is False else self.huggingface_token if token is not None and token.startswith('api_org'): raise ValueError('You must use your personal access token, not an Organization token (see https://hf.co/settings/tokens).') hub_url = self.client.endpoint if hub_url in repo_url or ('http' not in repo_url and len(repo_url.split('/')) <= 2): (repo_type, namespace, repo_name) = repo_type_and_id_from_hf_id(repo_url, hub_url=hub_url) repo_id = f'{namespace}/{repo_name}' if namespace is not None else repo_name if repo_type is not None: self._repo_type = repo_type repo_url = hub_url + '/' if self._repo_type in constants.REPO_TYPES_URL_PREFIXES: repo_url += constants.REPO_TYPES_URL_PREFIXES[self._repo_type] if token is not None: scheme = urlparse(repo_url).scheme repo_url = repo_url.replace(f'{scheme}://', f'{scheme}://user:{token}@') repo_url += repo_id clean_repo_url = re.sub('(https?)://.*@', '\\1://', repo_url) try: run_subprocess('git lfs install', self.local_dir) if len(os.listdir(self.local_dir)) == 0: logger.warning(f'Cloning {clean_repo_url} into local empty directory.') with _lfs_log_progress(): env = os.environ.copy() if self.skip_lfs_files: env.update({'GIT_LFS_SKIP_SMUDGE': '1'}) run_subprocess(f"{('git clone' if self.skip_lfs_files else 'git lfs clone')} {repo_url} .", self.local_dir, env=env) else: if not is_git_repo(self.local_dir): raise EnvironmentError(f"Tried to clone a repository in a non-empty folder that isn't a git repository ('{self.local_dir}'). If you really want to do this, do it manually:\n cd {self.local_dir} && git init && git remote add origin && git pull origin main\n or clone repo to a new folder and move your existing files there afterwards.") if is_local_clone(self.local_dir, repo_url): logger.warning(f'{self.local_dir} is already a clone of {clean_repo_url}. Make sure you pull the latest changes with `repo.git_pull()`.') else: output = run_subprocess('git remote get-url origin', self.local_dir, check=False) error_msg = f'Tried to clone {clean_repo_url} in an unrelated git repository.\nIf you believe this is an error, please add a remote with the following URL: {clean_repo_url}.' 
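# Editorial note on the token-auth rewrite performed earlier in `clone_from`: a URL such as
#   https://huggingface.co/<namespace>/<repo>
# becomes
#   https://user:<token>@huggingface.co/<namespace>/<repo>
# (hostname illustrative; the actual host comes from `self.client.endpoint`), while
# `clean_repo_url` strips the `.*@` credentials back out of every URL that gets logged.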
if output.returncode == 0: clean_local_remote_url = re.sub('https://.*@', 'https://', output.stdout) error_msg += f'\nLocal path has its origin defined as: {clean_local_remote_url}' raise EnvironmentError(error_msg) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_config_username_and_email(self, git_user: Optional[str]=None, git_email: Optional[str]=None): try: if git_user is not None: run_subprocess('git config user.name'.split() + [git_user], self.local_dir) if git_email is not None: run_subprocess(f'git config user.email {git_email}'.split(), self.local_dir) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_credential_helper_store(self): try: run_subprocess('git config credential.helper store', self.local_dir) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_head_hash(self) -> str: try: p = run_subprocess('git rev-parse HEAD', self.local_dir) return p.stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_remote_url(self) -> str: try: p = run_subprocess('git config --get remote.origin.url', self.local_dir) url = p.stdout.strip() return re.sub('https://.*@', 'https://', url) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_head_commit_url(self) -> str: sha = self.git_head_hash() url = self.git_remote_url() if url.endswith('/'): url = url[:-1] return f'{url}/commit/{sha}' def list_deleted_files(self) -> List[str]: try: git_status = run_subprocess('git status -s', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) if len(git_status) == 0: return [] modified_files_statuses = [status.strip() for status in git_status.split('\n')] deleted_files_statuses = [status for status in modified_files_statuses if 'D' in status.split()[0]] deleted_files = [status.split()[-1].strip() for status in deleted_files_statuses] return deleted_files def lfs_track(self, patterns: Union[str, List[str]], filename: bool=False): if isinstance(patterns, str): patterns = [patterns] try: for pattern in patterns: run_subprocess(f"git lfs track {('--filename' if filename else '')} {pattern}", self.local_dir) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def lfs_untrack(self, patterns: Union[str, List[str]]): if isinstance(patterns, str): patterns = [patterns] try: for pattern in patterns: run_subprocess('git lfs untrack'.split() + [pattern], self.local_dir) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def lfs_enable_largefiles(self): try: lfs_config = 'git config lfs.customtransfer.multipart' run_subprocess(f'{lfs_config}.path huggingface-cli', self.local_dir) run_subprocess(f'{lfs_config}.args {LFS_MULTIPART_UPLOAD_COMMAND}', self.local_dir) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def auto_track_binary_files(self, pattern: str='.') -> List[str]: files_to_be_tracked_with_lfs = [] deleted_files = self.list_deleted_files() for filename in files_to_be_staged(pattern, folder=self.local_dir): if filename in deleted_files: continue path_to_file = os.path.join(os.getcwd(), self.local_dir, filename) if not (is_tracked_with_lfs(path_to_file) or is_git_ignored(path_to_file)): size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024) if size_in_mb >= 10: logger.warning('Parsing a large file to check if binary or not. 
Tracking large files using `repository.auto_track_large_files` is recommended so as to not load the full file in memory.') is_binary = is_binary_file(path_to_file) if is_binary: self.lfs_track(filename) files_to_be_tracked_with_lfs.append(filename) self.lfs_untrack(deleted_files) return files_to_be_tracked_with_lfs def auto_track_large_files(self, pattern: str='.') -> List[str]: files_to_be_tracked_with_lfs = [] deleted_files = self.list_deleted_files() for filename in files_to_be_staged(pattern, folder=self.local_dir): if filename in deleted_files: continue path_to_file = os.path.join(os.getcwd(), self.local_dir, filename) size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024) if size_in_mb >= 10 and (not is_tracked_with_lfs(path_to_file)) and (not is_git_ignored(path_to_file)): self.lfs_track(filename) files_to_be_tracked_with_lfs.append(filename) self.lfs_untrack(deleted_files) return files_to_be_tracked_with_lfs def lfs_prune(self, recent=False): try: with _lfs_log_progress(): result = run_subprocess(f"git lfs prune {('--recent' if recent else '')}", self.local_dir) logger.info(result.stdout) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_pull(self, rebase: bool=False, lfs: bool=False): command = 'git pull' if not lfs else 'git lfs pull' if rebase: command += ' --rebase' try: with _lfs_log_progress(): result = run_subprocess(command, self.local_dir) logger.info(result.stdout) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_add(self, pattern: str='.', auto_lfs_track: bool=False): if auto_lfs_track: tracked_files = self.auto_track_large_files(pattern) tracked_files.extend(self.auto_track_binary_files(pattern)) if tracked_files: logger.warning(f'Adding files tracked by Git LFS: {tracked_files}. 
This may take a bit of time if the files are large.') try: result = run_subprocess('git add -v'.split() + [pattern], self.local_dir) logger.info(f'Adding to index:\n{result.stdout}\n') except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def git_commit(self, commit_message: str='commit files to HF hub'): try: result = run_subprocess('git commit -v -m'.split() + [commit_message], self.local_dir) logger.info(f'Committed:\n{result.stdout}\n') except subprocess.CalledProcessError as exc: if len(exc.stderr) > 0: raise EnvironmentError(exc.stderr) else: raise EnvironmentError(exc.stdout) def git_push(self, upstream: Optional[str]=None, blocking: bool=True, auto_lfs_prune: bool=False) -> Union[str, Tuple[str, CommandInProgress]]: command = 'git push' if upstream: command += f' --set-upstream {upstream}' number_of_commits = commits_to_push(self.local_dir, upstream) if number_of_commits > 1: logger.warning(f'Several commits ({number_of_commits}) will be pushed upstream.') if blocking: logger.warning('The progress bars may be unreliable.') try: with _lfs_log_progress(): process = subprocess.Popen(command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8', cwd=self.local_dir) if blocking: (stdout, stderr) = process.communicate() return_code = process.poll() process.kill() if len(stderr): logger.warning(stderr) if return_code: raise subprocess.CalledProcessError(return_code, process.args, output=stdout, stderr=stderr) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) if not blocking: def status_method(): status = process.poll() if status is None: return -1 else: return status command_in_progress = CommandInProgress('push', is_done_method=lambda : process.poll() is not None, status_method=status_method, process=process, post_method=self.lfs_prune if auto_lfs_prune else None) self.command_queue.append(command_in_progress) return (self.git_head_commit_url(), command_in_progress) if auto_lfs_prune: self.lfs_prune() return self.git_head_commit_url() def git_checkout(self, revision: str, create_branch_ok: bool=False): try: result = run_subprocess(f'git checkout {revision}', self.local_dir) logger.warning(f'Checked out {revision} from {self.current_branch}.') logger.warning(result.stdout) except subprocess.CalledProcessError as exc: if not create_branch_ok: raise EnvironmentError(exc.stderr) else: try: result = run_subprocess(f'git checkout -b {revision}', self.local_dir) logger.warning(f'Revision `{revision}` does not exist. 
Created and checked out branch `{revision}`.') logger.warning(result.stdout) except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def tag_exists(self, tag_name: str, remote: Optional[str]=None) -> bool: if remote: try: result = run_subprocess(f'git ls-remote origin refs/tags/{tag_name}', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return len(result) != 0 else: try: git_tags = run_subprocess('git tag', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) git_tags = git_tags.split('\n') return tag_name in git_tags def delete_tag(self, tag_name: str, remote: Optional[str]=None) -> bool: delete_locally = True delete_remotely = True if not self.tag_exists(tag_name): delete_locally = False if not self.tag_exists(tag_name, remote=remote): delete_remotely = False if delete_locally: try: run_subprocess(['git', 'tag', '-d', tag_name], self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) if remote and delete_remotely: try: run_subprocess(f'git push {remote} --delete {tag_name}', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return True def add_tag(self, tag_name: str, message: Optional[str]=None, remote: Optional[str]=None): if message: tag_args = ['git', 'tag', '-a', tag_name, '-m', message] else: tag_args = ['git', 'tag', tag_name] try: run_subprocess(tag_args, self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) if remote: try: run_subprocess(f'git push {remote} {tag_name}', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) def is_repo_clean(self) -> bool: try: git_status = run_subprocess('git status --porcelain', self.local_dir).stdout.strip() except subprocess.CalledProcessError as exc: raise EnvironmentError(exc.stderr) return len(git_status) == 0 def push_to_hub(self, commit_message: str='commit files to HF hub', blocking: bool=True, clean_ok: bool=True, auto_lfs_prune: bool=False) -> Union[None, str, Tuple[str, CommandInProgress]]: if clean_ok and self.is_repo_clean(): logger.info('Repo currently clean. Ignoring push_to_hub') return None self.git_add(auto_lfs_track=True) self.git_commit(commit_message) return self.git_push(upstream=f'origin {self.current_branch}', blocking=blocking, auto_lfs_prune=auto_lfs_prune) @contextmanager def commit(self, commit_message: str, branch: Optional[str]=None, track_large_files: bool=True, blocking: bool=True, auto_lfs_prune: bool=False): files_to_stage = files_to_be_staged('.', folder=self.local_dir) if len(files_to_stage): files_in_msg = str(files_to_stage[:5])[:-1] + ', ...]' if len(files_to_stage) > 5 else str(files_to_stage) logger.error(f'There exists some updated files in the local repository that are not committed: {files_in_msg}. This may lead to errors if checking out a branch. These files and their modifications will be added to the current commit.') if branch is not None: self.git_checkout(branch, create_branch_ok=True) if is_tracked_upstream(self.local_dir): logger.warning('Pulling changes ...') self.git_pull(rebase=True) else: logger.warning(f"The current branch has no upstream branch. 
Will push to 'origin {self.current_branch}'") current_working_directory = os.getcwd() os.chdir(os.path.join(current_working_directory, self.local_dir)) try: yield self finally: self.git_add(auto_lfs_track=track_large_files) try: self.git_commit(commit_message) except OSError as e: if 'nothing to commit' not in str(e): raise e try: self.git_push(upstream=f'origin {self.current_branch}', blocking=blocking, auto_lfs_prune=auto_lfs_prune) except OSError as e: if 'could not read Username' in str(e): raise OSError("Couldn't authenticate user for push. Did you set `token` to `True`?") from e else: raise e os.chdir(current_working_directory) def repocard_metadata_load(self) -> Optional[Dict]: filepath = os.path.join(self.local_dir, constants.REPOCARD_NAME) if os.path.isfile(filepath): return metadata_load(filepath) return None def repocard_metadata_save(self, data: Dict) -> None: return metadata_save(os.path.join(self.local_dir, constants.REPOCARD_NAME), data) @property def commands_failed(self): return [c for c in self.command_queue if c.status > 0] @property def commands_in_progress(self): return [c for c in self.command_queue if not c.is_done] def wait_for_commands(self): index = 0 for command_failed in self.commands_failed: logger.error(f'The {command_failed.title} command with PID {command_failed._process.pid} failed.') logger.error(command_failed.stderr) while self.commands_in_progress: if index % 10 == 0: logger.warning(f'Waiting for the following commands to finish before shutting down: {self.commands_in_progress}.') index += 1 time.sleep(1) # File: huggingface_hub-main/src/huggingface_hub/serialization/__init__.py """""" from ._base import StateDictSplit, split_state_dict_into_shards_factory from ._tensorflow import get_tf_storage_size, split_tf_state_dict_into_shards from ._torch import get_torch_storage_id, get_torch_storage_size, save_torch_model, save_torch_state_dict, split_torch_state_dict_into_shards # File: huggingface_hub-main/src/huggingface_hub/serialization/_base.py """""" from dataclasses import dataclass, field from typing import Any, Callable, Dict, List, Optional, TypeVar, Union from .. 
import logging

TensorT = TypeVar('TensorT')
TensorSizeFn_T = Callable[[TensorT], int]
StorageIDFn_T = Callable[[TensorT], Optional[Any]]
MAX_SHARD_SIZE = '5GB'
SIZE_UNITS = {'TB': 10 ** 12, 'GB': 10 ** 9, 'MB': 10 ** 6, 'KB': 10 ** 3}
logger = logging.get_logger(__file__)


@dataclass
class StateDictSplit:
    is_sharded: bool = field(init=False)
    metadata: Dict[str, Any]
    filename_to_tensors: Dict[str, List[str]]
    tensor_to_filename: Dict[str, str]

    def __post_init__(self):
        self.is_sharded = len(self.filename_to_tensors) > 1


def split_state_dict_into_shards_factory(
    state_dict: Dict[str, TensorT],
    *,
    get_storage_size: TensorSizeFn_T,
    filename_pattern: str,
    get_storage_id: StorageIDFn_T = lambda tensor: None,
    max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
) -> StateDictSplit:
    storage_id_to_tensors: Dict[Any, List[str]] = {}
    shard_list: List[Dict[str, TensorT]] = []
    current_shard: Dict[str, TensorT] = {}
    current_shard_size = 0
    total_size = 0
    if isinstance(max_shard_size, str):
        max_shard_size = parse_size_to_int(max_shard_size)
    for (key, tensor) in state_dict.items():
        if isinstance(tensor, str):
            logger.info('Skipping tensor %s as it is a string (bnb serialization)', key)
            continue
        storage_id = get_storage_id(tensor)
        if storage_id is not None:
            if storage_id in storage_id_to_tensors:
                storage_id_to_tensors[storage_id].append(key)
                continue
            else:
                storage_id_to_tensors[storage_id] = [key]
        tensor_size = get_storage_size(tensor)
        if tensor_size > max_shard_size:
            total_size += tensor_size
            shard_list.append({key: tensor})
            continue
        if current_shard_size + tensor_size > max_shard_size:
            shard_list.append(current_shard)
            current_shard = {}
            current_shard_size = 0
        current_shard[key] = tensor
        current_shard_size += tensor_size
        total_size += tensor_size
    if len(current_shard) > 0:
        shard_list.append(current_shard)
    nb_shards = len(shard_list)
    for (storage_id, keys) in storage_id_to_tensors.items():
        for shard in shard_list:
            if keys[0] in shard:
                for key in keys:
                    shard[key] = state_dict[key]
                break
    if nb_shards == 1:
        filename = filename_pattern.format(suffix='')
        return StateDictSplit(
            metadata={'total_size': total_size},
            filename_to_tensors={filename: list(state_dict.keys())},
            tensor_to_filename={key: filename for key in state_dict.keys()},
        )
    tensor_name_to_filename = {}
    filename_to_tensors = {}
    for (idx, shard) in enumerate(shard_list):
        filename = filename_pattern.format(suffix=f'-{idx + 1:05d}-of-{nb_shards:05d}')
        for key in shard:
            tensor_name_to_filename[key] = filename
        filename_to_tensors[filename] = list(shard.keys())
    return StateDictSplit(
        metadata={'total_size': total_size},
        filename_to_tensors=filename_to_tensors,
        tensor_to_filename=tensor_name_to_filename,
    )


def parse_size_to_int(size_as_str: str) -> int:
    size_as_str = size_as_str.strip()
    unit = size_as_str[-2:].upper()
    if unit not in SIZE_UNITS:
        raise ValueError(f"Unit '{unit}' not supported. Supported units are TB, GB, MB, KB. Got '{size_as_str}'.")
    multiplier = SIZE_UNITS[unit]
    try:
        value = float(size_as_str[:-2].strip())
    except ValueError as e:
        raise ValueError(f"Could not parse the size value from '{size_as_str}': {e}") from e
    return int(value * multiplier)

# File: huggingface_hub-main/src/huggingface_hub/serialization/_tensorflow.py
""""""
import math
import re
from typing import TYPE_CHECKING, Dict, Union

from .. import constants
from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory

if TYPE_CHECKING:
    import tensorflow as tf


def split_tf_state_dict_into_shards(
    state_dict: Dict[str, 'tf.Tensor'],
    *,
    filename_pattern: str = constants.TF2_WEIGHTS_FILE_PATTERN,
    max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
) -> StateDictSplit:
    return split_state_dict_into_shards_factory(
        state_dict,
        max_shard_size=max_shard_size,
        filename_pattern=filename_pattern,
        get_storage_size=get_tf_storage_size,
    )


def get_tf_storage_size(tensor: 'tf.Tensor') -> int:
    return math.ceil(tensor.numpy().size * _dtype_byte_size_tf(tensor.dtype))


def _dtype_byte_size_tf(dtype) -> float:
    import tensorflow as tf

    if dtype == tf.bool:
        return 1 / 8
    bit_search = re.search('[^\\d](\\d+)$', dtype.name)
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8

# File: huggingface_hub-main/src/huggingface_hub/serialization/_torch.py
""""""
import importlib
import json
import os
import re
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union

from .. import constants, logging
from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory

logger = logging.get_logger(__file__)

if TYPE_CHECKING:
    import torch


def save_torch_model(
    model: 'torch.nn.Module',
    save_directory: Union[str, Path],
    *,
    filename_pattern: Optional[str] = None,
    force_contiguous: bool = True,
    max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
    metadata: Optional[Dict[str, str]] = None,
    safe_serialization: bool = True,
):
    save_torch_state_dict(
        state_dict=model.state_dict(),
        filename_pattern=filename_pattern,
        force_contiguous=force_contiguous,
        max_shard_size=max_shard_size,
        metadata=metadata,
        safe_serialization=safe_serialization,
        save_directory=save_directory,
    )


def save_torch_state_dict(
    state_dict: Dict[str, 'torch.Tensor'],
    save_directory: Union[str, Path],
    *,
    filename_pattern: Optional[str] = None,
    force_contiguous: bool = True,
    max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
    metadata: Optional[Dict[str, str]] = None,
    safe_serialization: bool = True,
) -> None:
    save_directory = str(save_directory)
    if filename_pattern is None:
        filename_pattern = constants.SAFETENSORS_WEIGHTS_FILE_PATTERN if safe_serialization else constants.PYTORCH_WEIGHTS_FILE_PATTERN
    if safe_serialization:
        try:
            from safetensors.torch import save_file as save_file_fn
        except ImportError as e:
            raise ImportError('Please install `safetensors` to use safe serialization. You can install it with `pip install safetensors`.') from e
    else:
        from torch import save as save_file_fn

        logger.warning(
            'You are using unsafe serialization. Due to security reasons, it is recommended not to load pickled models '
            'from untrusted sources. If you intend to share your model, we strongly recommend using safe serialization '
            'by installing `safetensors` with `pip install safetensors`.'
        )
    if metadata is None:
        metadata = {}
    if safe_serialization:
        state_dict = _clean_state_dict_for_safetensors(state_dict, metadata, force_contiguous=force_contiguous)
    state_dict_split = split_torch_state_dict_into_shards(
        state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
    )
    existing_files_regex = re.compile(filename_pattern.format(suffix='(-\\d{5}-of-\\d{5})?') + '(\\.index\\.json)?')
    for filename in os.listdir(save_directory):
        if existing_files_regex.match(filename):
            try:
                logger.debug(f"Removing existing file '{filename}' from folder.")
                os.remove(os.path.join(save_directory, filename))
            except Exception as e:
                logger.warning(f"Error when trying to remove existing '{filename}' from folder: {e}. Continuing...")
    per_file_metadata = {'format': 'pt'}
    if not state_dict_split.is_sharded:
        per_file_metadata.update(metadata)
    safe_file_kwargs = {'metadata': per_file_metadata} if safe_serialization else {}
    for (filename, tensors) in state_dict_split.filename_to_tensors.items():
        shard = {tensor: state_dict[tensor] for tensor in tensors}
        save_file_fn(shard, os.path.join(save_directory, filename), **safe_file_kwargs)
        logger.debug(f'Shard saved to {filename}')
    if state_dict_split.is_sharded:
        index_path = filename_pattern.format(suffix='') + '.index.json'
        index = {
            'metadata': {**state_dict_split.metadata, **metadata},
            'weight_map': state_dict_split.tensor_to_filename,
        }
        with open(os.path.join(save_directory, index_path), 'w') as f:
            json.dump(index, f, indent=2)
        logger.info(
            f'The model is bigger than the maximum size per checkpoint ({max_shard_size}). '
            f'Model weights have been saved in {len(state_dict_split.filename_to_tensors)} checkpoint shards. '
            f'You can find where each parameter has been saved in the index located at {index_path}.'
        )
    logger.info(f'Model weights successfully saved to {save_directory}!')


def split_torch_state_dict_into_shards(
    state_dict: Dict[str, 'torch.Tensor'],
    *,
    filename_pattern: str = constants.SAFETENSORS_WEIGHTS_FILE_PATTERN,
    max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
) -> StateDictSplit:
    return split_state_dict_into_shards_factory(
        state_dict,
        max_shard_size=max_shard_size,
        filename_pattern=filename_pattern,
        get_storage_size=get_torch_storage_size,
        get_storage_id=get_torch_storage_id,
    )


def _get_unique_id(tensor: 'torch.Tensor') -> Union[int, Tuple[Any, ...]]:
    try:
        from torch.utils._python_dispatch import is_traceable_wrapper_subclass

        if is_traceable_wrapper_subclass(tensor):
            (attrs, _) = tensor.__tensor_flatten__()
            return tuple((_get_unique_id(getattr(tensor, attr)) for attr in attrs))
    except ImportError:
        pass
    if tensor.device.type == 'xla' and is_torch_tpu_available():
        import torch_xla

        unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
    else:
        unique_id = storage_ptr(tensor)
    return unique_id


def get_torch_storage_id(tensor: 'torch.Tensor') -> Tuple['torch.device', Union[int, Tuple[Any, ...]], int]:
    return (tensor.device, _get_unique_id(tensor), get_torch_storage_size(tensor))


def get_torch_storage_size(tensor: 'torch.Tensor') -> int:
    try:
        from torch.utils._python_dispatch import is_traceable_wrapper_subclass

        if is_traceable_wrapper_subclass(tensor):
            (attrs, _) = tensor.__tensor_flatten__()
            return sum((get_torch_storage_size(getattr(tensor, attr)) for attr in attrs))
    except ImportError:
        pass
    try:
        return tensor.untyped_storage().nbytes()
    except AttributeError:
        try:
            return tensor.storage().size() * _get_dtype_size(tensor.dtype)
        except NotImplementedError:
            return tensor.nelement() * _get_dtype_size(tensor.dtype)


@lru_cache()
def is_torch_tpu_available(check_device=True):
    if importlib.util.find_spec('torch_xla') is not None:
        if check_device:
            try:
                import torch_xla.core.xla_model as xm

                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
        return True
    return False


def storage_ptr(tensor: 'torch.Tensor') -> Union[int, Tuple[Any, ...]]:
    try:
        from torch.utils._python_dispatch import is_traceable_wrapper_subclass

        if is_traceable_wrapper_subclass(tensor):
            return _get_unique_id(tensor)
    except ImportError:
        pass
    try:
        return tensor.untyped_storage().data_ptr()
    except Exception:
        try:
            return tensor.storage().data_ptr()
        except NotImplementedError:
            return 0


def _clean_state_dict_for_safetensors(
    state_dict: Dict[str, 'torch.Tensor'], metadata: Dict[str, str], force_contiguous: bool = True
):
    to_removes = _remove_duplicate_names(state_dict)
    for (kept_name, to_remove_group) in to_removes.items():
        for to_remove in to_remove_group:
            if metadata is None:
                metadata = {}
            if to_remove not in metadata:
                metadata[to_remove] = kept_name
            del state_dict[to_remove]
    if force_contiguous:
        state_dict = {k: v.contiguous() for (k, v) in state_dict.items()}
    return state_dict


def _end_ptr(tensor: 'torch.Tensor') -> int:
    if tensor.nelement():
        stop = tensor.view(-1)[-1].data_ptr() + _get_dtype_size(tensor.dtype)
    else:
        stop = tensor.data_ptr()
    return stop


def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, 'torch.Tensor']) -> List[Set[str]]:
    filtered_tensors = []
    for shared in tensors:
        if len(shared) < 2:
            filtered_tensors.append(shared)
            continue
        areas = []
        for name in shared:
            tensor = state_dict[name]
            areas.append((tensor.data_ptr(), _end_ptr(tensor), name))
        areas.sort()
        (_, last_stop, last_name) = areas[0]
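        # Editorial note: `areas` is sorted by start address, so a single linear sweep
        # suffices. A tensor joins the current group only when its start address lies
        # before the end of the previously examined tensor (`last_stop`); otherwise it
        # opens a new, disjoint group.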
        filtered_tensors.append({last_name})
        for (start, stop, name) in areas[1:]:
            if start >= last_stop:
                filtered_tensors.append({name})
            else:
                filtered_tensors[-1].add(name)
            last_stop = stop
    return filtered_tensors


def _find_shared_tensors(state_dict: Dict[str, 'torch.Tensor']) -> List[Set[str]]:
    import torch

    tensors_dict = defaultdict(set)
    for (k, v) in state_dict.items():
        if v.device != torch.device('meta') and storage_ptr(v) != 0 and (get_torch_storage_size(v) != 0):
            tensors_dict[v.device, storage_ptr(v), get_torch_storage_size(v)].add(k)
    tensors = list(sorted(tensors_dict.values()))
    tensors = _filter_shared_not_shared(tensors, state_dict)
    return tensors


def _is_complete(tensor: 'torch.Tensor') -> bool:
    try:
        from torch.utils._python_dispatch import is_traceable_wrapper_subclass

        if is_traceable_wrapper_subclass(tensor):
            (attrs, _) = tensor.__tensor_flatten__()
            return all((_is_complete(getattr(tensor, attr)) for attr in attrs))
    except ImportError:
        pass
    return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _get_dtype_size(tensor.dtype) == get_torch_storage_size(tensor)


def _remove_duplicate_names(
    state_dict: Dict[str, 'torch.Tensor'],
    *,
    preferred_names: Optional[List[str]] = None,
    discard_names: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    if preferred_names is None:
        preferred_names = []
    unique_preferred_names = set(preferred_names)
    if discard_names is None:
        discard_names = []
    unique_discard_names = set(discard_names)
    shareds = _find_shared_tensors(state_dict)
    to_remove = defaultdict(list)
    for shared in shareds:
        complete_names = set([name for name in shared if _is_complete(state_dict[name])])
        if not complete_names:
            raise RuntimeError(
                'Error while trying to find names to remove to save state dict, but found no suitable name to keep'
                f' for saving amongst: {shared}. None is covering the entire storage. Refusing to save/load the model'
                ' since you could be storing much more memory than needed. Please refer to'
                ' https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue.'
            )
        keep_name = sorted(list(complete_names))[0]
        preferred = complete_names.difference(unique_discard_names)
        if preferred:
            keep_name = sorted(list(preferred))[0]
        if unique_preferred_names:
            preferred = unique_preferred_names.intersection(complete_names)
            if preferred:
                keep_name = sorted(list(preferred))[0]
        for name in sorted(shared):
            if name != keep_name:
                to_remove[keep_name].append(name)
    return to_remove


@lru_cache()
def _get_dtype_size(dtype: 'torch.dtype') -> int:
    import torch

    _float8_e4m3fn = getattr(torch, 'float8_e4m3fn', None)
    _float8_e5m2 = getattr(torch, 'float8_e5m2', None)
    _SIZE = {
        torch.int64: 8,
        torch.float32: 4,
        torch.int32: 4,
        torch.bfloat16: 2,
        torch.float16: 2,
        torch.int16: 2,
        torch.uint8: 1,
        torch.int8: 1,
        torch.bool: 1,
        torch.float64: 8,
        _float8_e4m3fn: 1,
        _float8_e5m2: 1,
    }
    return _SIZE[dtype]
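# --- Usage sketch (editorial addition, not part of the library source) ---
# A minimal end-to-end example of the torch serialization helpers above. It assumes
# `torch` and `safetensors` are installed; the model, directory and shard size are
# illustrative placeholders, with a 4KB limit chosen so the split produces two shards.
if __name__ == '__main__':
    import tempfile

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(16, 64), torch.nn.Linear(64, 4))
    with tempfile.TemporaryDirectory() as save_dir:
        # Packs tensors greedily into shards of at most `max_shard_size` bytes,
        # deduplicates tensors that share storage, and writes a `*.index.json`
        # weight map whenever more than one shard is produced.
        save_torch_state_dict(model.state_dict(), save_directory=save_dir, max_shard_size='4KB')
        # e.g. ['model-00001-of-00002.safetensors', 'model-00002-of-00002.safetensors',
        #       'model.safetensors.index.json']
        print(sorted(os.listdir(save_dir)))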