import os
import shutil

import gradio as gr
import torch
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from huggingface_hub import HfApi, ModelCard, whoami
from huggingface_hub.file_download import repo_folder_name
from pathlib import Path
from tempfile import TemporaryDirectory
from textwrap import dedent

from optimum.exporters.tasks import TasksManager
from optimum.intel import (
    OVLatentConsistencyModelPipeline,
    OVModelForAudioClassification,
    OVModelForCausalLM,
    OVModelForFeatureExtraction,
    OVModelForImageClassification,
    OVModelForMaskedLM,
    OVModelForPix2Struct,
    OVModelForQuestionAnswering,
    OVModelForSeq2SeqLM,
    OVModelForSequenceClassification,
    OVModelForTokenClassification,
    OVStableDiffusionPipeline,
    OVStableDiffusionXLPipeline,
    OVWeightQuantizationConfig,
)
from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipelineBase
from optimum.intel.openvino.utils import _HEAD_TO_AUTOMODELS
from optimum.intel.utils.constant import _TASK_ALIASES
from optimum.intel.utils.modeling_utils import _find_files_matching_pattern


def export(
    model_id: str,
    private_repo: bool,
    oauth_token: gr.OAuthToken,
):
    # gr.OAuthToken is None when the user is not logged in.
    if oauth_token is None or oauth_token.token is None:
        raise ValueError("You must be logged in to use this space")

    model_name = model_id.split("/")[-1]
    username = whoami(oauth_token.token)["name"]
    new_repo_id = f"{username}/{model_name}-openvino"

    # Infer the task from the model and make sure it maps to a supported OVModel class.
    task = TasksManager.infer_task_from_model(model_id)
    if task not in _HEAD_TO_AUTOMODELS:
        raise ValueError(
            f"The task '{task}' is not supported, only {list(_HEAD_TO_AUTOMODELS.keys())} tasks are supported"
        )
    if task == "text2text-generation":
        raise ValueError("Export of Seq2Seq models is currently disabled.")

    auto_model_class = _HEAD_TO_AUTOMODELS[task]

    # Skip models that already ship OpenVINO IR files.
    ov_files = _find_files_matching_pattern(
        model_id,
        pattern=r"(.*)?openvino(.*)?_model\.xml",
        use_auth_token=oauth_token.token,
    )
    if len(ov_files) > 0:
        raise Exception(f"Model {model_id} is already converted, skipping export.")

    api = HfApi(token=oauth_token.token)
    with TemporaryDirectory() as d:
        folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
        os.makedirs(folder)
        try:
            # Download the original configuration files, then export the model
            # to the OpenVINO IR format and save it alongside them.
            api.snapshot_download(repo_id=model_id, local_dir=folder, allow_patterns=["*.json"])
            # _HEAD_TO_AUTOMODELS maps the task to a class name string; all of
            # these classes are imported above, so eval resolves them.
            ov_model = eval(auto_model_class).from_pretrained(model_id, export=True)
            ov_model.save_pretrained(folder)
            # Output-consistency check between the original and exported models,
            # currently disabled:
            """
            if not isinstance(ov_model, OVStableDiffusionPipelineBase):
                model = TasksManager.get_model_from_task(task, model_id)
                exporter_config_class = TasksManager.get_exporter_config_constructor(
                    exporter="openvino",
                    model=model,
                    task=task,
                    model_name=model_id,
                    model_type=model.config.model_type.replace("_", "-"),
                )
                openvino_config = exporter_config_class(model.config)
                inputs = openvino_config.generate_dummy_inputs(framework="pt")
                ov_outputs = ov_model(**inputs)
                outputs = model(**inputs)
                for output_name in ov_outputs:
                    if isinstance(outputs[output_name], torch.Tensor) and not torch.allclose(
                        outputs[output_name], ov_outputs[output_name], atol=1e-3
                    ):
                        raise ValueError(
                            "The exported model does not have the same outputs as the original model. Export interrupted."
) """ new_repo_url = api.create_repo(repo_id=new_repo_id, exist_ok=True, private=private_repo) new_repo_id = new_repo_url.repo_id print("Repo created successfully!", new_repo_url) folder = Path(folder) folder_parts = len(folder.parts) for file_path in folder.glob("**/*"): name = Path(*file_path.parts[folder_parts:]) if not file_path.is_file() or any(part_name.startswith(".") for part_name in name.parts): continue try: api.upload_file( path_or_fileobj=file_path, path_in_repo=str(name), repo_id=new_repo_id, ) except Exception as e: raise Exception(f"Error uploading file {file_path}: {e}") try: card = ModelCard.load(model_id, token=oauth_token.token) except: card = ModelCard("") if card.data.tags is None: card.data.tags = [] card.data.tags.append("openvino") card.data.base_model = model_id card.text = dedent( f""" This model was converted to OpenVINO from [`{model_id}`](https://huggingface.co/{model_id}) using [optimum-intel](https://github.com/huggingface/optimum-intel) via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space. First make sure you have optimum-intel installed: ```bash pip install optimum[openvino] ``` To load your model you can do as follows: ```python from optimum.intel import {auto_model_class} model_id = "{new_repo_id}" model = {auto_model_class}.from_pretrained(model_id) ``` """ ) card_path = os.path.join(folder, "README.md") card.save(card_path) api.upload_file( path_or_fileobj=card_path, path_in_repo="README.md", repo_id=new_repo_id, ) return f"This model was successfully exported, find it under your repo {new_repo_url}'" finally: shutil.rmtree(folder, ignore_errors=True) model_id = HuggingfaceHubSearch( label="Hub Model ID", placeholder="Search for model id on the hub", search_type="model", ) private_repo = gr.Checkbox( value=False, label="Private Repo", info="Create a private repo under your username", ) interface = gr.Interface( fn=export, inputs=[ model_id, private_repo, ], outputs=[ gr.Markdown(label="output"), ], title="Export your model to OpenVINO", description="This space converts your model to the OpenVINO format using [optimum-intel](https://huggingface.co/docs/optimum/main/intel/openvino/inference) The resulting model will then be pushed on the Hub under your HF user namespace", api_name=False, ) with gr.Blocks() as demo: gr.Markdown("You must be logged in to use this space") gr.LoginButton(min_width=250) interface.render() demo.launch()