John6666 committed on
Commit
0c78fe7
1 Parent(s): 9f49725

Update multit2i.py

Browse files
Files changed (1) hide show
  1. multit2i.py +3 -3
multit2i.py CHANGED
@@ -60,7 +60,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
60
  limit = limit * 20 if check_status and force_gpu else limit * 5
61
  models = []
62
  try:
63
- model_infos = api.list_models(author=author, task="text-to-image",
64
  tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
65
  except Exception as e:
66
  print(f"Error: Failed to list models.")
@@ -157,8 +157,8 @@ def load_from_model(model_name: str, hf_token: str | Literal[False] | None = Non
157
  raise ModelNotFoundError(
158
  f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
159
  )
160
- #p = response.json().get("pipeline_tag")
161
- #if p != "text-to-image": raise ModelNotFoundError(f"This model isn't for text-to-image or unsupported: {model_name}.")
162
  headers["X-Wait-For-Model"] = "true"
163
  client = huggingface_hub.InferenceClient(model=model_name, headers=headers,
164
  token=hf_token, timeout=server_timeout)
 
60
  limit = limit * 20 if check_status and force_gpu else limit * 5
61
  models = []
62
  try:
63
+ model_infos = api.list_models(author=author, #task="text-to-image",
64
  tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
65
  except Exception as e:
66
  print(f"Error: Failed to list models.")
 
157
  raise ModelNotFoundError(
158
  f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
159
  )
160
+ p = response.json().get("pipeline_tag")
161
+ if p != "text-to-image": raise ModelNotFoundError(f"This model isn't for text-to-image or unsupported: {model_name}.")
162
  headers["X-Wait-For-Model"] = "true"
163
  client = huggingface_hub.InferenceClient(model=model_name, headers=headers,
164
  token=hf_token, timeout=server_timeout)