Files changed (1)
  1. app.py +13 -10
app.py CHANGED
@@ -5,13 +5,16 @@ import gradio as gr
 
 from huggingface_hub import create_repo, HfApi
 from huggingface_hub import snapshot_download
+from huggingface_hub import whoami
 
 api = HfApi()
 
-def process_model(model_id, q_method, username, hf_token):
+def process_model(model_id, q_method, hf_token):
 
     MODEL_NAME = model_id.split('/')[-1]
     fp16 = f"{MODEL_NAME}/{MODEL_NAME.lower()}.fp16.bin"
+
+    username = whoami(hf_token)["name"]
 
     snapshot_download(repo_id=model_id, local_dir = f"{MODEL_NAME}", local_dir_use_symlinks=False)
     print("Model downloaded successully!")
@@ -35,11 +38,16 @@ def process_model(model_id, q_method, username, hf_token):
     print("Empty repo created successfully!")
 
     # Upload gguf files
-    api.upload_folder(
-        folder_path=MODEL_NAME,
+    # api.upload_folder(
+    #     folder_path=MODEL_NAME,
+    #     repo_id=f"{username}/{MODEL_NAME}-{q_method}-GGUF",
+    #     allow_patterns=["*.gguf"],
+    #     token=hf_token
+    # )
+    api.upload_file(
+        path_or_fileobj=qtype,
         repo_id=f"{username}/{MODEL_NAME}-{q_method}-GGUF",
-        allow_patterns=["*.gguf","*.md"],
-        token=hf_token
+        repo_type="model",
     )
     print("Uploaded successfully!")
 
@@ -65,11 +73,6 @@ iface = gr.Interface(
             label="Quantization Method",
             info="GGML quantisation type"
         ),
-        gr.Textbox(
-            lines=1,
-            label="Username",
-            info="Your Hugging Face username"
-        ),
         gr.Textbox(
             lines=1,
             label="HF Write Token",