refactor: Migrate from OpenAI GPT-4 to Qwen inference with Hugging Face API
app.py
CHANGED
@@ -4,7 +4,7 @@ from PIL import Image
 from moviepy.editor import VideoFileClip, AudioFileClip
 
 import os
-import …
+from openai import OpenAI
 import subprocess
 from pathlib import Path
 import uuid
@@ -13,8 +13,11 @@ import shlex
 import shutil
 from utils import format_bash_command
 
-…
-…
+HF_API_KEY = os.environ["HF_API_KEY"]
+client = OpenAI(
+    base_url="https://api-inference.huggingface.co/v1/",
+    api_key=HF_API_KEY
+)
 
 allowed_medias = [
     ".png",
@@ -127,8 +130,12 @@ YOUR FFMPEG COMMAND:
         },
     ]
     try:
-        completion = …
-            model="…
+        completion = client.chat.completions.create(
+            model="Qwen/Qwen2.5-Coder-32B-Instruct",
+            messages=messages,
+            temperature=temperature,
+            top_p=top_p,
+            max_tokens=2048
         )
        command = completion.choices[0].message.content.replace("\n", "")
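In short, the migration points the standard OpenAI Python client at Hugging Face's OpenAI-compatible Inference API endpoint and swaps the model id, so the surrounding app code can stay unchanged. Below is a minimal standalone sketch of the migrated call under that assumption; the messages and sampling values are placeholders, since the real app builds `messages`, `temperature`, and `top_p` from the uploaded files and the user's request.

import os

from openai import OpenAI

# OpenAI-compatible client pointed at the Hugging Face Inference API,
# as in the diff above; HF_API_KEY must hold a Hugging Face access token.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ["HF_API_KEY"],
)

# Placeholder prompt; the app assembles this from the user's media files.
messages = [
    {"role": "system", "content": "Reply with a single ffmpeg command and nothing else."},
    {"role": "user", "content": "Convert input.mov to an mp4 at 720p."},
]

completion = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=messages,
    temperature=0.1,   # placeholder sampling values
    top_p=0.1,
    max_tokens=2048,
)

# The app strips newlines so the reply can be run as one shell command.
command = completion.choices[0].message.content.replace("\n", "")
print(command)

Keeping the OpenAI SDK as the client is what keeps the diff small: only the base URL, the API key source, and the model name change, while the chat-completions call shape stays the same.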