Update app.py
app.py CHANGED
@@ -7,10 +7,10 @@ import openai
 # Define the models
 MODEL_CONFIG = {
     "translation": "PontifexMaximus/opus-mt-iir-en-finetuned-fa-to-en",  # translation
-    "qa": "HooshvareLab/bert-fa-base-uncased",
-    "math": "
-    "persian_nlp": "HooshvareLab/bert-fa-zwnj-base",
-    "custom_ai": "universitytehran/PersianMind-v1.0",
+    "qa": "HooshvareLab/bert-fa-base-uncased",  # question answering
+    "math": "OpenAI",  # math via OpenAI
+    "persian_nlp": "HooshvareLab/bert-fa-zwnj-base",  # Persian-language NLP
+    "custom_ai": "universitytehran/PersianMind-v1.0",  # customized model
 }
 
 # Load the token from an environment variable
@@ -19,76 +19,74 @@ if not TOKEN:
     raise ValueError("API token is missing! Set it as 'Passsssssss' in environment variables.")
 
 # Set the OpenAI API key
-openai.api_key = "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"
+openai.api_key = os.getenv("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")
+if not openai.api_key:
+    raise ValueError("OpenAI API key is missing! Set it as 'OPENAI_API_KEY' in environment variables.")
 
-# Model management system
 class MultiModelSystem:
     def __init__(self):
+        """Manage the different models and process tasks in parallel."""
         self.models = {}
         self.queue = Queue()
         self.executor = ThreadPoolExecutor(max_workers=5)
         self.load_models()
 
     def load_models(self):
-        """
+        """Load the models in parallel."""
         try:
             for task, model_id in MODEL_CONFIG.items():
-                if
-                    # Math model via OpenAI
+                if model_id == "OpenAI":
                     self.models[task] = self.load_openai_model()
-                    print(f"Model '{task}' loaded successfully.")
                 else:
-                    self.models[task] = pipeline(
-
+                    self.models[task] = pipeline(
+                        task=self.get_task_type(task),
+                        model=model_id,
+                        use_auth_token=TOKEN
+                    )
+                print(f"Model '{task}' loaded successfully.")
         except Exception as e:
             print(f"Error loading models: {e}")
             raise
 
     def load_openai_model(self):
-        """
-        return "OpenAI
+        """Load the OpenAI math model."""
+        return "OpenAI (Math)"
 
     @staticmethod
     def get_task_type(task):
-        """
-
-
-
-
-
-
-
-
-        elif task == "custom_ai":
-            return "text-generation"
-        else:
-            raise ValueError(f"Unknown task: {task}")
+        """Return the pipeline task type for each model."""
+        task_map = {
+            "translation": "translation",
+            "qa": "question-answering",
+            "persian_nlp": "text-classification",
+            "custom_ai": "text-generation",
+            "math": "text-generation"
+        }
+        return task_map.get(task, "text-generation")
 
     def process_task(self, task, **kwargs):
-        """
+        """Process tasks asynchronously."""
         if task not in self.models:
             raise ValueError(f"Model for task '{task}' not loaded.")
-
+
         def task_handler():
             try:
                 if task == "math":
-                    # Use the OpenAI API for math
                     result = self.process_math_task(kwargs.get("text"))
                 else:
                     result = self.models[task](**kwargs)
                 self.queue.put(result)
-                print(f"Task '{task}' completed: {result}")
             except Exception as e:
                 print(f"Error processing task '{task}': {e}")
                 self.queue.put(None)
-
+
         self.executor.submit(task_handler)
 
     def process_math_task(self, text):
-        """
+        """Process math with OpenAI."""
         try:
             response = openai.Completion.create(
-                engine="text-davinci-003",
+                engine="text-davinci-003",
                 prompt=text,
                 max_tokens=100
             )
@@ -98,21 +96,26 @@ class MultiModelSystem:
             return None
 
     def get_result(self):
-        """
+        """Get the result from the queue."""
         return self.queue.get()
 
 # Example usage
 if __name__ == "__main__":
     system = MultiModelSystem()
 
-    #
-
-
-
-
-
-
-
-
-
+    # Various tasks
+    tasks = [
+        {"task": "translation", "kwargs": {"text": "سلام دنیا!", "src_lang": "fa", "tgt_lang": "en"}},
+        {"task": "qa", "kwargs": {"question": "پایتخت ایران چیست؟", "context": "ایران کشوری در خاورمیانه است و پایتخت آن تهران است."}},
+        {"task": "math", "kwargs": {"text": "What is the integral of x^2?"}},
+        {"task": "persian_nlp", "kwargs": {"text": "این یک جمله فارسی است."}},
+        {"task": "custom_ai", "kwargs": {"text": "تحلیل متن‌های تاریخی طبری."}}
+    ]
+
+    # Submit the tasks for processing
+    for task_info in tasks:
+        system.process_task(task_info["task"], **task_info["kwargs"])
+
+    # Retrieve the results
+    for _ in range(len(tasks)):
+        print("Result:", system.get_result())
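
One note on the new key handling: os.getenv() takes the name of an environment variable, not the key itself, so the new openai.api_key line will always resolve to None and trigger the ValueError that follows it. A minimal sketch of the intended pattern, assuming the key is exported under the OPENAI_API_KEY name that the error message already mentions:

import os
import openai

# Read the key from the environment; OPENAI_API_KEY is the assumed variable name.
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    raise ValueError("OpenAI API key is missing! Set it as 'OPENAI_API_KEY' in environment variables.")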
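Separately, process_math_task still calls openai.Completion.create with engine="text-davinci-003"; that model and the legacy completions endpoint have since been retired, so the call is likely to fail even with a valid key. Below is a hedged sketch of an equivalent helper written against the chat completions endpoint of the same pre-1.0 openai SDK style the file already uses, with gpt-3.5-turbo as an assumed replacement model; inside the class it would keep the self parameter. In the same spirit, newer transformers releases prefer token= over the deprecated use_auth_token= argument passed to pipeline() in load_models().

import openai

def process_math_task(text):
    """Send a math prompt to the chat completions endpoint (sketch, not the committed method)."""
    try:
        # gpt-3.5-turbo is an illustrative model choice, not taken from the original file.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": text}],
            max_tokens=100,
        )
        return response["choices"][0]["message"]["content"]
    except Exception as e:
        print(f"Error processing math task: {e}")
        return None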