Update app.py
app.py CHANGED
@@ -9,31 +9,42 @@ import requests
 with open("system_instructions.txt", "r", encoding="utf-8") as f:
     ECO_PROMPT = f.read()
 
-# …
-…
-…
+# Hugging Face configuration
+HF_API_KEY = os.environ.get("HF_API_KEY")
+HF_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
+
+def format_llama3_prompt(system_prompt, question, answer):
+    """Format prompt according to Llama3's chat template"""
+    return f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
+{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
+Question: {question}
+Answer: {answer}
+Please provide a numerical score between 1-5 based on the guidelines.<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+"""
 
 def score_qa(question, answer):
-    """Get score from …
+    """Get score from Llama3 via Hugging Face API"""
     try:
-        prompt = ECO_PROMPT
+        prompt = format_llama3_prompt(ECO_PROMPT, question, answer)
 
         headers = {
-            "Authorization": f"Bearer {…
+            "Authorization": f"Bearer {HF_API_KEY}",
             "Content-Type": "application/json"
         }
 
         payload = {
-            "…
-            "…
-            …
-            …
+            "inputs": prompt,
+            "parameters": {
+                "max_new_tokens": 5,
+                "temperature": 0.1,
+                "return_full_text": False
+            }
         }
 
-        response = requests.post(…
+        response = requests.post(HF_API_URL, json=payload, headers=headers)
         response.raise_for_status()
 
-        output = response.json()[…
+        output = response.json()[0]['generated_text']
         match = re.search(r"\d+", output)
         return int(match.group(0)) if match else 1
 
@@ -42,7 +53,7 @@ def score_qa(question, answer):
         return 1  # Fallback score
 
 def judge_ecolinguistics_from_csv(csv_file):
-    """Process CSV and generate results"""
+    """Process CSV and generate results (unchanged from original)"""
     rows = []
     with open(csv_file.name, "r", encoding="utf-8") as f:
         reader = csv.DictReader(f)
@@ -84,7 +95,7 @@ def judge_ecolinguistics_from_csv(csv_file):
 
     return out_path, percentage_display
 
-# Custom theme and styling
+# Custom theme and styling (unchanged from original)
 custom_theme = gr.themes.Default().set(
     body_background_fill="#f8fff9",
     button_primary_background_fill="#38a169",
@@ -194,7 +205,7 @@ with gr.Blocks(theme=custom_theme, css=css) as demo:
     gr.Markdown("""
     <div class="footer">
         <p style="margin: 0; color: #2e7d32; font-size: 0.9em;">
-            π Powered by …
+            π Powered by Meta Llama3 | Environmentally Conscious Language Analysis π
        </p>
    </div>
    """)
|