Hazzzardous committed
Commit: 9f00cb8
Parent(s): f02c936
Update app.py
app.py CHANGED

@@ -64,7 +64,7 @@ def infer(
     # Clear model state for generative mode
     model.resetState()
     if (mode == "Q/A"):
-        prompt = f"
+        prompt = f"Q:\n{prompt}\n\nExpert Long Detailed Response:"
 
     print(f"PROMPT ({datetime.now()}):\n-------\n{prompt}")
     print(f"OUTPUT ({datetime.now()}):\n-------\n")
@@ -142,10 +142,10 @@ def chat(
     print(f"CHAT ({datetime.now()}):\n-------\n{prompt}")
     print(f"OUTPUT ({datetime.now()}):\n-------\n")
     # Load prompt
-    model.loadContext(newctx=prompt)
+    model.loadContext(newctx="\nUser: "+prompt+"\n\nBot: ")
     generated_text = ""
     done = False
-    gen = model.forward(number=max_new_tokens, stopStrings=stop,temp=temperature,top_p_usual=top_p)
+    gen = model.forward(number=max_new_tokens, stopStrings=stop+["User"],temp=temperature,top_p_usual=top_p)
     generated_text = gen["output"]
     generated_text = generated_text.lstrip("\n ")
     print(f"{generated_text}")
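For reference, a minimal sketch of the prompt formatting this commit introduces. The helper names below are illustrative only (app.py builds these strings inline inside infer() and chat()); just the string templates and the stop-string addition are taken from the diff above.

def build_qa_prompt(question: str) -> str:
    # Q/A mode: wrap the user's question so the model answers as an expert.
    return f"Q:\n{question}\n\nExpert Long Detailed Response:"

def build_chat_context(message: str) -> str:
    # Chat mode: frame the turn as a User/Bot exchange before it is loaded
    # into the model state via loadContext().
    return "\nUser: " + message + "\n\nBot: "

def chat_stop_strings(stop):
    # forward() now also stops when the model begins a new "User" turn,
    # which keeps the bot from writing the user's next message itself.
    return stop + ["User"]

if __name__ == "__main__":
    print(build_qa_prompt("What is RWKV?"))
    print(repr(build_chat_context("Hello!")))
    print(chat_stop_strings(["<|endoftext|>"]))

Appending "User" to stopStrings is the main behavioral change in the chat path: generation is cut off as soon as the model starts a new user turn, rather than relying only on the caller-supplied stop strings.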