Futuresony committed
Update app.py
app.py CHANGED
@@ -20,9 +20,20 @@ def fetch_message():
     if not message:
         return jsonify({"error": "No input provided."}), 400
 
+    # Define model parameters
+    model_params = {
+        "temperature": 0.7,  # Controls randomness
+        "top_p": 0.9,  # Nucleus sampling
+        "max_length": 300,  # Limit response length
+        "do_sample": True  # Enable sampling
+    }
+
     # Process the message using the Hugging Face model
     try:
-        response = client.text_generation(
+        response = client.text_generation(
+            message,
+            **model_params  # Pass parameters
+        )
         return jsonify({"response": response})
     except Exception as e:
         return jsonify({"error": str(e)}), 500
@@ -30,5 +41,5 @@ def fetch_message():
 if __name__ == "__main__":
     # Use PORT environment variable or default to 7860
     port = int(os.getenv("PORT", 7860))
-
-
+    app.run(host="0.0.0.0", port=port)
+
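For context, below is a minimal sketch of how the updated route could sit in a complete app.py, assuming `client` is a `huggingface_hub.InferenceClient`; the model ID, route path, and request parsing are illustrative assumptions that do not appear in this diff. If `client` is indeed an `InferenceClient`, its `text_generation` method caps output length with `max_new_tokens` rather than `max_length`, so the sketch uses that parameter name.

```python
import os

from flask import Flask, jsonify, request
from huggingface_hub import InferenceClient

app = Flask(__name__)

# Assumed model ID; the model actually used by the Space is not shown in the diff.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")

@app.route("/generate", methods=["POST"])  # assumed route path
def fetch_message():
    # Assumed request parsing; the diff only shows the validation below.
    message = (request.get_json(silent=True) or {}).get("message", "")
    if not message:
        return jsonify({"error": "No input provided."}), 400

    # Generation parameters; InferenceClient.text_generation limits output
    # with max_new_tokens rather than max_length.
    model_params = {
        "temperature": 0.7,     # Controls randomness
        "top_p": 0.9,           # Nucleus sampling
        "max_new_tokens": 300,  # Limit response length
        "do_sample": True       # Enable sampling
    }

    # Process the message using the Hugging Face model
    try:
        response = client.text_generation(message, **model_params)
        return jsonify({"response": response})
    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    # Use PORT environment variable or default to 7860
    port = int(os.getenv("PORT", 7860))
    app.run(host="0.0.0.0", port=port)
```

With the assumed route path, the endpoint could be exercised with `curl -X POST -H "Content-Type: application/json" -d '{"message": "Hello"}' http://localhost:7860/generate`.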