scott12355 committed on
Commit
761e190
·
verified ·
1 Parent(s): a34885e

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +14 -58
main.py CHANGED
@@ -1,64 +1,20 @@
1
- from flask import Flask, request, jsonify
2
  from transformers import pipeline
3
- import torch
4
- import os
5
 
6
- app = Flask(__name__)
7
 
8
- # Check for MPS (Apple Silicon) support
9
- if torch.backends.mps.is_available():
10
- device = torch.device("mps")
11
- elif torch.cuda.is_available():
12
- device = torch.device("cuda")
13
- else:
14
- device = torch.device("cpu")
15
- print(f"Using device: {device}")
16
 
17
- # Load the Hugging Face models
18
- model_name = "./Qwen2.5-1.5B-Instruct-Local" # or any other model suitable for your needs
19
- chatbot_model = pipeline("text-generation", model=model_name, device=device, batch_size=8)
20
- # sentiment_model = pipeline("sentiment-analysis", device=device)
21
-
22
- # Store conversation history
23
- conversation_history = []
24
-
25
- def generate_response(user_input, sentiment):
26
- # if sentiment['label'] == 'NEGATIVE':
27
- # prompt = f"The user is upset, respond with empathy and support: {user_input}"
28
- # else:
29
- # prompt = f"Respond to the following query: {user_input}"
30
-
31
- # Append the user prompt to the conversation history
32
- conversation_history.append({"role": "user", "content": prompt})
33
-
34
- # Generate response from the model
35
- result = chatbot_model(conversation_history, num_return_sequences=1, max_new_tokens=250)
36
-
37
- # Append the assistant's response to the conversation history
38
- conversation_history.append({"role": "assistant", "content": result[0]['generated_text'][-1]['content'] })
39
-
40
- return result[0]['generated_text'][-1]['content']
41
-
42
-
43
- @app.route('/chat', methods=['POST'])
44
- def chat():
45
- data = request.json
46
- user_input = data.get('message', '')
47
-
48
- if not user_input:
49
- return jsonify({'error': 'Please provide a message.'}), 400
50
-
51
- # sentiment = sentiment_model(user_input)[0]
52
- response = generate_response(user_input, 'POSITIVE')
53
-
54
- with open(file_path, 'w') as file:
55
- file.write("\nConversation History:\n")
56
- for item in conversation_history:
57
- file.write(f"{item}\n")
58
- return jsonify({'response': response , 'sentiment': sentiment})
59
-
60
- @app.route('/history', methods=['GET'])
61
- def get_history():
62
- return jsonify(conversation_history)
63
 
64
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from transformers import pipeline

# NOTE - serving the interactive docs at the root path ("/") lets the
# auto-generated API docs double as the landing page for the app on Spaces.
app = FastAPI(docs_url="/")

# Module-level pipeline so the model is loaded once at startup and shared
# across requests.
pipe = pipeline(task="text2text-generation", model="google/flan-t5-small")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
 
12
@app.get("/generate")
def generate(text: str):
    """
    Generate text from the given input using the `text2text-generation`
    pipeline from `transformers`.

    The model used is `google/flan-t5-small`, which can be found
    [here](https://huggingface.co/google/flan-t5-small).
    """
    results = pipe(text)
    first = results[0]
    return {"output": first["generated_text"]}