Anthony G committed on
Commit b508e95
1 Parent(s): 44ceb10

used openai api

Files changed (2)
  1. .gitignore +1 -0
  2. app.py +153 -65
.gitignore ADDED
@@ -0,0 +1 @@
+ .env
app.py CHANGED
@@ -1,20 +1,22 @@
- import gradio as gr
- import torch
- import transformers
- from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
- from peft import PeftConfig, PeftModel
- import warnings

- warnings.filterwarnings("ignore")

- PEFT_MODEL = "givyboy/phi-2-finetuned-mental-health-conversational"

- SYSTEM_PROMPT = """Answer the following question truthfully.
- If you don't know the answer, respond 'Sorry, I don't know the answer to this question.'.
- If the question is too complex, respond 'Kindly, consult a psychiatrist for further queries.'."""

- USER_PROMPT = lambda x: f"""<HUMAN>: {x}\n<ASSISTANT>: """
- ADD_RESPONSE = lambda x, y: f"""<HUMAN>: {x}\n<ASSISTANT>: {y}"""

  # bnb_config = BitsAndBytesConfig(
  #     load_in_4bit=True,
@@ -23,68 +25,154 @@ ADD_RESPONSE = lambda x, y: f"""<HUMAN>: {x}\n<ASSISTANT>: {y}"""
  #     bnb_4bit_compute_dtype=torch.float16,
  # )

- config = PeftConfig.from_pretrained(PEFT_MODEL)
-
- peft_base_model = AutoModelForCausalLM.from_pretrained(
-     config.base_model_name_or_path,
-     return_dict=True,
-     # quantization_config=bnb_config,
-     device_map="auto",
-     trust_remote_code=True,
-     offload_folder="offload/",
-     offload_state_dict=True,
- )

- peft_model = PeftModel.from_pretrained(
-     peft_base_model,
-     PEFT_MODEL,
-     offload_folder="offload/",
-     offload_state_dict=True,
- )

- peft_tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
- peft_tokenizer.pad_token = peft_tokenizer.eos_token

- pipeline = transformers.pipeline(
-     "text-generation",
-     model=peft_model,
-     tokenizer=peft_tokenizer,
-     torch_dtype=torch.bfloat16,
-     trust_remote_code=True,
-     device_map="auto",
- )


- def format_message(message: str, history: list[str], memory_limit: int = 3) -> str:
-     if len(history) > memory_limit:
-         history = history[-memory_limit:]

-     if len(history) == 0:
-         return f"{SYSTEM_PROMPT}\n{USER_PROMPT(message)}"

-     formatted_message = f"{SYSTEM_PROMPT}\n{ADD_RESPONSE(history[0][0], history[0][1])}"

-     for msg, ans in history[1:]:
-         formatted_message += f"\n{ADD_RESPONSE(msg, ans)}"

-     formatted_message += f"\n{USER_PROMPT(message)}"
-     return formatted_message


- def get_model_response(message: str, history: list[str]) -> str:
-     formatted_message = format_message(message, history)
-     sequences = pipeline(
-         formatted_message,
-         do_sample=True,
-         top_k=10,
-         num_return_sequences=1,
-         eos_token_id=peft_tokenizer.eos_token_id,
-         max_length=600,
-     )[0]
-     print(sequences["generated_text"])
-     output = sequences["generated_text"].split("<ASSISTANT>:")[-1].strip()
-     # print(f"Response: {output}")
-     return output


- gr.ChatInterface(fn=get_model_response).launch()
 
+ # import gradio as gr
+ # import torch
+ # import transformers
+ # from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ # from peft import PeftConfig, PeftModel
+ # import warnings
+ # from threading import Thread

+ # warnings.filterwarnings("ignore")

+ # PEFT_MODEL = "givyboy/phi-2-finetuned-mental-health-conversational"

+ # SYSTEM_PROMPT = """Answer the following question truthfully.
+ # If you don't know the answer, respond 'Sorry, I don't know the answer to this question.'.
+ # If the question is too complex, respond 'Kindly, consult a psychiatrist for further queries.'."""

+ # USER_PROMPT = lambda x: f"""<HUMAN>: {x}\n<ASSISTANT>: """
+ # ADD_RESPONSE = lambda x, y: f"""<HUMAN>: {x}\n<ASSISTANT>: {y}"""
+ # DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

  # bnb_config = BitsAndBytesConfig(
  #     load_in_4bit=True,

  #     bnb_4bit_compute_dtype=torch.float16,
  # )

+ # config = PeftConfig.from_pretrained(PEFT_MODEL)

+ # peft_base_model = AutoModelForCausalLM.from_pretrained(
+ #     config.base_model_name_or_path,
+ #     return_dict=True,
+ #     # quantization_config=bnb_config,
+ #     device_map="auto",
+ #     trust_remote_code=True,
+ #     offload_folder="offload/",
+ #     offload_state_dict=True,
+ # )

+ # peft_model = PeftModel.from_pretrained(
+ #     peft_base_model,
+ #     PEFT_MODEL,
+ #     offload_folder="offload/",
+ #     offload_state_dict=True,
+ # )
+ # peft_model = peft_model.to(DEVICE)
+
+ # peft_tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+ # peft_tokenizer.pad_token = peft_tokenizer.eos_token
+
+ # pipeline = transformers.pipeline(
+ #     "text-generation",
+ #     model=peft_model,
+ #     tokenizer=peft_tokenizer,
+ #     torch_dtype=torch.bfloat16,
+ #     trust_remote_code=True,
+ #     device_map="auto",
+ # )


+ # # def format_message(message: str, history: list[str], memory_limit: int = 3) -> str:
+ # #     if len(history) > memory_limit:
+ # #         history = history[-memory_limit:]
+
+ # #     if len(history) == 0:
+ # #         return f"{SYSTEM_PROMPT}\n{USER_PROMPT(message)}"
+
+ # #     formatted_message = f"{SYSTEM_PROMPT}\n{ADD_RESPONSE(history[0][0], history[0][1])}"
+
+ # #     for msg, ans in history[1:]:
+ # #         formatted_message += f"\n{ADD_RESPONSE(msg, ans)}"
+
+ # #     formatted_message += f"\n{USER_PROMPT(message)}"
+ # #     return formatted_message
+
+
+ # # def get_model_response(message: str, history: list[str]) -> str:
+ # #     formatted_message = format_message(message, history)
+ # #     sequences = pipeline(
+ # #         formatted_message,
+ # #         do_sample=True,
+ # #         top_k=10,
+ # #         num_return_sequences=1,
+ # #         eos_token_id=peft_tokenizer.eos_token_id,
+ # #         max_length=600,
+ # #     )[0]
+ # #     print(sequences["generated_text"])
+ # #     output = sequences["generated_text"].split("<ASSISTANT>:")[-1].strip()
+ # #     # print(f"Response: {output}")
+ # #     return output
+
+
+ # start_message = ""
+
+
+ # def user(message, history):
+ #     # Append the user's message to the conversation history
+ #     return "", history + [[message, ""]]
+
+
+ # def chat(message, history):
+ #     chat_history = []
+ #     for item in history:
+ #         chat_history.append({"role": "user", "content": item[0]})
+ #         if item[1] is not None:
+ #             chat_history.append({"role": "assistant", "content": item[1]})
+
+ #     message = f"{SYSTEM_PROMPT}\n{USER_PROMPT(message)}"
+ #     chat_history.append({"role": "user", "content": message})
+ #     messages = peft_tokenizer.apply_chat_template(chat_history, tokenize=False, add_generation_prompt=True)
+
+ #     # Tokenize the messages string
+ #     model_inputs = peft_tokenizer([messages], return_tensors="pt").to(DEVICE)
+ #     streamer = transformers.TextIteratorStreamer(
+ #         peft_tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+ #     )
+ #     generate_kwargs = dict(
+ #         model_inputs,
+ #         streamer=streamer,
+ #         max_new_tokens=1024,
+ #         do_sample=True,
+ #         top_p=0.95,
+ #         top_k=1000,
+ #         temperature=0.75,
+ #         num_beams=1,
+ #     )
+ #     t = Thread(target=peft_model.generate, kwargs=generate_kwargs)
+ #     t.start()
+
+ #     # Initialize an empty string to store the generated text
+ #     partial_text = ""
+ #     for new_text in streamer:
+ #         # print(new_text)
+ #         partial_text += new_text
+ #         # Yield an empty string to cleanup the message textbox and the updated conversation history
+ #         yield partial_text
+
+
+ # chat = gr.ChatInterface(fn=chat, title="Mental Health Chatbot - by Jayda Hunte")
+ # chat.launch(share=True)
+
+ import os
+ from openai import OpenAI
+ from dotenv import load_dotenv
+ import gradio as gr

+ load_dotenv()
+ API_KEY = os.getenv("OPENAI_API_KEY")
+ openai = OpenAI(api_key=API_KEY)

+ create_msg = lambda x, y: {"role": x, "content": y}

+ SYSTEM_PROMPT = create_msg(
+     "system",
+     """You are a helpful mental health chatbot, please answer with care. If you don't know the answer, respond 'Sorry, I don't know the answer to this question.'. If the question is too complex, respond 'Kindly, consult a psychiatrist for further queries.'.""".strip(),
+ )


+ def predict(message, history):
+     history_openai_format = []
+     history_openai_format.append(SYSTEM_PROMPT)
+     for human, assistant in history:
+         history_openai_format.append({"role": "user", "content": human})
+         history_openai_format.append({"role": "assistant", "content": assistant})
+     history_openai_format.append({"role": "user", "content": message})

+     response = openai.chat.completions.create(
+         model="ft:gpt-3.5-turbo-0613:personal::8kBTG8eh", messages=history_openai_format, temperature=1.0, stream=True
+     )

+     partial_message = ""
+     for chunk in response:
+         if chunk.choices[0].delta.content is not None:
+             partial_message = partial_message + chunk.choices[0].delta.content
+             yield partial_message


+ gr.ChatInterface(fn=predict, title="Mental Health Chatbot - by Jayda Hunte").launch(share=True)