FFatih committed on
Commit
8b4ba44
·
0 Parent(s):

Synchronisation Production

Browse files
Files changed (3) hide show
  1. README.md +37 -0
  2. app.py +98 -0
  3. requirements.txt +1 -0
README.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: SAE-GPT2
3
+ emoji: ❤️
4
+ colorFrom: indigo
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 3.50.2
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ <hr/>
13
+
14
+ <h4> Environnement de développement commun </h4>
15
+
16
+ <br/>
17
+
18
+ | Nom | Lien |
19
+ |------------|-------------------------------------------------------|
20
+ | Production | https://huggingface.co/spaces/FFatih/SAE-GPT2-PROD |
21
+ | Recette | https://huggingface.co/spaces/FFatih/SAE-GPT2-RECETTE |
22
+
23
+ <hr/>
24
+
25
+ <h4> Environnement de développement personnel </h4>
26
+
27
+ <br/>
28
+
29
+ | Prenom | Lien |
30
+ |-------------------|------------------------------------------------------------|
31
+ | Fatih | https://huggingface.co/spaces/FFatih/SAE-GPT2-FATIH |
32
+ | Bastien | https://huggingface.co/spaces/BastienHot/SAE-GPT2-BASTIEN |
33
+ | Pascal | https://huggingface.co/spaces/PascalZhan/SAE-GPT2-PASCAL |
34
+ | Tamij | https://huggingface.co/spaces/Tamij/SAE-GPT2-TAMIJ |
35
+ | Kevin | https://huggingface.co/spaces/Kemasu/SAE-GPT2-KEVIN |
36
+ | Lilian | https://huggingface.co/spaces/Solialiranes/SAE-GPT2-LILIAN |
37
+ | Evan | https://huggingface.co/spaces/Evanparis240/SAE-GPT2-EVAN |
app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Author: Bastien
# Date: 2/25/2024
# Project: SAE-GPT2 | BUT 3 Informatique - Semester 5

# Runtime dependency bootstrap.
# NOTE(review): installing packages with os.system() at startup is slow and
# fragile — these pins belong in requirements.txt. Kept as-is to preserve the
# deployed behavior of the Hugging Face Space.
import os

os.system("pip install --upgrade pip")
os.system("pip install googletrans-py")
os.system("pip install tensorflow==2.15.0")
os.system("pip install keras-nlp")
os.system("pip install -q --upgrade keras")  # Upgrade Keras to version 3

# BUG FIX: KERAS_BACKEND must be set *before* keras is imported — Keras 3
# reads this environment variable at import time, so setting it after
# `import keras` (as the original code did) had no effect on backend
# selection.
os.environ["KERAS_BACKEND"] = "tensorflow"

import time
import keras
import keras_nlp
import pandas as pd
import gradio as gr
from googletrans import Translator

# Load the fine-tuned model from the Space's working directory.
model = keras.models.load_model("LoRA_Model_V2.keras")

# Single shared translator instance, reused for detection and translation.
translator = Translator()
29
# Generate a doctor-style answer for a (possibly non-English) user question.
def generate_responses(question):
    """Return the model's answer to *question*, in the question's language.

    The question is translated to English before being fed to the model
    (fine-tuned on English patient/doctor dialogues), and the model's answer
    is translated back to the user's language afterwards.
    """
    # Detect the language of the prompt (googletrans code, e.g. "FR").
    language = translator.detect(question).lang.upper()

    if language != "EN":
        # Translate the user's text to English for the model.
        question = translator.translate(question, src=language, dest="en").text

    prompt = f"Patient: \"{question}\"\nDoctor:"
    # Generate the answer, then strip the prompt-engineering scaffolding
    # ("Patient: ... Doctor:") from the raw model output.
    output = clean_answer_text(model.generate(prompt, max_length=1024))

    if language != "EN":
        # CONSISTENCY FIX: reuse the shared module-level translator instead of
        # constructing a throwaway Translator() on every call, as the
        # detection/translation above already does.
        output = translator.translate(output, src="en", dest=language).text

    return output
44
+
45
+
46
# Strip the prompt-engineering scaffolding from the model's raw output.
def clean_answer_text(text: str) -> str:
    """Extract only the doctor's reply from the full generated dialogue.

    The model is prompted with 'Patient: "..."\\nDoctor:' and may echo the
    whole exchange (including a hallucinated follow-up patient turn); this
    returns just the doctor's answer, truncated to a full sentence.
    """
    # Locate the start of the doctor's reply.
    # ROBUSTNESS FIX: the original computed find(...) + len(...)
    # unconditionally, so a missing "Doctor:" marker (find() == -1) silently
    # chopped the first 6 characters of the text; fall back to the whole
    # text instead.
    marker = "Doctor:"
    marker_index = text.find(marker)
    start = marker_index + len(marker) if marker_index != -1 else 0

    # Extract everything after "Doctor:" (or the whole text if absent).
    response_text = text[start:].strip()

    # If the model generated a follow-up "Patient:" turn, cut the reply there.
    follow_up_index = response_text.find("\nPatient:")
    if follow_up_index != -1:
        response_text = response_text[:follow_up_index]
    else:
        # Otherwise truncate at the last full sentence (last period), if any.
        last_dot_index = response_text.rfind(".")
        if last_dot_index != -1:
            response_text = response_text[:last_dot_index + 1]

    # Final tidy-up: trim whitespace and drop any stray echoed prefix.
    response_text = response_text.strip()
    response_text = response_text.replace("Doctor: ", "")

    return response_text
71
+
72
# Running question/answer history shown in the UI (newest entry first).
history_df = pd.DataFrame(columns=["Question", "Réponse"])


# Gradio callback: answer a question and prepend the exchange to the history.
def chat_interface(question):
    global history_df
    answer = generate_responses(question)
    # Build a one-row frame for this exchange and stack it on top of the
    # existing history so the most recent question appears first.
    new_row = pd.DataFrame({"Question": [question], "Réponse": [answer]})
    history_df = pd.concat([new_row, history_df], ignore_index=True)
    return answer, history_df
82
+
83
# --- Gradio application UI --------------------------------------------------
with gr.Blocks() as demo:
    # Banner with the project logo and title.
    # FIX: the original used HTML-attribute syntax "width=100%" inside a CSS
    # style string; the valid CSS declaration is "width:100%".
    gr.HTML("""
    <div style='width: 100%; height: 200px; background: url("https://github.com/BastienHot/SAE-GPT2/raw/70fb88500a2cc168d71e8ed635fc54492beb6241/image/logo.png") no-repeat center center; background-size: contain;'>
        <h1 style='text-align:center; width:100%'>DracolIA - AI Question Answering for Healthcare</h1>
    </div>
    """)
    with gr.Row():
        question = gr.Textbox(label="Votre Question", placeholder="Saisissez ici...")
        submit_btn = gr.Button("Envoyer")
    response = gr.Textbox(label="Réponse", interactive=False)
    # FIX: gr.Dataframe's keyword is "value", not "values" — the original
    # keyword is not a documented parameter of the component.
    history_display = gr.Dataframe(headers=["Question", "Réponse"], value=[])

    submit_btn.click(fn=chat_interface, inputs=question, outputs=[response, history_display])

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ gradio