RamboRogers committed on
Commit
adc37b6
1 Parent(s): 0ba73f9

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +39 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# Load the model directly via the transformers pipeline API.
from transformers import pipeline
import gradio as gr
import torch

# Prefer the GPU when one is present; otherwise run inference on the CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Text-generation pipeline backed by Microsoft's BioGPT-Large checkpoint.
pipe = pipeline("text-generation", model="microsoft/BioGPT-Large", device=device)
11
+
12
def question(message, history):
    """Chat handler for gr.ChatInterface: complete *message* with BioGPT.

    Parameters
    ----------
    message : str
        The user's prompt, fed directly to the text-generation pipeline.
    history : list
        Prior chat turns; required by the ChatInterface signature but unused.

    Returns
    -------
    str
        The generated text (prompt plus continuation, up to 200 tokens).
    """
    outputs = pipe(message, max_length=200)
    return outputs[0]["generated_text"]
19
+
20
# Markdown description rendered above the chat box in the Gradio UI.
description = """
# Summary
This chat directly pipes into this BioGPT Large LLM. This LLM outputs some strange things and can be found here: [Microsoft BioGPT Large](https://huggingface.co/microsoft/BioGPT-Large). To use this LLM and derive any value, think of it as a neural network trying to complete a problem. See the examples for ideas.

### Examples
* HIV is
* Foot Fungus causes
* Symptoms of liver failure are

### Good Luck! 🍀
Coded 🧾 by [Matthew Rogers](https://matthewrogers.org) | [RamboRogers](https://github.com/ramboRogers)
"""
33
+
34
+
35
+
36
# Wire the handler and description into a chat UI.
program = gr.ChatInterface(
    question,
    description=description,
    title="Microsoft BioGPT Large Chat",
)

# Launch the web app only when executed as a script, not on import.
if __name__ == "__main__":
    program.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ sacremoses
2
+ transformers
3
+ gradio
4
+ torch