Wootang01 committed on
Commit
193579c
1 Parent(s): 639ffbe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -3
app.py CHANGED
@@ -1,6 +1,8 @@
1
  #import libraries and dependencies
2
- import gradio as gr
3
  #from gradio.mix import Parallel
 
 
 
4
  from transformers import pipeline
5
 
6
  #instantiate variables as strings
@@ -18,7 +20,9 @@ from transformers import pipeline
18
  #]
19
 
20
  #instantiate variables as functions
21
- pipe = pipeline("text-generation", model='EleutherAI/gpt-neo-2.7B', trust_remote_code=True)
 
 
22
 
23
  #model1 = gr.Interface.load("huggingface/bigscience/bloom-560m")
24
  #model2 = gr.Interface.load("huggingface/google/flan-t5-xl")
@@ -27,7 +31,13 @@ pipe = pipeline("text-generation", model='EleutherAI/gpt-neo-2.7B', trust_remote
27
 
28
  #togethercomputer/GPT-NeoXT-Chat-Base-20B
29
  #decapoda-research/llama-7b-hf
 
30
  #define functions
 
 
 
 
 
31
  #def complete_with_gpt(text):
32
  # # Use the last 50 characters of the text as context
33
  # return text[:-50] + model4(text[-50:])
@@ -48,5 +58,7 @@ pipe = pipeline("text-generation", model='EleutherAI/gpt-neo-2.7B', trust_remote
48
 
49
  #if __name__ == "__main__":
50
  # demo1.launch(debug=True)
 
51
 
52
- gr.Interface.from_pipeline(pipe).launch()
 
 
1
  #import libraries and dependencies
 
2
  #from gradio.mix import Parallel
3
+
4
+ import gradio as gr
5
+ import torch
6
  from transformers import pipeline
7
 
8
  #instantiate variables as strings
 
20
  #]
21
 
22
  #instantiate variables as functions
23
#pipe = pipeline("text-generation", model='EleutherAI/gpt-neo-2.7B', trust_remote_code=True)

# Instantiate the Dolly v2 (3B) instruction-following text-generation pipeline.
# NOTE(review): trust_remote_code=True executes code fetched from the model
# repository — confirm databricks/dolly-v2-3b is a trusted source.
ans = pipeline(
    model="databricks/dolly-v2-3b",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
26
 
27
  #model1 = gr.Interface.load("huggingface/bigscience/bloom-560m")
28
  #model2 = gr.Interface.load("huggingface/google/flan-t5-xl")
 
31
 
32
  #togethercomputer/GPT-NeoXT-Chat-Base-20B
33
  #decapoda-research/llama-7b-hf
34
+
35
  #define functions
36
+
37
def answer(query):
    """Generate an answer for *query* with the module-level Dolly pipeline.

    Parameters:
        query: the user's prompt string, as typed into the Gradio text input.

    Returns:
        The generated answer as a string when the pipeline yields the usual
        text-generation result (a list of dicts with a 'generated_text' key);
        otherwise the pipeline's raw output unchanged.
    """
    out = ans(query)
    # Fix: the original returned the raw pipeline output, so the Gradio
    # 'text' output displayed a Python repr like "[{'generated_text': ...}]"
    # instead of the answer itself. Unwrap defensively so that if the custom
    # pipeline already returns a plain string, behavior is unchanged.
    if isinstance(out, list) and out and isinstance(out[0], dict):
        return out[0].get("generated_text", out)
    return out
40
+
41
  #def complete_with_gpt(text):
42
  # # Use the last 50 characters of the text as context
43
  # return text[:-50] + model4(text[-50:])
 
58
 
59
  #if __name__ == "__main__":
60
  # demo1.launch(debug=True)
61
+ #gr.Interface.from_pipeline(pipe).launch()
62
 
63
# Build the Gradio text-to-text demo around answer() and start serving it.
Demo = gr.Interface(
    fn=answer,
    inputs='text',
    outputs='text',
    examples=[['What is the capital of India ?']],
)
Demo.launch()