Kush1 committed on
Commit
1c1b7ff
·
1 Parent(s): fd53629

Initial Commit

Files changed (2)
  1. app.py +36 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,36 @@
+ from transformers import AutoTokenizer
+ import transformers
+ import torch
+ import streamlit as st
+
+ prompt = st.text_input('Prompt', 'Hello, how are you doing?')
+
+ model = "meta-llama/Llama-2-13b-chat-hf"
+
+ # Not working: loading the tokenizer separately currently fails here.
+ # tokenizer = AutoTokenizer.from_pretrained(model)
+
+ # Text-generation pipeline in half precision, sharded across available devices.
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     torch_dtype=torch.float16,
+     device_map="auto",
+     do_sample=True,
+ )
+
+ def get_llama_response(prompt):
+     # Generate one completion for the prompt and return its text.
+     sequences = pipeline(
+         prompt,
+         do_sample=True,
+         top_k=10,
+         num_return_sequences=1,
+         max_length=256,
+     )
+     return sequences[0]['generated_text']
+
+ # prompt = "Can you help me to write REST API endpoints in Python?"
+ response = get_llama_response(prompt)
+
+ st.write('Answer: ', response)
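Note on the commented-out tokenizer call above: meta-llama/Llama-2-13b-chat-hf is a gated repository on the Hugging Face Hub, so loading it typically requires an accepted license and an access token. A minimal sketch of how the tokenizer could be loaded and handed to the pipeline, assuming a token is exported in a (hypothetical) HF_TOKEN environment variable and a transformers release recent enough to accept the token argument (older versions used use_auth_token):

import os
import torch
import transformers
from transformers import AutoTokenizer

model = "meta-llama/Llama-2-13b-chat-hf"

# Assumption: HF_TOKEN holds an access token authorized for the gated Llama-2 repo.
hf_token = os.environ.get("HF_TOKEN")

# Load the tokenizer explicitly and pass it to the pipeline alongside the model.
tokenizer = AutoTokenizer.from_pretrained(model, token=hf_token)

pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
    device_map="auto",
    token=hf_token,
)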
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ streamlit
+ transformers
+ torch
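With both files in place, the app can also be tried outside the Space, assuming a Python environment where the listed requirements install cleanly:

pip install -r requirements.txt
streamlit run app.py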