Robert Castagna committed on
Commit 185e83d · 1 Parent(s): 5950d94

input context and prompt

Files changed (1): app.py (+4 -3)
app.py CHANGED
@@ -11,14 +11,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
 
-input_text = st.text_input(label='press generate')
+input_text = st.text_input(label='prompt:')
+context = st.text_input(label='how do you want me to answer the question? ie. respond as if you are explaining to a child')
 
 messages = [
     {
         "role": "system",
-        "content": "You are a friendly chatbot who always responds in the style of a pirate",
+        "content": f"{context}",
     },
-    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
+    {"role": "user", "content": f"{input_text}"},
 ]
 
 prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
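For reference, a minimal sketch of how the committed prompt-construction code plausibly fits into a complete app.py. The commit itself only touches the two text inputs and the messages list; the generate button, the sampling parameters (borrowed from the TinyLlama model card example), and the st.write display are assumptions for illustration, not part of this commit.

import streamlit as st
import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                torch_dtype=torch.bfloat16, device_map="auto")

input_text = st.text_input(label='prompt:')
context = st.text_input(label='how do you want me to answer the question? ie. respond as if you are explaining to a child')

if st.button('generate') and input_text:  # button and empty-input guard are assumptions, not in this commit
    messages = [
        {"role": "system", "content": context},
        {"role": "user", "content": input_text},
    ]
    # Render the chat into the model's expected prompt format; add_generation_prompt=True
    # appends the assistant header so the model starts a fresh reply.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Sampling parameters assumed from the TinyLlama model card example.
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    st.write(outputs[0]["generated_text"])

Note that by default the text-generation pipeline returns the prompt plus the completion in generated_text; passing return_full_text=False to the pipe(...) call would return only the model's reply.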