hruday96 committed on
Commit
8fceb5b
1 Parent(s): ed1f063

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -7
app.py CHANGED
@@ -1,19 +1,44 @@
1
  import streamlit as st
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
  # App header
5
- st.header("Know Your Medicine")
6
 
7
  # Load the model and tokenizer
8
  @st.cache_resource
9
- def load_model():
10
- model_name = "meta-llama/Llama-3.2-1B" # Replace with correct model ID from Hugging Face if needed
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
  model = AutoModelForCausalLM.from_pretrained(model_name)
13
  return model, tokenizer
14
 
15
- model, tokenizer = load_model()
 
16
 
17
- st.write("LLaMA 3.2 model integrated!")
 
18
 
19
- # You can now add future steps for input and interaction later
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
  # App header
5
+ st.header("Know Your Medicine - Multiplication Table Generator")
6
 
7
  # Load the model and tokenizer
8
  @st.cache_resource
9
+ def load_model_direct():
10
+ model_name = "meta-llama/Llama-3.2-1B"
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
  model = AutoModelForCausalLM.from_pretrained(model_name)
13
  return model, tokenizer
14
 
15
+ # Load the model
16
+ model, tokenizer = load_model_direct()
17
 
18
+ # Input for the number to generate the multiplication table
19
+ number = st.number_input("Enter a number:", min_value=1, max_value=100, value=5)
20
 
21
+ # Define the prompt for generating the multiplication table
22
+ prompt = f"Give me the multiplication table of {number} up to 12."
23
+
24
+ # Generate text based on the input
25
+ if st.button("Generate Multiplication Table"):
26
+ # Tokenize the input prompt
27
+ tokenized_input = tokenizer(prompt, return_tensors="pt")
28
+ input_ids = tokenized_input["input_ids"].cuda() # If running on GPU
29
+ attention_mask = tokenized_input["attention_mask"].cuda() # If running on GPU
30
+
31
+ # Generate the response from the model
32
+ response_token_ids = model.generate(
33
+ input_ids,
34
+ attention_mask=attention_mask,
35
+ max_new_tokens=150,
36
+ pad_token_id=tokenizer.eos_token_id
37
+ )
38
+
39
+ # Decode the generated tokens to text
40
+ generated_text = tokenizer.decode(response_token_ids[0], skip_special_tokens=True)
41
+
42
+ # Display the generated multiplication table
43
+ st.write("Generated Multiplication Table:")
44
+ st.write(generated_text)