hruday96 committed on
Commit eff09be
1 Parent(s): 4230190

Update app.py

Files changed (1)
  1. app.py +53 -40
app.py CHANGED
@@ -2,43 +2,56 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 # App header
-st.header("Know Your Medicine - Multiplication Table Generator")
-
-# Load the model and tokenizer
-@st.cache_resource
-def load_model():
-    model_name = "gpt2"  # Using GPT-2 for faster builds
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    return model, tokenizer
-
-# Load the model
-model, tokenizer = load_model()
-
-# Input for the number to generate the multiplication table
-number = st.number_input("Enter a number:", min_value=1, max_value=100, value=5)
-
-# Define the prompt for generating the multiplication table
-prompt = f"Give me the multiplication table of {number} up to 12."
-
-# Generate text based on the input
-if st.button("Generate Multiplication Table"):
-    # Tokenize the input prompt
-    tokenized_input = tokenizer(prompt, return_tensors="pt")
-    input_ids = tokenized_input["input_ids"]  # Using CPU for simplicity
-    attention_mask = tokenized_input["attention_mask"]  # Using CPU for simplicity
-
-    # Generate the response from the model
-    response_token_ids = model.generate(
-        input_ids,
-        attention_mask=attention_mask,
-        max_new_tokens=150,
-        pad_token_id=tokenizer.eos_token_id
-    )
-
-    # Decode the generated tokens to text
-    generated_text = tokenizer.decode(response_token_ids[0], skip_special_tokens=True)
-
-    # Display the generated multiplication table
-    st.write("Generated Multiplication Table:")
-    st.write(generated_text)
+st.header("Know Your Medicine")
+
+# Retrieve the API key from Streamlit secrets
+GOOGLE_API_KEY = st.secrets["GEMINI_API_KEY"]
+
+# Configure the Google Generative AI API with your API key
+genai.configure(api_key=GOOGLE_API_KEY)
+
+# Input field for the medicine name
+st.subheader("Enter Medicine Details:")
+medicine_name = st.text_input('Medicine Name', '')
+
+# Create the prompt based on user input
+if medicine_name:
+    prompt = f"""
+    Analyze the following details:
+    1. Write the medicine name and purpose of the medicine.
+    2. Write down the symptoms for which this medicine should be used.
+    3. List the possible side effects of the medicine.
+    4. Mention any common drug interactions or contraindications.
+    5. Provide common brand names or generic alternatives, if available.
+    6. Mention any specific precautions (e.g., avoid alcohol, potential allergies).
+
+    Medicine Name = {medicine_name}
+    """
+
+# Button to submit the prompt
+if st.button("Generate"):
+    if medicine_name:  # Ensure the medicine name is entered
+        try:
+            # Initialize the generative model (adjust model name if needed)
+            model = genai.GenerativeModel('gemini-pro')  # Ensure this is the correct model name
+
+            # Generate content based on the prompt
+            response = model.generate_content(prompt)
+
+            # Check if there is a response from the model
+            if response:
+                st.subheader("Generated Medicine Analysis:")
+                st.write(response.text)  # Display the generated response
+            else:
+                st.error("Error: Unable to generate the analysis.")
+
+        except Exception as e:
+            st.error(f"Error: {e}")
+    else:
+        st.error("Please enter a medicine name to generate the analysis.")
+
+# Add space or content at the bottom
+st.write("\n" * 20)  # Adds space to push the content down
+
+# Footer
+st.markdown("Built with 🧠 by Hruday & Google Gemini")