GlastonR committed on
Commit
f50d361
·
verified ·
1 Parent(s): fb28713

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -37
app.py CHANGED
@@ -1,41 +1,17 @@
1
  import streamlit as st
2
- import openai
3
- import os
4
 
5
- # Set up the OpenAI API key from Hugging Face Secrets
6
- openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
7
 
 
 
 
8
 
9
- # Define a function to generate text using GPT-3.5-Turbo
10
- def generate_response(prompt):
11
- response = openai.ChatCompletion.create(
12
- model="gpt-3.5-turbo",
13
- messages=[{"role": "user", "content": prompt}],
14
- max_tokens=150,
15
- temperature=0.7
16
- )
17
- message = response['choices'][0]['message']['content']
18
- return message
19
-
20
- # Streamlit app
21
- def main():
22
- st.title("GPT-3.5 Turbo Text Generator")
23
- st.write("Enter a prompt, and GPT-3.5 Turbo will generate a response.")
24
-
25
- # Text input for user prompt
26
- user_prompt = st.text_area("Enter your prompt:", "")
27
-
28
- if st.button("Generate Response"):
29
- if user_prompt:
30
- # Get the response from GPT-3.5 Turbo
31
- with st.spinner("Generating response..."):
32
- response = generate_response(user_prompt)
33
-
34
- # Display the response
35
- st.write("### GPT-3.5 Turbo Response")
36
- st.write(response)
37
- else:
38
- st.warning("Please enter a prompt to generate a response.")
39
-
40
- if __name__ == "__main__":
41
- main()
 
1
  import streamlit as st
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
3
 
4
+ # Load the model and tokenizer
5
+ model_name = "flax-community/t5-recipe-generation"
6
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
8
 
9
+ # Streamlit app for recipe generation
10
+ st.title("AI-Powered Recipe Generator")
11
+ prompt = st.text_input("Enter ingredients or a recipe title:")
12
 
13
+ if st.button("Generate Recipe"):
14
+ inputs = tokenizer(prompt, return_tensors="pt")
15
+ outputs = model.generate(inputs["input_ids"], max_length=150, num_return_sequences=1)
16
+ recipe = tokenizer.decode(outputs[0], skip_special_tokens=True)
17
+ st.write(recipe)