vasilisklv committed on
Commit
dcabda1
1 Parent(s): 0b1267e

Update app.py

Browse files

now creates images too

Files changed (1) hide show
  1. app.py +139 -28
app.py CHANGED
@@ -1,17 +1,34 @@
1
  from openai import OpenAI
2
  import gradio as gr
3
- import os
 
 
 
 
 
4
 
5
 
6
- # create model for story creation
 
 
 
 
7
  client = OpenAI(
8
  base_url="https://api-inference.huggingface.co/v1/",
9
- api_key=os.environ["genai_stories"]
10
  )
11
-
12
  # the model to utilize
13
  model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
14
 
 
 
 
 
 
 
 
 
 
15
 
16
  #------------------------------------------------------------------------------------------------
17
 
@@ -22,19 +39,76 @@ def chat_with_llm(model, messages):
22
  model=model,
23
  messages=messages,
24
  max_tokens=2048,
25
- temperature=0)
 
 
26
  return completion.choices[0].message.content
27
 
28
 
29
  #------------------------------------------------------------------------------------------------
30
 
31
 
32
- # Initialize the story
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  def generate_story(story_type):
34
  global model, messages
35
  messages = []
36
- messages.append({"role": "system", "content": f"You are a story creator for kids. The player is a kid, so you must use simple words. The player selects this type of story: {story_type}, according to which you will be asked to create a story. Keep the plot interesting and concise, and write the story as if you talk to children, with simple words. If you are asked to create a story, a question and the answers, then you must create these and only these. Do not create anything else. If you are asked to create the ending of the story, then you must create this and only this. Do not create anything else. "})
37
- messages.append({"role": "user", "content": f"You must create and return ONLY a story part, a question and 4 answers. To do that, you must explicitly follow these steps: 1) Create the first part of the story, according to the story_type from your system content, within 50 and 60 words, without switching to new line. Then you must switch line by adding this: '\n\n'. 2) Then create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. 3) Then create the 4 potential answers for the question in step 2. The answers of every question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|'. Don't explicitly specify 'Story', 'Question', or 'Answer'. You must reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. Do not create any other stuff."})
38
  messages = [messages[0]] + [messages[-1]]
39
  response = chat_with_llm(model, messages)
40
  lines = response.split("\n\n")
@@ -42,13 +116,17 @@ def generate_story(story_type):
42
  question = lines[1] # The second to last line is the question
43
  answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
44
  messages.append({"role": "assistant", "content": "I am waiting for next command."})
45
- return story, question, gr.Radio(choices=answers, interactive=True), gr.Radio(choices=[story_type], interactive=False), gr.Button(interactive=False)
 
 
 
 
46
 
47
 
48
  # Continue the story based on what happened so far and the player's latest answer
49
- def continue_story(previous_story, selected_option):
50
  global model, messages
51
- messages.append({"role": "user", "content": f"You must create and return a story part, a question and 4 answers. To do that, you must explicitly follow these steps: 1) Based on this story so far: '{previous_story} {selected_option}', continue the story and create the next part of the story within 50 and 60 words, without changing lines. You must provide ONLY the new part that you created. Then you must switch line by adding this: '\n\n'. 2) Then create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. 3) Then create the 4 potential answers for the question in step 2. The answers of every question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|' and no other separator. Don't explicitly specify 'Story', 'Question', or 'Answer'. You must reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. Do not create any other stuff."})
52
  messages = [messages[0]] + [messages[-1]]
53
  response = chat_with_llm(model, messages)
54
  lines = response.split("\n\n")
@@ -57,19 +135,46 @@ def continue_story(previous_story, selected_option):
57
  answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
58
  messages.append({"role": "assistant", "content": "I am waiting for next command."})
59
  story = previous_story + '\n\n' + next_story
60
- return story, question, gr.Radio(choices=answers, interactive=True)
61
-
62
-
63
- # End the story
64
- def end_story(previous_story, selected_option):
 
 
 
 
 
 
 
 
65
  global model, messages
66
- messages.append({"role": "user", "content": f"Please create an ending for this story: '{previous_story}' and considering the latest answer: '{selected_option}'. You must provide only the ending of the story in an exciting way!."})
67
  end_story = chat_with_llm(model, messages)
68
  # lines = response.split("\n\n")
69
  # end_story = lines[0] # Everything before the last two lines is the story
70
  messages.append({"role": "assistant", "content": "I ended the story successfully. Now I am not waiting for any more responses from the player."})
71
  story = previous_story + '\n\n' + end_story
72
- return story
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
 
75
  # initialize messages
@@ -82,14 +187,16 @@ with gr.Blocks() as game_ui:
82
  # Left column for the story
83
  with gr.Column(scale=1):
84
  story = gr.Textbox(label="Story", interactive=False, lines=12)
 
85
 
86
  # Right column for the question, answers, and buttons
87
  with gr.Column(scale=1):
88
- story_type = gr.Radio(label="What story to create?", choices=["A knight who leads an army attacking a castle",
89
- "A kid alien who lands on earth and explores around",
90
- "Animals who participate in song contest",
91
- "A little princess who is saved by a prince"])
92
-
 
93
  start_button = gr.Button("Start Game!")
94
 
95
  question = gr.Textbox(label="Question", interactive=False)
@@ -98,11 +205,15 @@ with gr.Blocks() as game_ui:
98
  submit_button = gr.Button("Submit your answer")
99
  end_button = gr.Button("End the story")
100
 
101
- # what the button do
102
- start_button.click(fn=generate_story, inputs=[story_type], outputs=[story, question, answers, story_type, start_button])
103
- submit_button.click(fn=continue_story, inputs=[story, answers], outputs=[story, question, answers])
104
- end_button.click(fn=end_story, inputs=[story, answers], outputs=[story])
 
 
 
105
 
106
 
107
  # Launch the Gradio interface
108
- game_ui.launch(share=True)
 
 
1
  from openai import OpenAI
2
  import gradio as gr
3
+ import requests
4
+ import matplotlib.pyplot as plt
5
+ import io
6
+ from PIL import Image, UnidentifiedImageError
7
+ import random
8
+ import time
9
 
10
 
11
#------------------------------------------------------------------------------------------------

import os  # NOTE(review): restored -- this revision reads os.environ below but dropped `import os`.

# Hugging Face token, shared by the chat endpoint and the image endpoint.
api_key = os.environ["genai_stories"]

# define LLM model for story creation, with OpenAI's format
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=api_key
)
# the model to utilize
model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# define model for image creation
# API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
# API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
# API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"

# Authorization header for the raw `requests` calls to the image model.
bearer_key = "Bearer " + api_key
headers = {"Authorization": bearer_key}
31
+
32
 
33
  #------------------------------------------------------------------------------------------------
34
 
 
39
  model=model,
40
  messages=messages,
41
  max_tokens=2048,
42
+ temperature=0.7,
43
+ top_p=0.98,
44
+ seed=random.randint(0, 100))
45
  return completion.choices[0].message.content
46
 
47
 
48
  #------------------------------------------------------------------------------------------------
49
 
50
 
51
# connect with image model and create images
def request_to_image_model(story):
    """POST the given payload as JSON to the image-model endpoint and return the raw response body.

    `story` is the full request payload dict (prompt under "inputs" plus
    generation parameters), not just the story text.
    """
    api_response = requests.post(API_URL, headers=headers, json=story)
    return api_response.content
55
+
56
# create the final image, adjusting the model's hyperparameters and converting to RGB
def create_image(prompt):
    """Generate an image for `prompt` via the inference API and return it as a PIL RGB image.

    The endpoint sometimes answers with a non-image body (e.g. a "model is
    loading" JSON error); PIL then raises UnidentifiedImageError when decoding.
    In that case we wait 60 seconds and retry once.
    """
    def _payload():
        # Fresh payload per attempt so each retry gets a new random seed.
        return {
            "inputs": prompt,
            "parameters": {
                # "num_inference_steps": 50,  # Optional: controls generation quality
                # "guidance_scale": 7.5,  # Optional: controls adherence to the prompt
                "seed": random.randint(0, 100)  # Set the seed to a random number
            }
        }

    # BUG FIX: the original try/except wrapped only the HTTP request, but
    # UnidentifiedImageError is raised by Image.open, not by requests -- the
    # decode must sit inside the try for the retry branch to ever run.
    try:
        image_bytes = request_to_image_model(_payload())
        image = Image.open(io.BytesIO(image_bytes))
    except UnidentifiedImageError:
        time.sleep(60)  # give the model time to finish loading, then retry once
        image_bytes = request_to_image_model(_payload())
        image = Image.open(io.BytesIO(image_bytes))

    # Convert the image to RGB if it's not
    if image.mode != 'RGB':
        image = image.convert('RGB')

    return image
84
+
85
+
86
+ #------------------------------------------------------------------------------------------------
87
+
88
+
89
# create a prompt for the image model based on the character
def create_character_prompt(story_type):
    """Return a fixed appearance description for the story's main character.

    The main character is taken as the third word of `story_type`
    (e.g. "A fearless king who ..." -> "king"). For an unrecognized
    character an empty string is returned instead of raising
    UnboundLocalError, as the original if/elif chain did.
    """
    descriptions = {
        'king': " The king has a strong jawline and piercing eyes, dark hair, shining silver armor with simple decorations, red cape falling behind him, large sword.",
        'alien': " The alien has a round, shiny head, big eyes glow bright blue, small and green with tiny arms, silver spacesuit covers its body.",
        'rabbit': " The rabbit has soft white fur, big ears stand up tall, wears a tiny red bow tie, fluffy tail bounces as it hops.",
        'princess': " The princess has long brown hair, sparkling blue eyes shine brightly, wears a flowing pink gown, small jeweled crown rests on her head.",
    }
    main_character = story_type.split(' ')[2]
    # Safe default for story types whose hero has no canned description.
    return descriptions.get(main_character, "")
101
+
102
+
103
+ #------------------------------------------------------------------------------------------------
104
+
105
+
106
+ # Initialize the story based on the story_type selected by the player
107
  def generate_story(story_type):
108
  global model, messages
109
  messages = []
110
+ messages.append({"role": "system", "content": f"You are a structured storytelling assistant. Your purpose is to generate stories, questions, and answers in a specific format. Always adhere strictly to the rules provided by the user. This is the topic that you must create a story about: story_type = {story_type}"})
111
+ messages.append({"role": "user", "content": f"You must create and return one story part, one question and four answers. To do that, you must explicitly follow these steps: Step 1) Create the initial part of the story, according to the story_type from your system content, within 50 and 60 words, without switching to new line. Then you must switch line by adding this: '\n\n'. Step 2) Create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. Step 3) Create the 4 potential answers for the question in step 2. The answers of the question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|'. Now some general guidelines for your response: 1) Don't explicitly specify 'Story', 'Question', or 'Answer'. 2) You must ALWAYS reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. 3) Do not return any other stuff in your response. 4) Always change lines with '\n\n' between ther story part and question and between the question and the answers."})
112
  messages = [messages[0]] + [messages[-1]]
113
  response = chat_with_llm(model, messages)
114
  lines = response.split("\n\n")
 
116
  question = lines[1] # The second to last line is the question
117
  answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
118
  messages.append({"role": "assistant", "content": "I am waiting for next command."})
119
+ # main_character = story_type.split(' ')[2]
120
+ character_prompt = create_character_prompt(story_type)
121
+ image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story + character_prompt
122
+ image = create_image(image_prompt)
123
+ return story, question, gr.Radio(choices=answers, interactive=True), gr.Radio(choices=[story_type], interactive=False), gr.Button(interactive=False), image
124
 
125
 
126
  # Continue the story based on what happened so far and the player's latest answer
127
+ def continue_story(previous_story, selected_option, story_type):
128
  global model, messages
129
+ messages.append({"role": "user", "content": f"You must create and return one story part, one question and four answers. To do that, you must explicitly follow these steps: Step 1) Based on this story so far: '{previous_story} {selected_option}', continue the story and create the next part of the story within 50 and 60 words, without changing lines. You must provide ONLY the new part that you created. Then you must switch line by adding this: '\n\n'. Step 2) Create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. Step 3) Create the 4 potential answers for the question in step 2. The answers of the question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|'. Now some general guidelines for your response: 1) Don't explicitly specify 'Story', 'Question', or 'Answer'. 2) You must ALWAYS reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. 3) Do not return any other stuff in your response. 4) Always change lines with '\n\n' between story part and question that you generate. 5) Always change lines with '\n\n' between question and the answers that you generate"})
130
  messages = [messages[0]] + [messages[-1]]
131
  response = chat_with_llm(model, messages)
132
  lines = response.split("\n\n")
 
135
  answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
136
  messages.append({"role": "assistant", "content": "I am waiting for next command."})
137
  story = previous_story + '\n\n' + next_story
138
+ # image_prompt = "Cartoon image of: " + next_story + ". The image has bright colors and simple shapes, intended for children story."
139
+ main_character = story_type.split(' ')[2].lower()
140
+ character_prompt = create_character_prompt(story_type)
141
+ if main_character in next_story.lower():
142
+ image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story + character_prompt
143
+ else:
144
+ image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story
145
+ image = create_image(image_prompt)
146
+ return story, question, gr.Radio(choices=answers, interactive=True), image
147
+
148
+
149
# End the story based on what happened so far and the player's latest answer
def end_story(previous_story, selected_option, story_type):
    """Ask the LLM for an ending, generate a matching image, and return (full_story, image).

    Appends the user request and a closing assistant turn to the global
    `messages` history, mirroring generate_story/continue_story.
    """
    global model, messages
    messages.append({"role": "user", "content": f"You must create an ending for this story: '{previous_story}' and also considering the latest answer: '{selected_option}'. You must provide only the ending of the story in an exciting way. Do not return any other stuff in your response."})
    # Renamed local from `end_story` -- the original shadowed the function's own name.
    ending = chat_with_llm(model, messages)
    messages.append({"role": "assistant", "content": "I ended the story successfully. Now I am not waiting for any more responses from the player."})
    story = previous_story + '\n\n' + ending

    # Only attach the character's fixed appearance when the ending actually
    # mentions the hero, so the image model isn't forced to draw an absent character.
    main_character = story_type.split(' ')[2].lower()
    character_prompt = create_character_prompt(story_type)
    if main_character in ending.lower():
        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story + character_prompt
    else:
        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story
    image = create_image(image_prompt)
    return story, image
167
+
168
+
169
+ # Function to handle resetting the app
170
+ # def reset_app():
171
+ # # Return empty values for all outputs
172
+ # # global messages
173
+ # # messages = []
174
+ # return "", "", "", gr.Radio(choices=["A knight who leads an army attacking a castle",
175
+ # "A kid alien who lands on earth and explores around",
176
+ # "Animals who participate in song contest",
177
+ # "A little princess who is saved by a prince"], interactive=True), gr.Button(interactive=True)
178
 
179
 
180
  # initialize messages
 
187
  # Left column for the story
188
  with gr.Column(scale=1):
189
  story = gr.Textbox(label="Story", interactive=False, lines=12)
190
+ story_image = gr.Image() # height=256, width=256
191
 
192
  # Right column for the question, answers, and buttons
193
  with gr.Column(scale=1):
194
+ story_type = gr.Radio(label="What story to create?", choices=["A fearless king who leads an army attacking a castle.",
195
+ "A kid alien who lands on earth and explores around.",
196
+ "A joyful rabbit who participates in song contest.",
197
+ "A beautiful princess who tries to find her way home."
198
+ ])
199
+
200
  start_button = gr.Button("Start Game!")
201
 
202
  question = gr.Textbox(label="Question", interactive=False)
 
205
  submit_button = gr.Button("Submit your answer")
206
  end_button = gr.Button("End the story")
207
 
208
+ # reset_button = gr.Button("Reset and play again")
209
+
210
+ # what the buttons do
211
+ start_button.click(fn=generate_story, inputs=[story_type], outputs=[story, question, answers, story_type, start_button, story_image])
212
+ submit_button.click(fn=continue_story, inputs=[story, answers, story_type], outputs=[story, question, answers, story_image])
213
+ end_button.click(fn=end_story, inputs=[story, answers, story_type], outputs=[story, story_image])
214
+ # reset_button.click(fn=reset_app, inputs=[], outputs=[story, question, answers, story_type, start_button])
215
 
216
 
217
  # Launch the Gradio interface
218
+ game_ui.launch()
219
+ # game_ui.launch(share=True)