import streamlit as st
from llama_cpp import Llama


@st.cache_resource
def _load_model():
    """Download/load the GGUF model once and reuse it across Streamlit reruns.

    Streamlit re-executes this whole script on every widget interaction;
    without ``st.cache_resource`` the 35B model would be reloaded each time.
    """
    return Llama.from_pretrained(
        repo_id="DavidAU/Command-R-01-200xq-Ultra-NEO-V1-35B-IMATRIX-GGUF",
        filename="CommandR-35B-NEO-V1-D_AU-IQ3_XS-0200xq-imat13.gguf",
    )


# Module-level handle kept for backward compatibility with the original script.
llm = _load_model()


def generate_story(user_input):
    """Send ``user_input`` as a single-turn chat prompt and return the reply text.

    Parameters:
        user_input: the user's prompt string.

    Returns:
        The assistant message content from the model's first choice.
    """
    response = llm.create_chat_completion(
        messages=[
            {
                "role": "user",
                "content": user_input,
            }
        ]
    )
    return response['choices'][0]['message']['content']


def start_storytelling():
    """Render the storytelling UI.

    Streamlit reruns the script top-to-bottom on each interaction, so the
    generated story parts are accumulated in ``st.session_state`` rather than
    in a ``while True`` loop: a loop of identically-labelled ``st.text_input``
    calls raises DuplicateWidgetID on its second iteration and never returns
    control to the browser.
    """
    st.write("Welcome to the Storytelling bot.")

    # Story parts generated so far, preserved across reruns.
    if "story_parts" not in st.session_state:
        st.session_state.story_parts = []

    story_prompt = st.text_input("Please give a prompt for the story.", key="story_prompt")

    # Guard: on first render text_input returns "" — don't call the model
    # with an empty prompt.
    if story_prompt and not st.session_state.story_parts:
        st.session_state.story_parts.append(generate_story(story_prompt))
        st.write("\nHere's the beginning of your story:")
        st.write(st.session_state.story_parts[0])

    # Continuation of story (generate further parts) — only offered once a
    # first part exists.
    if st.session_state.story_parts:
        continue_story = st.text_input(
            "\nWould you like to continue with this story?", key="continue_story"
        )
        if continue_story.lower() == 'yes':
            user_input = st.text_input("\nWhat should happen next?", key="next_input")
            if user_input:
                story_part = generate_story(user_input)
                st.session_state.story_parts.append(story_part)
                st.write("\nContinued:")
                st.write(story_part)
        elif continue_story:
            # Any non-empty answer other than "yes" ends the session.
            st.write("\nEnjoy!")


# Function start
start_storytelling()