# --- stdlib imports ---
import base64
import glob
import json
import math
import os
import random
import re
import time
import zipfile
from collections import deque
from datetime import datetime
from io import BytesIO
from urllib.parse import quote  # Ensure this import is included
from xml.etree import ElementTree as ET

# --- third-party imports ---
import dotenv
import huggingface_hub
import openai
import pytz
import requests
import streamlit as st
import streamlit.components.v1 as components
import textract
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from openai import ChatCompletion  # NOTE(review): removed in openai>=1.0 — confirm pinned openai<1 or migrate
from PIL import Image
from PyPDF2 import PdfReader

# --- local imports ---
from templates import bot_template, css, user_template

# Set page configuration with a title and favicon
st.set_page_config(
    page_title="๐งโโ๏ธ๐บ๏ธRolePlayAI",
    page_icon="๐บ๏ธ๐งโโ๏ธ",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': "https://huggingface.co/spaces/awacke1/WebDataDownload",
        'About': "# Midjourney: https://discord.com/channels/@me/997514686608191558"
    }
)

# Prompt prefaces: the first steers the model toward a markdown ruleset
# outline, the second toward a full Streamlit implementation of that outline.
PromptPrefix = 'Create a markdown outline and table with appropriate emojis for roleplay rulesets which define the method steps of play for the topics of '
PromptPrefix2 = 'Create a streamlit python user app. Show full code listing. Create a UI implementing each feature using variables, reusable functions with parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic: '

st.markdown('''### ๐บ๏ธ๐ ๏ธ RolePlayAI''')
with st.expander("Help / About ๐", expanded=False):
    st.markdown('''
    - ๐ **Elevate Your Game:** Strategy meets creativity in the card game universe.
    - ๐ **Adventure Awaits:** Elevate your prowess with an AI that makes vocabulary engaging.
    - ๐ **Offers:** Craft intricate word lists and embark on captivating vocabulary quests.
    - ๐งโโ๏ธ **Your Journey:** Become a linguistic mage, mastering language arts.
    - ๐ฎ **How to Play:** Start your quest with URL challenges, like `?q=Cyberpunk` or `?query=Dungeons and Dragons`.
    ''')


# 9. Sidebar with UI controls to review and re-run prompts and continue responses
@st.cache_resource
def get_table_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* via a base64 data URI.

    The mime type is inferred from the file extension; unknown extensions
    fall back to application/octet-stream.
    """
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    mime_map = {
        '.txt': 'text/plain',
        '.py': 'text/plain',
        '.xlsx': 'text/plain',
        '.csv': 'text/plain',
        '.htm': 'text/html',
        '.md': 'text/markdown',
        '.wav': 'audio/wav',
    }
    mime_type = mime_map.get(ext, 'application/octet-stream')  # general binary data type
    # BUG FIX: the original returned only the bare file name, so the computed
    # base64 payload and mime type were unused and nothing was downloadable.
    # Emit a real anchor with a data URI instead.
    href = f'<a href="data:{mime_type};base64,{b64}" download="{file_name}">{file_name}</a>'
    return href


def _run_llama_gpt_battle(file_contents, ext):
    """Run *file_contents* through Llama then GPT, save both results, and
    speak the combined output. *ext* is the extension passed to
    generate_filename (callers historically used both "md" and ".md")."""
    user_prompt = file_contents
    combined = ""  # renamed from `all`, which shadowed the builtin
    try:
        st.write('๐Running with Llama.')
        response = StreamLLMChatResponse(file_contents)
        filename = generate_filename(user_prompt, ext)
        create_file(filename, file_contents, response, should_save)
        combined = response
        # SpeechSynthesis(response)
    except Exception:
        st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
    # gpt
    try:
        st.write('๐Running with GPT.')
        response2 = chat_with_model(user_prompt, file_contents, model_choice)
        # BUG FIX: second argument was the undefined name `choice`
        # (a NameError that the bare except silently swallowed); reuse ext.
        filename2 = generate_filename(file_contents, ext)
        # BUG FIX: the original saved `response` (the Llama output) under the
        # GPT filename; save the GPT output `response2` instead.
        create_file(filename2, user_prompt, response2, should_save)
        combined = combined + response2
        # SpeechSynthesis(response2)
    except Exception:
        st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
    SpeechSynthesis(combined)


def FileSidebar():
    """Render the sidebar file browser over local *.md files.

    Per file: view-as-markdown, download link, open-in-editor, search/run,
    and delete buttons. Selecting a file can trigger the Llama-vs-GPT
    generation flow on its contents.
    """
    # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
    all_files = glob.glob("*.md")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type then name, descending

    if st.sidebar.button("๐ Delete All Text"):
        for file in all_files:
            os.remove(file)
        st.experimental_rerun()  # NOTE(review): deprecated in newer Streamlit (st.rerun) — confirm version

    if st.sidebar.button("โฌ๏ธ Download All"):
        zip_file = create_zip_of_files(all_files)  # defined elsewhere in this app
        st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

    file_contents = ''
    next_action = ''
    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])  # adjust the ratio as needed
        with col1:
            if st.button("๐", key="md_" + file):  # md emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'md'
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("๐", key="open_" + file):  # open emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'open'
        with col4:
            if st.button("๐", key="read_" + file):  # search emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'search'
        with col5:
            if st.button("๐", key="delete_" + file):
                os.remove(file)
                st.experimental_rerun()

    if len(file_contents) > 0:
        if next_action == 'open':
            st.text_area("File Contents:", file_contents, height=500)
        if next_action == 'md':
            st.markdown(file_contents)
            buttonlabel = '๐Run with Llama and GPT.'
            if st.button(key='RunWithLlamaandGPT', label=buttonlabel):
                # Llama versus GPT Battle!
                _run_llama_gpt_battle(file_contents, "md")
        if next_action == 'search':
            st.text_area("File Contents:", file_contents, height=500)
            st.write('๐Running with Llama and GPT.')
            # Llama versus GPT Battle!  (original used ".md" here vs "md" above)
            _run_llama_gpt_battle(file_contents, ".md")


FileSidebar()
# ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------

# ---- Art Card Sidebar with Random Selection of image ------------------------------
# (Midjourney image prompt used to generate the sidebar art cards:
#  living grid shelf of 28mm D&D monster miniatures, classic D&D book covers, etc.)
# (Midjourney prompt parameters: --v 6.0 --ar 1:9)

def get_image_as_base64(url):
    """Fetch *url* and return its body base64-encoded, or None on non-200."""
    response = requests.get(url)
    if response.status_code == 200:
        # Convert the image to base64
        return base64.b64encode(response.content).decode("utf-8")
    else:
        return None


def create_download_link(filename, base64_str):
    """Return an HTML anchor that downloads the base64 PNG as *filename*."""
    # BUG FIX: the original returned the literal text 'Download Image' and
    # never used base64_str, so no link was produced. Build a real data-URI
    # download anchor.
    href = f'<a href="data:image/png;base64,{base64_str}" download="{filename}">Download Image</a>'
    return href


# List of image URLs - These can be copied and pasted from clipboard to get commit URL of autosaved image base64 as png link
image_urls = [
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/ZRZXM6kXqct-IKs6xqD5n.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/PBV0gDGYzCwYIN6iv0rXL.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/nbZOuPUdXJ7OJZeT23rbE.png",
]

# Select a random URL from the list
selected_image_url = random.choice(image_urls)

# Get the base64 encoded string of the selected image
selected_image_base64 = get_image_as_base64(selected_image_url)
if selected_image_base64 is not None:
    with st.sidebar:
        st.markdown("""### ๐บ๏ธ๐ ๏ธ RolePlayAI""")
        # Display the image
        st.markdown(f"![image](data:image/png;base64,{selected_image_base64})")
        # Create and display the download link
        download_link = create_download_link("downloaded_image.png", selected_image_base64)
        st.markdown(download_link, unsafe_allow_html=True)
else:
    st.sidebar.write("Failed to load the image.")
# ---- Art Card Sidebar with random selection of image.
# ------------------------------------------------------------- Art Card Sidebar
# (a commented-out duplicate of get_image_as_base64 / create_download_link
#  previously lived here and was removed as dead code)

# Ensure the directory for storing scores exists
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)


def generate_key(label, header, idx):
    """Build a unique, stable widget key from header, label and index."""
    return f"{header}_{label}_{idx}_key"


def update_score(key, increment=1):
    """Increment the click count and score stored for *key* and return the
    new score. Scores persist as small JSON files under score_dir."""
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
    else:
        score_data = {"clicks": 0, "score": 0}
    score_data["clicks"] += 1
    score_data["score"] += increment
    with open(score_file, "w") as file:
        json.dump(score_data, file)
    return score_data["score"]


def load_score(key):
    """Return the persisted score for *key*, or 0 if never scored."""
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
        return score_data["score"]
    return 0


# Nested glossary: category -> game/system -> list of terms.
roleplaying_glossary = {
    "๐ฒ Core Rulebooks": {
        "Dungeons and Dragons": ["Player's Handbook", "Dungeon Master's Guide", "Monster Manual"],
        "Call of Cthulhu": ["Keeper's Rulebook", "Investigator Handbook", "Monster Manual"],
        "GURPS": ["Basic Set: Characters", "Basic Set: Campaigns", "GURPS Lite"],
        "Pathfinder": ["Core Rulebook", "Bestiary", "GameMastery Guide"],
        "Kindred of the East": ["Core Rulebook", "Companion Guide", "Kindred Secrets"],
        "Changeling": ["The Dreaming Core", "Players Guide", "Storytellers Handbook"],
    },
    "๐บ๏ธ Maps & Settings": {
        "Dungeons and Dragons": ["The Forgotten Realms", "Eberron", "Ravenloft"],
        "Gamma World": ["Gamma Terra", "The Wastelands"],
        "Car Wars": ["Autoduel America", "The Arenas"],
        "Top Secret": ["World of Espionage", "Mission Modules"],
        "Kindred of the East": ["The Eastern Realms", "Cities of Darkness", "Ancestral Voices"],
        "Changeling": ["Freeholds & Hidden Glens", "Isles of the Mighty", "Kingdoms of Will"],
    },
    "๐ ๏ธ Game Mechanics & Tools": {
        "GURPS": ["GURPS Character Sheet", "GURPS Calculator", "Modular System for Custom Campaigns"],
        "Pathfinder": ["Pathfinder Character Sheet", "Pathfinder Spell Database", "Adventure Path Modules"],
        "Call of Cthulhu": ["Sanity System", "Investigator's Companion", "Mythos Tomes"],
        "Kindred of the East": ["Karmic Wheel", "Yin-Yang Balance", "Chi Cultivation"],
        "Changeling": ["Glamour Harnessing", "Arts and Realms", "Cantrip Casting"],
    },
    "๐ง Monsters & Adversaries": {
        "Dungeons and Dragons": ["Beholder", "Dragon", "Lich"],
        "Call of Cthulhu": ["Great Old Ones", "Deep Ones", "Shoggoth"],
        "Pathfinder": ["Goblins", "Dragons", "Vampires"],
        "Kindred of the East": ["Kuei-jin Ancestors", "Demon Hordes", "Yama Kings"],
        "Changeling": ["Thallain Nightmares", "Autumn People", "Banality's Minions"],
    },
    "๐ Campaigns & Adventures": {
        "Dungeons and Dragons": ["Curse of Strahd", "Tomb of Annihilation", "Waterdeep: Dragon Heist"],
        "Gamma World": ["Legion of Gold", "Famine in Far-Go"],
        "Call of Cthulhu": ["The Masks of Nyarlathotep", "The Haunting"],
        "GURPS": ["Banestorm", "Infinite Worlds"],
        "Kindred of the East": ["Blood and Silk", "Dharma Book: Bone Flowers"],
        "Changeling": ["Dreams and Nightmares", "War in Concordia"],
    },
    "๐จ Creatives & Assets": {
        "General RPG": ["Character Artwork", "Map Generators", "Token Creators"],
        "Digital Tools": ["Roll20 Assets", "Fantasy Grounds Modules", "Tabletop Simulator Packs"],
    },
    "๐ง Game Master Resources": {
        "General Tips": ["Campaign Planning", "NPC Creation", "World-Building"],
        "Adventure Hooks": ["Mystery", "Exploration", "Conflict"],
        "Puzzle & Trap Ideas": ["Logic Puzzles", "Physical Traps", "Social Encounters"],
    },
    "๐ Lore & Background": {
        "Dungeons and Dragons": ["The Pantheon of Deities", "Historic Timelines", "Cultural Customs"],
        "Pathfinder": ["Golarion", "Inner Sea Gods", "The Starstone"],
        "Call of Cthulhu": ["The Cthulhu Mythos", "Historic Campaign Settings", "Cosmic Horrors"],
        "Kindred of the East": ["Eastern Mythology", "The Five Courts", "Undead Politics"],
        "Changeling": ["Faerie Lore", "The Dreaming", "Changeling Society"],
    },
    "๐๏ธ Character Development": {
        "Skill Systems": ["Class-Based", "Skill-Based", "Level-Less Progression"],
        "Alignment & Morality": ["Law vs. Chaos", "Good vs. Evil", "Neutrality"],
        "Character Backstories": ["Origin Stories", "Character Flaws", "Personal Goals"],
    },
    "โ๏ธ Homebrew Content": {
        "Rule Modifications": ["House Rules", "Alternate Magic Systems", "Custom Races & Classes"],
        "Homebrew Campaigns": ["World Building Guides", "Custom Adventures", "NPC Galleries"],
        "Creative Tools": ["Homebrewery", "D&D Beyond Homebrew", "GMBinder"],
    },
    "๐ General Topics": {
        "Fantasy Cultures": ["High Elven Traditions", "Dwarven Kingdoms", "Nomadic Tribes"],
        "World Cultures": ["Ancient Egypt Mysteries", "Feudal Japan Lore", "Viking Raiders"],
        "Magic": ["Arcane Arts", "Divine Blessings", "Ritual Craft"],
        "Fantasy Races": ["Elves and Dwarves", "Orcs and Goblins", "Fae and Spirits"],
        "Creatures of Myth and Legend": ["Dragons and Wyverns", "Ghosts and Ghouls", "Titans and Giants"],
        "Dress and Costume": ["Medieval Attire", "Warrior's Armor", "Ceremonial Robes"],
        "Arms, Armor, and Armies": ["Sword and Shield", "Siege Warfare", "Cavalry Tactics"],
        "Anatomy of a Castle": ["Keep and Bailey", "Castle Defenses", "Living Quarters"],
    },
}


def search_glossary(query):
    """Show glossary categories matching *query*, then run a two-stage GPT
    pipeline (ruleset outline, then Streamlit code), saving and speaking
    the combined results. Returns the combined text."""
    q = query.lower()
    for category, games in roleplaying_glossary.items():
        # BUG FIX: the original iterated the category's inner dict, so the
        # membership test only ever compared against game names while the
        # variable was labeled `terms`. Match on game names AND their terms.
        names = [game.lower() for game in games]
        names += [term.lower() for terms in games.values() for term in terms]
        if q in names:
            st.markdown(f"#### {category}")
            st.write(f"- {query}")

    combined = ""  # renamed from `all`, which shadowed the builtin
    query2 = PromptPrefix + query  # Add prompt preface for method step task behavior
    # st.write('## ' + query2)
    st.write('## ๐ Running with GPT.')
    # NOTE(review): chat_with_model is called with one argument here but with
    # three elsewhere in this file — confirm its signature supports both.
    response = chat_with_model(query2)
    filename = generate_filename(query2 + ' --- ' + response, "md")
    create_file(filename, query, response, should_save)

    query3 = PromptPrefix2 + query + ' creating streamlit functions that implement outline of method steps below: ' + response  # coding-task preface
    # st.write('## ' + query3)
    st.write('## ๐ Coding with GPT.')
    response2 = chat_with_model(query3)
    filename_txt = generate_filename(query + ' --- ' + response2, "py")
    create_file(filename_txt, query, response2, should_save)

    combined = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
    filename_txt2 = generate_filename(query + ' --- ' + combined, "md")
    create_file(filename_txt2, query, combined, should_save)
    SpeechSynthesis(combined)
    return combined
# Function to display the glossary in a structured format
def display_glossary(glossary, area):
    """Render one glossary *area* as numbered term lists per game."""
    if area in glossary:
        st.subheader(f"๐ Glossary for {area}")
        for game, terms in glossary[area].items():
            st.markdown(f"### {game}")
            for idx, term in enumerate(terms, start=1):
                st.write(f"{idx}. {term}")


# Function to display the entire glossary in a grid format with links
def display_glossary_grid(roleplaying_glossary):
    """Render every category/game/term with per-term external search links."""
    search_urls = {
        "๐": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
        "๐": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "โถ๏ธ": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "๐": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "๐ฒ": lambda k: f"https://huggingface.co/spaces/awacke1/MixableRolePlayAI?q={quote(k)}",  # this url plus query!
    }
    for category, details in roleplaying_glossary.items():
        st.write(f"### {category}")
        cols = st.columns(len(details))  # one dynamic column per game
        for idx, (game, terms) in enumerate(details.items()):
            with cols[idx]:
                st.markdown(f"#### {game}")
                for term in terms:
                    links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
                    st.markdown(f"{term} {links_md}", unsafe_allow_html=True)


game_emojis = {
    "Dungeons and Dragons": "๐",
    "Call of Cthulhu": "๐",
    "GURPS": "๐ฒ",
    "Pathfinder": "๐บ๏ธ",
    "Kindred of the East": "๐ ",
    "Changeling": "๐",
}

topic_emojis = {
    "Core Rulebooks": "๐",
    "Maps & Settings": "๐บ๏ธ",
    "Game Mechanics & Tools": "โ๏ธ",
    "Monsters & Adversaries": "๐น",
    "Campaigns & Adventures": "๐",
    "Creatives & Assets": "๐จ",
    "Game Master Resources": "๐ ๏ธ",
    "Lore & Background": "๐",
    "Character Development": "๐ง",
    "Homebrew Content": "๐ง",
    "General Topics": "๐",
}


# Adjusted display_buttons_with_scores function
def display_buttons_with_scores():
    """Render one scored button per glossary term; a click bumps the score
    and launches a glossary search/generation for that term."""
    for category, games in roleplaying_glossary.items():
        # BUG FIX: glossary category keys carry an emoji prefix (e.g.
        # "๐ฒ Core Rulebooks") while topic_emojis is keyed by the bare name,
        # so this lookup always fell through to the default. Strip the
        # prefix before looking up.
        category_emoji = topic_emojis.get(category.split(' ', 1)[-1], "๐")  # default to search icon if no match
        st.markdown(f"## {category_emoji} {category}")
        for game, terms in games.items():
            game_emoji = game_emojis.get(game, "๐ฎ")  # default to generic game controller if no match
            for term in terms:
                key = f"{category}_{game}_{term}".replace(' ', '_').lower()
                score = load_score(key)
                if st.button(f"{game_emoji} {term} {score}", key=key):
                    update_score(key)
                    # Create a dynamic query incorporating emojis and formatting for clarity
                    query_prefix = f"{category_emoji} {game_emoji} **{game} - {category}:**"
                    query_body = f"Create a detailed outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
                    # unused `response =` assignment removed; the call renders
                    # and saves its own output.
                    search_glossary(query_prefix + query_body)
def fetch_wikipedia_summary(keyword):
    """Placeholder for fetching Wikipedia summaries.

    In a real app, you might use requests to fetch from the Wikipedia API.
    """
    return f"Summary for {keyword}. For more information, visit Wikipedia."


def create_search_url_youtube(keyword):
    """Return a YouTube search URL for *keyword* (spaces become '+')."""
    base_url = "https://www.youtube.com/results?search_query="
    return base_url + keyword.replace(' ', '+')


def create_search_url_bing(keyword):
    """Return a Bing search URL for *keyword*."""
    base_url = "https://www.bing.com/search?q="
    return base_url + keyword.replace(' ', '+')


def create_search_url_wikipedia(keyword):
    """Return a Wikipedia search-redirect URL for *keyword*."""
    base_url = "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search="
    return base_url + keyword.replace(' ', '+')


def create_search_url_google(keyword):
    """Return a Google search URL for *keyword*."""
    base_url = "https://www.google.com/search?q="
    return base_url + keyword.replace(' ', '+')


def create_search_url_ai(keyword):
    """Return a MixableRolePlayAI space URL seeded with *keyword*."""
    base_url = "https://huggingface.co/spaces/awacke1/MixableRolePlayAI?q="
    return base_url + keyword.replace(' ', '+')


def display_images_and_wikipedia_summaries():
    """Show every PNG in the cwd with external search links derived from
    its file name (the keyword is the name without extension)."""
    st.markdown('### ๐บ๏ธ๐ ๏ธ RolePlayAI Gallery')
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")
        return
    for image_file in image_files:
        image = Image.open(image_file)
        st.image(image, caption=image_file, use_column_width=True)
        keyword = image_file.split('.')[0]  # Assumes keyword is the file name without extension
        # Display Wikipedia and Google search links
        wikipedia_url = create_search_url_wikipedia(keyword)
        google_url = create_search_url_google(keyword)
        youtube_url = create_search_url_youtube(keyword)
        bing_url = create_search_url_bing(keyword)
        ai_url = create_search_url_ai(keyword)
        links_md = f"""
        [Wikipedia]({wikipedia_url}) | [Google]({google_url}) | [YouTube]({youtube_url}) | [Bing]({bing_url}) | [AI]({ai_url})
        """
        st.markdown(links_md)


def get_all_query_params(key):
    """Return every value for *key* in the URL query string (possibly [])."""
    # BUG FIX: st.query_params is a dict-like property, not a callable —
    # calling it raises TypeError. Use the documented get_all accessor,
    # which returns [] for a missing key.
    return st.query_params.get_all(key)


def clear_query_params():
    """Remove all query parameters from the URL."""
    # BUG FIX: same property-vs-call issue as above; clear() is the
    # documented way to drop all params.
    st.query_params.clear()


# Function to display content or image based on a query
def display_content_or_image(query):
    """Show glossary content or a local image matching *query*.

    Returns True when something was found and displayed, else False.
    """
    # BUG FIX: the original iterated the undefined name `transhuman_glossary`
    # (NameError on every call); use this module's roleplaying_glossary and
    # search the nested term lists.
    q = query.lower()
    for category, games in roleplaying_glossary.items():
        for game, terms in games.items():
            for term in terms:
                if q in term.lower():
                    st.subheader(f"Found in {category}:")
                    st.write(term)
                    return True  # Return after finding and displaying the first match
    # Check for an image match in a predefined directory (adjust path as needed)
    image_dir = "images"  # Example directory where images are stored
    image_path = f"{image_dir}/{query}.png"  # Construct image path with query
    if os.path.exists(image_path):
        st.image(image_path, caption=f"Image for {query}")
        return True
    # If no content or image is found
    st.warning("No matching content or image found.")
    return False


def add_Med_Licensing_Exam_Dataset():
    """Paginated, searchable viewer over the USMLE Step 1 dataset."""
    import streamlit as st
    from datasets import load_dataset
    dataset = load_dataset("augtoma/usmle_step_1")['test']  # Using 'test' split
    st.title("USMLE Step 1 Dataset Viewer")
    if len(dataset) == 0:
        st.write("๐ข The dataset is empty.")
    else:
        st.write("""
        ๐ Use the search box to filter questions or use the grid to scroll through the dataset.
        """)
        # ๐ฉโ๐ฌ Search Box
        search_term = st.text_input("Search for a specific question:", "")
        # ๐ Pagination
        records_per_page = 100
        num_records = len(dataset)
        num_pages = max(int(num_records / records_per_page), 1)
        # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page)
        if num_pages > 1:
            page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1)))
        else:
            page_number = 1  # Only one page
        # ๐ Display Data
        start_idx = (page_number - 1) * records_per_page
        end_idx = start_idx + records_per_page
        # ๐งช Apply the Search Filter
        # NOTE(review): slicing a HF Dataset returns a dict of columns, so
        # iterating it yields column names, not row dicts — confirm the
        # isinstance(record, dict) filter ever matches on this library version.
        filtered_data = []
        for record in dataset[start_idx:end_idx]:
            if isinstance(record, dict) and 'text' in record and 'id' in record:
                if search_term:
                    if search_term.lower() in record['text'].lower():
                        st.markdown(record)
                        filtered_data.append(record)
                else:
                    filtered_data.append(record)
        # ๐ Render the Grid
        for record in filtered_data:
            st.write(f"## Question ID: {record['id']}")
            st.write(f"### Question:")
            st.write(f"{record['text']}")
            st.write(f"### Answer:")
            st.write(f"{record['answer']}")
            st.write("---")
        st.write(f"๐ Total Records: {num_records} | ๐ Displaying {start_idx+1} to {min(end_idx, num_records)}")
""") # ๐ฉโ๐ฌ Search Box search_term = st.text_input("Search for a specific question:", "") # ๐ Pagination records_per_page = 100 num_records = len(dataset) num_pages = max(int(num_records / records_per_page), 1) # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page) if num_pages > 1: page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1))) else: page_number = 1 # Only one page # ๐ Display Data start_idx = (page_number - 1) * records_per_page end_idx = start_idx + records_per_page # ๐งช Apply the Search Filter filtered_data = [] for record in dataset[start_idx:end_idx]: if isinstance(record, dict) and 'text' in record and 'id' in record: if search_term: if search_term.lower() in record['text'].lower(): st.markdown(record) filtered_data.append(record) else: filtered_data.append(record) # ๐ Render the Grid for record in filtered_data: st.write(f"## Question ID: {record['id']}") st.write(f"### Question:") st.write(f"{record['text']}") st.write(f"### Answer:") st.write(f"{record['answer']}") st.write("---") st.write(f"๐ Total Records: {num_records} | ๐ Displaying {start_idx+1} to {min(end_idx, num_records)}") # 1. Constants and Top Level UI Variables # My Inference API Copy API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama # Meta's Original - Chat HF Free Version: #API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf" API_KEY = os.getenv('API_KEY') MODEL1="meta-llama/Llama-2-7b-chat-hf" MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf" HF_KEY = os.getenv('HF_KEY') headers = { "Authorization": f"Bearer {HF_KEY}", "Content-Type": "application/json" } key = os.getenv('OPENAI_API_KEY') prompt = f"Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface." 
should_save = st.sidebar.checkbox("๐พ Save", value=True, help="Save your session data.") # 2. Prompt label button demo for LLM def add_witty_humor_buttons(): with st.expander("Wit and Humor ๐คฃ", expanded=True): # Tip about the Dromedary family st.markdown("๐ฌ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.") # Define button descriptions descriptions = { "Generate Limericks ๐": "Write ten random adult limericks based on quotes that are tweet length and make you laugh ๐ญ", "Wise Quotes ๐ง": "Generate ten wise quotes that are tweet length ๐ฆ", "Funny Rhymes ๐ค": "Create ten funny rhymes that are tweet length ๐ถ", "Medical Jokes ๐": "Create ten medical jokes that are tweet length ๐ฅ", "Minnesota Humor โ๏ธ": "Create ten jokes about Minnesota that are tweet length ๐จ๏ธ", "Top Funny Stories ๐": "Create ten funny stories that are tweet length ๐", "More Funny Rhymes ๐๏ธ": "Create ten more funny rhymes that are tweet length ๐ต" } # Create columns col1, col2, col3 = st.columns([1, 1, 1], gap="small") # Add buttons to columns if col1.button("Wise Limericks ๐"): StreamLLMChatResponse(descriptions["Generate Limericks ๐"]) if col2.button("Wise Quotes ๐ง"): StreamLLMChatResponse(descriptions["Wise Quotes ๐ง"]) #if col3.button("Funny Rhymes ๐ค"): # StreamLLMChatResponse(descriptions["Funny Rhymes ๐ค"]) col4, col5, col6 = st.columns([1, 1, 1], gap="small") if col4.button("Top Ten Funniest Clean Jokes ๐"): StreamLLMChatResponse(descriptions["Top Ten Funniest Clean Jokes ๐"]) if col5.button("Minnesota Humor โ๏ธ"): StreamLLMChatResponse(descriptions["Minnesota Humor โ๏ธ"]) if col6.button("Origins of Medical Science True Stories"): StreamLLMChatResponse(descriptions["Origins of Medical Science True Stories"]) col7 = st.columns(1, gap="small") if col7[0].button("Top Ten 
Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"): StreamLLMChatResponse(descriptions["Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"]) def SpeechSynthesis(result): documentHTML5='''