awacke1 commited on
Commit
dfddde1
β€’
1 Parent(s): 293f9c5

Update backupapp.py

Browse files
Files changed (1) hide show
  1. backupapp.py +98 -38
backupapp.py CHANGED
@@ -1,4 +1,4 @@
1
- # Modify the program below to add a few picture buttons which will use emojis and a witty title to describe a prompt. the first prompt I want is "Write ten random adult limericks based on quotes that are tweet length and make you laugh. Show as numbered bold faced and large font markdown outline with emojis for each." Modify this code to add the prompt emoji labeled buttons above the text box. when you click them pass the variable they contain to a function which runs the chat through the Llama web service call in the code below. refactor it so it is function based. Put variables that set description for button and label for button right before the st.button() function calls and use st.expander() function to create an expanded description container with a witty label so user could collapse st.expander to hide buttons of a particular type. This first type will be Wit and Humor. Make sure each label contains appropriate emojis. Code: # Imports
2
  import base64
3
  import glob
4
  import json
@@ -44,41 +44,94 @@ prompt = f"Write instructions to teach anyone to write a discharge plan. List th
44
  st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
45
 
46
  # UI Controls
47
- should_save = st.sidebar.checkbox("πŸ’Ύ Save", value=True)
48
 
49
- # Functions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  def StreamLLMChatResponse(prompt):
51
- endpoint_url = API_URL
52
- hf_token = API_KEY
53
- client = InferenceClient(endpoint_url, token=hf_token)
54
- gen_kwargs = dict(
55
- max_new_tokens=512,
56
- top_k=30,
57
- top_p=0.9,
58
- temperature=0.2,
59
- repetition_penalty=1.02,
60
- stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
61
- )
62
- stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
63
- report=[]
64
- res_box = st.empty()
65
- collected_chunks=[]
66
- collected_messages=[]
67
- for r in stream:
68
- if r.token.special:
69
- continue
70
- if r.token.text in gen_kwargs["stop_sequences"]:
71
- break
72
- collected_chunks.append(r.token.text)
73
- chunk_message = r.token.text
74
- collected_messages.append(chunk_message)
75
- try:
76
- report.append(r.token.text)
77
- if len(r.token.text) > 0:
78
- result="".join(report).strip()
79
- res_box.markdown(f'*{result}*')
80
- except:
81
- st.write(' ')
 
 
 
 
 
 
82
 
83
  def query(payload):
84
  response = requests.post(API_URL, headers=headers, json=payload)
@@ -335,14 +388,21 @@ def get_zip_download_link(zip_file):
335
  return href
336
 
337
  def main():
338
- st.title(" DrLlama7B")
 
339
  prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
340
- example_input = st.text_input("Enter your example text:", value=prompt)
341
- if st.button("Run Prompt With Dr Llama"):
 
 
 
 
342
  try:
343
  StreamLLMChatResponse(example_input)
344
  except:
345
- st.write('Dr. Llama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
 
 
346
  openai.api_key = os.getenv('OPENAI_KEY')
347
  menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
348
  choice = st.sidebar.selectbox("Output File Type:", menu)
 
1
+ # Imports
2
  import base64
3
  import glob
4
  import json
 
44
  st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
45
 
46
  # UI Controls
47
+ should_save = st.sidebar.checkbox("πŸ’Ύ Save", value=True, help="Save your session data.")
48
 
49
+ # Function to add witty and humor buttons
50
+ def add_witty_humor_buttons():
51
+ with st.expander("Wit and Humor 🀣", expanded=True):
52
+ # Tip about the Dromedary family
53
+ st.markdown("πŸ”¬ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.")
54
+
55
+ # Define button descriptions
56
+ descriptions = {
57
+ "Generate Limericks πŸ˜‚": "Write ten random adult limericks based on quotes that are tweet length and make you laugh 🎭",
58
+ "Wise Quotes πŸ§™": "Generate ten wise quotes that are tweet length πŸ¦‰",
59
+ "Funny Rhymes 🎀": "Create ten funny rhymes that are tweet length 🎢",
60
+ "Medical Jokes πŸ’‰": "Create ten medical jokes that are tweet length πŸ₯",
61
+ "Minnesota Humor ❄️": "Create ten jokes about Minnesota that are tweet length 🌨️",
62
+ "Top Funny Stories πŸ“–": "Create ten funny stories that are tweet length πŸ“š",
63
+ "More Funny Rhymes πŸŽ™οΈ": "Create ten more funny rhymes that are tweet length 🎡"
64
+ }
65
+
66
+ # Create columns
67
+ col1, col2, col3 = st.columns([1, 1, 1], gap="small")
68
+
69
+ # Add buttons to columns
70
+ if col1.button("Generate Limericks πŸ˜‚"):
71
+ StreamLLMChatResponse(descriptions["Generate Limericks πŸ˜‚"])
72
+
73
+ if col2.button("Wise Quotes πŸ§™"):
74
+ StreamLLMChatResponse(descriptions["Wise Quotes πŸ§™"])
75
+
76
+ if col3.button("Funny Rhymes 🎀"):
77
+ StreamLLMChatResponse(descriptions["Funny Rhymes 🎀"])
78
+
79
+ col4, col5, col6 = st.columns([1, 1, 1], gap="small")
80
+
81
+ if col4.button("Medical Jokes πŸ’‰"):
82
+ StreamLLMChatResponse(descriptions["Medical Jokes πŸ’‰"])
83
+
84
+ if col5.button("Minnesota Humor ❄️"):
85
+ StreamLLMChatResponse(descriptions["Minnesota Humor ❄️"])
86
+
87
+ if col6.button("Top Funny Stories πŸ“–"):
88
+ StreamLLMChatResponse(descriptions["Top Funny Stories πŸ“–"])
89
+
90
+ col7 = st.columns(1, gap="small")
91
+
92
+ if col7[0].button("More Funny Rhymes πŸŽ™οΈ"):
93
+ StreamLLMChatResponse(descriptions["More Funny Rhymes πŸŽ™οΈ"])
94
+
95
+
96
+ # Function to Stream Inference Client for Inference Endpoint Responses
97
  def StreamLLMChatResponse(prompt):
98
+
99
+ try:
100
+ endpoint_url = API_URL
101
+ hf_token = API_KEY
102
+ client = InferenceClient(endpoint_url, token=hf_token)
103
+ gen_kwargs = dict(
104
+ max_new_tokens=512,
105
+ top_k=30,
106
+ top_p=0.9,
107
+ temperature=0.2,
108
+ repetition_penalty=1.02,
109
+ stop_sequences=["\nUser:", "<|endoftext|>", "</s>"],
110
+ )
111
+ stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs)
112
+ report=[]
113
+ res_box = st.empty()
114
+ collected_chunks=[]
115
+ collected_messages=[]
116
+ for r in stream:
117
+ if r.token.special:
118
+ continue
119
+ if r.token.text in gen_kwargs["stop_sequences"]:
120
+ break
121
+ collected_chunks.append(r.token.text)
122
+ chunk_message = r.token.text
123
+ collected_messages.append(chunk_message)
124
+ try:
125
+ report.append(r.token.text)
126
+ if len(r.token.text) > 0:
127
+ result="".join(report).strip()
128
+ res_box.markdown(f'*{result}*')
129
+ except:
130
+ st.write(' ')
131
+ except:
132
+ st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
133
+
134
+
135
 
136
  def query(payload):
137
  response = requests.post(API_URL, headers=headers, json=payload)
 
388
  return href
389
 
390
  def main():
391
+
392
+ st.title("DromeLlama7B")
393
  prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each."
394
+
395
+ # Add Wit and Humor buttons
396
+ add_witty_humor_buttons()
397
+
398
+ example_input = st.text_input("Enter your example text:", value=prompt, help="Enter text to get a response from DromeLlama.")
399
+ if st.button("Run Prompt With DromeLlama", help="Click to run the prompt."):
400
  try:
401
  StreamLLMChatResponse(example_input)
402
  except:
403
+ st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
404
+
405
+
406
  openai.api_key = os.getenv('OPENAI_KEY')
407
  menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
408
  choice = st.sidebar.selectbox("Output File Type:", menu)