kwabs22 committed on
Commit
079e3f7
·
1 Parent(s): 3267959

Some prompt changes

Browse files
Files changed (1) hide show
  1. app.py +22 -37
app.py CHANGED
@@ -3,11 +3,21 @@ import psutil
3
  import subprocess
4
  import time
5
 
6
- def generate_response_by_api():
 
7
  FinalOutput = ""
8
  #return FinalOutput
9
  pass
10
 
 
 
 
 
 
 
 
 
 
11
  def generate_response(user_message): #generate_response_token_by_token
12
  cmd = [
13
  "/app/llama.cpp/main", # Path to the executable
@@ -69,19 +79,6 @@ def generate_response(user_message): #generate_response_token_by_token
69
  if process.returncode != 0:
70
  error_message = process.stderr.read()
71
  print(f"Error: {error_message}")
72
-
73
- # def custom_generate_response(cust_user_message, prompt_index):
74
- # """
75
- # Generates a custom response based on the user message and the selected prompt,
76
- # including a custom ending specific to the prompt.
77
-
78
- # Parameters:
79
- # - cust_user_message: The message input from the user.
80
- # - prompt_index: The index of the custom prompt to use.
81
- # """
82
- # prompt, ending = CustomPrompts[prompt_index] # Unpack the prompt and its ending
83
- # cust_user_message = f"{prompt}\n\n{cust_user_message}\n\n{ending}"
84
- # yield from generate_response(cust_user_message)
85
 
86
  def custom_generate_response(cust_user_message, prompt_index, prompts_list):
87
  """
@@ -109,18 +106,20 @@ CustomPrompts = [
109
  ]
110
 
111
  BusinessPrompts = [
112
- ("Write an outline for a business plan for " , ""),
113
- ("Write an outline for a Executive Summary for " , "Executive Summary:"),
114
- ("Write an outline for a Company Description for " , "Company Description:"),
115
- ("Write an outline for a Market Analysis for " , "Market Analysis:"),
116
- ("Write an outline for a Marketing and Sales Strategy for " , "Marketing and Sales Strategy:"),
117
- ("Write an outline for a Product Development for " , "Product Development:"),
118
- ("Write an outline for a Operations and Management for " , "Operations and Management:"),
119
- ("Write an outline for a Financial Projections for " , "Financial Projections:"),
 
 
120
  ]
121
 
122
  with gr.Blocks() as iface:
123
- gr.HTML("Stabilityai's demo - https://huggingface.co/spaces/stabilityai/stablelm-2-1_6b-zephyr")
124
  gr.Interface(
125
  fn=generate_response,
126
  inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
@@ -137,20 +136,6 @@ with gr.Blocks() as iface:
137
  MainOutput = gr.TextArea(placeholder='Output will show here')
138
  CustomButtonInput = gr.TextArea(lines=1, placeholder='Prompt goes here')
139
 
140
- # with gr.Accordion("Random Ideas"):
141
- # with gr.Group():
142
- # # Dynamically create buttons and assign actions
143
- # for index, (prompt, _) in enumerate(CustomPrompts):
144
- # button = gr.Button(prompt)
145
- # button.click(custom_generate_response, inputs=[CustomButtonInput, gr.State(index)], outputs=MainOutput)
146
-
147
- # with gr.Accordion("General Product based", open=False):
148
- # with gr.Group():
149
- # # Dynamically create buttons and assign actions
150
- # for index, (prompt, _) in enumerate(BusinessPrompts):
151
- # button = gr.Button(prompt)
152
- # button.click(custom_generate_response, inputs=[CustomButtonInput, gr.State(index)], outputs=MainOutput)
153
-
154
  with gr.Accordion("Random Ideas"):
155
  with gr.Group():
156
  for index, (prompt, _) in enumerate(CustomPrompts):
 
3
  import subprocess
4
  import time
5
 
6
+ #placeholders for api use
7
+ def generate_response_by_api(user_message):
8
  FinalOutput = ""
9
  #return FinalOutput
10
  pass
11
 
12
+ def custom_generate_response_by_api(cust_user_message, prompt_index, prompts_list):
13
+ prompt, ending = prompts_list[prompt_index] # Unpack the prompt and its ending from the provided list
14
+ cust_user_message = f"{prompt}\n\n{cust_user_message}\n\n{ending}"
15
+ #return generate_response(cust_user_message)
16
+ pass
17
+
18
+ #-----------------------------------------------------------------------------------------------------------------------
19
+
20
+ #Local gguf model using llama.cpp
21
  def generate_response(user_message): #generate_response_token_by_token
22
  cmd = [
23
  "/app/llama.cpp/main", # Path to the executable
 
79
  if process.returncode != 0:
80
  error_message = process.stderr.read()
81
  print(f"Error: {error_message}")
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  def custom_generate_response(cust_user_message, prompt_index, prompts_list):
84
  """
 
106
  ]
107
 
108
  BusinessPrompts = [
109
+ ("Suggest Product ideas just based off the following text:", "Products:"),
110
+ ("Write an outline for a business plan for: " , ""),
111
+ ("Write an example of a detailed report for a Executive Summary for " , "Executive Summary:"),
112
+ ("Write an example of a detailed report for a Company Description for " , "Company Description:"),
113
+ ("Write an example of a detailed report for a Market Analysis for " , "Market Analysis:"),
114
+ ("Write an example of a detailed report for a Marketing and Sales Strategy for " , "Marketing and Sales Strategy:"),
115
+ ("Write an example of a detailed report for a Product Development for " , "Product Development:"),
116
+ ("Write an example of a detailed report for a Operations and Management for " , "Operations and Management:"),
117
+ ("Write an example of a detailed report for a Financial Projections for " , "Financial Projections:"),
118
+ ("Explain how this to make this product unique from competitors:", "Considerations:"),
119
  ]
120
 
121
  with gr.Blocks() as iface:
122
+ gr.HTML("<a href='https://huggingface.co/spaces/stabilityai/stablelm-2-1_6b-zephyr'> -- Original StabilityAI demo -- </a> | ")
123
  gr.Interface(
124
  fn=generate_response,
125
  inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
 
136
  MainOutput = gr.TextArea(placeholder='Output will show here')
137
  CustomButtonInput = gr.TextArea(lines=1, placeholder='Prompt goes here')
138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
139
  with gr.Accordion("Random Ideas"):
140
  with gr.Group():
141
  for index, (prompt, _) in enumerate(CustomPrompts):