prithivMLmods committed on
Commit
73fb3c7
1 Parent(s): c4d588e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -24
app.py CHANGED
@@ -3,16 +3,14 @@ import sambanova_gradio
3
 
4
  # Function to load the selected model
5
  def load_model(model_choice):
6
- model = gr.load(
7
- name=model_choice,
8
- src=sambanova_gradio.registry
9
- )
10
  return model
11
 
12
- # Define a function to handle the user input and return a response
13
  def generate_response(model_choice, user_input):
14
  model = load_model(model_choice)
15
- output = model.predict(user_input)
16
  return output
17
 
18
  # Available model choices
@@ -24,40 +22,39 @@ model_choices = [
24
  "Meta-Llama-3.1-70B-Instruct"
25
  ]
26
 
27
- # Create Gradio interface
28
  with gr.Blocks() as demo:
29
  gr.Markdown("# Meta-Llama Model Selector")
30
-
31
- # Dropdown for selecting the model
32
  model_choice = gr.Dropdown(
33
- choices=model_choices,
34
  label="Choose a Meta-Llama Model",
35
- value="Meta-Llama-3.2-3B-Instruct" # default value
36
  )
37
-
38
  # Textbox for user input
39
  input_box = gr.Textbox(
40
  label="Input",
41
  placeholder="Ask a question..."
42
  )
43
-
44
- # Output for the model's response
45
  output_box = gr.Textbox(label="Output")
46
-
47
- # Button to generate the output
48
  generate_button = gr.Button("Generate Response")
49
-
50
- # Add example inputs
51
  examples = gr.Examples(
52
- examples=["Explain quantum gravity to a 5-year old.",
53
- "How many R are there in the word Strawberry?"],
54
  inputs=input_box
55
  )
56
-
57
- # Action when button is clicked
58
  generate_button.click(
59
- fn=generate_response,
60
- inputs=[model_choice, input_box],
61
  outputs=output_box
62
  )
63
 
 
3
 
4
  # Function to load the selected model
5
  def load_model(model_choice):
6
+ # Directly load the model without using the ChatInterface
7
+ model = sambanova_gradio.registry(model_choice)
 
 
8
  return model
9
 
10
+ # Function to generate response
11
  def generate_response(model_choice, user_input):
12
  model = load_model(model_choice)
13
+ output = model(user_input) # Assuming model is callable like a function
14
  return output
15
 
16
  # Available model choices
 
22
  "Meta-Llama-3.1-70B-Instruct"
23
  ]
24
 
25
+ # Gradio interface
26
  with gr.Blocks() as demo:
27
  gr.Markdown("# Meta-Llama Model Selector")
28
+
29
+ # Dropdown for model selection
30
  model_choice = gr.Dropdown(
31
+ choices=model_choices,
32
  label="Choose a Meta-Llama Model",
33
+ value="Meta-Llama-3.2-3B-Instruct" # Default selected model
34
  )
35
+
36
  # Textbox for user input
37
  input_box = gr.Textbox(
38
  label="Input",
39
  placeholder="Ask a question..."
40
  )
41
+
42
+ # Output textbox for displaying the response
43
  output_box = gr.Textbox(label="Output")
44
+
45
+ # Button to generate a response
46
  generate_button = gr.Button("Generate Response")
47
+
48
+ # Example inputs
49
  examples = gr.Examples(
50
+ examples=["Explain quantum gravity to a 5-year-old.", "How many R are there in the word Strawberry?"],
 
51
  inputs=input_box
52
  )
53
+
54
+ # Define button click action
55
  generate_button.click(
56
+ fn=generate_response,
57
+ inputs=[model_choice, input_box],
58
  outputs=output_box
59
  )
60