ColeGuion committed on
Commit e7570e2
1 Parent(s): e061323

Update app.py

Files changed (1)
  1. app.py +7 -19
app.py CHANGED
@@ -2,12 +2,13 @@
 import gradio as gr
 from transformers import pipeline
 
+gr.load("models/grammarly/coedit-large").launch()
 # Load the model and tokenizer using the pipeline API
 model_pipeline = pipeline("text-generation", model="grammarly/coedit-large")
 
 def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
     # Generate text using the model
-    output = model_pipeline(input_text, temperature=temperature, max_length=(len(input_text.split()) + max_new_tokens), top_p=top_p, top_k=top_k, return_full_text=False)
+    output = model_pipeline(input_text, temperature=temperature, max_length=max_new_tokens, top_p=top_p, top_k=top_k)
     # Extract and return the generated text
     return output[0]['generated_text']
 
@@ -20,24 +21,11 @@ additional_inputs=[
     gr.Slider( label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
 ]
 
-#gr.ChatInterface(
-#    fn=generate_text,
-#    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-#    additional_inputs=additional_inputs,
-#    title="My Grammarly Space",
-#    concurrency_limit=20,
-#).launch(show_api=False)
-
-
-iface = gr.Interface(
+gr.ChatInterface(
     fn=generate_text,
-    inputs=[
-        gr.inputs.Textbox(lines=2, placeholder="Enter text here..."),
-        *additional_inputs
-    ],
-    outputs="text",
+    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    additional_inputs=additional_inputs,
     title="My Grammarly Space",
-    description="Generate text with grammarly/coedit-large model",
-)
+    concurrency_limit=20,
+).launch(show_api=False)
 
-iface.launch(share=True)
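
For reference, a sketch of how app.py reads after this commit, assembled only from the new-side lines of the two hunks above. The lines between the end of generate_text and the Top-k slider (the opening of additional_inputs and any other entries) are not part of this diff, so they are marked with a placeholder comment rather than guessed at.

import gradio as gr
from transformers import pipeline

gr.load("models/grammarly/coedit-large").launch()
# Load the model and tokenizer using the pipeline API
model_pipeline = pipeline("text-generation", model="grammarly/coedit-large")

def generate_text(input_text, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
    # Generate text using the model
    output = model_pipeline(input_text, temperature=temperature, max_length=max_new_tokens, top_p=top_p, top_k=top_k)
    # Extract and return the generated text
    return output[0]['generated_text']

additional_inputs=[
    # ... earlier entries not shown in this diff ...
    gr.Slider( label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True, info="Limits the number of top-k tokens considered at each step"),
]

gr.ChatInterface(
    fn=generate_text,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="My Grammarly Space",
    concurrency_limit=20,
).launch(show_api=False)

Note that the gr.load("models/grammarly/coedit-large") call added at the top builds and launches a demo straight from the hosted model, while the rest of the file still sets up the pipeline-based ChatInterface below it.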