Update app/main.py
app/main.py  +6 -8
@@ -12,18 +12,16 @@ PWD = os.getenv("PWD")
 def main(args):
     demo = gr.ChatInterface(
         fn=chat,
-        examples=["
-        title="
-        description="This space is a template that
+        examples=["Explain the enteerprise adoption challenges", "How can we identify a fraud transaction?", "Por que os grandes modelos de linguagem de AI halucinam?"],
+        title="Chat and LLM server in the same application",
+        description="This space is a template that we can duplicate for your own usage. "
                     "This space let you build LLM powered idea on top of [Gradio](https://www.gradio.app/) "
                     "and open LLM served locally by [TGI(Text Generation Inference)](https://huggingface.co/docs/text-generation-inference/en/index). "
                     "Below is a placeholder Gradio ChatInterface for you to try out Mistral-7B backed by the power of TGI's efficiency. \n\n"
                     "To use this space for your own usecase, follow the simple steps below:\n"
-                    "1.
+                    "1. Duplicate this space. \n"
                     "2. Set which LLM you wish to use (i.e. mistralai/Mistral-7B-Instruct-v0.2). \n"
-                    "3. Inside
-                    "4. (Bonus➕) [app/gen](https://huggingface.co/spaces/chansung/gradio_together_tgi/tree/main/app/gen) provides handy utility functions "
-                    "to aynchronously generate text by interacting with the locally served LLM.",
+                    "3. Inside app/main.py write Gradio application. \n",
         multimodal=False
     )

@@ -33,7 +31,7 @@ def main(args):
     ).launch(auth=(USERNAME, PWD), server_name="0.0.0.0", server_port=args.port)

 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="
+    parser = argparse.ArgumentParser(description="A MAGIC example by ConceptaTech")
     parser.add_argument("--port", type=int, default=7860, help="Port to expose Gradio app")

     args = parser.parse_args()
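For context, fn=chat wires the interface to a chat callback that lives elsewhere in the repository and is not shown in this diff. A minimal sketch of what such a callback could look like, assuming TGI is serving the model locally at http://0.0.0.0:8080 and that a plain Mistral-style [INST] prompt format is used (both are assumptions, not taken from this commit):

from huggingface_hub import InferenceClient

# Local TGI endpoint; the URL is an assumption, not part of this diff.
client = InferenceClient(model="http://0.0.0.0:8080")

def chat(message, history):
    # Fold prior turns and the new message into a single Mistral-style prompt.
    prompt = ""
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn} "
    prompt += f"[INST] {message} [/INST]"

    # Stream tokens back; gr.ChatInterface renders each yielded partial answer.
    partial = ""
    for token in client.text_generation(prompt, max_new_tokens=512, stream=True):
        partial += token
        yield partial

The description text removed by this commit points to app/gen for the Space's own generation utilities, so the actual callback will differ; the sketch only illustrates the wiring between gr.ChatInterface and a locally served TGI endpoint.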
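Those app/gen utilities are described in the old text as asynchronous helpers for talking to the locally served LLM. A hypothetical sketch of that shape using huggingface_hub's AsyncInferenceClient; the helper name agenerate and the endpoint URL are illustrative assumptions, not the Space's actual code:

import asyncio

from huggingface_hub import AsyncInferenceClient

# Local TGI endpoint; URL and helper name are illustrative assumptions.
client = AsyncInferenceClient(model="http://0.0.0.0:8080")

async def agenerate(prompt: str, max_new_tokens: int = 256) -> str:
    # One non-streaming generation request against the locally served LLM.
    return await client.text_generation(prompt, max_new_tokens=max_new_tokens)

if __name__ == "__main__":
    # Example: generate a single completion for one of the UI example prompts.
    print(asyncio.run(agenerate("Explain the enterprise adoption challenges")))

The entrypoint itself is run as a script (for example, python app/main.py --port 7860), with Gradio basic-auth credentials presumably taken from environment variables, as the launch(auth=(USERNAME, PWD), ...) call and the PWD = os.getenv("PWD") line in the hunk header suggest.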