umair894 committed on
Commit
33e6746
·
1 Parent(s): d1b9251

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -26
app.py CHANGED
@@ -77,7 +77,7 @@ llm_model = os.environ.get("LLM_MODEL", "mistral") # or "zephyr"
77
 
78
  title = f"Voice chat with {llm_model.capitalize()} and Coqui XTTS"
79
 
80
- DESCRIPTION = f"""# Voice chat with {llm_model.capitalize()} and Coqui XTTS"""
81
  css = """.toast-wrap { display: none !important } """
82
 
83
  from huggingface_hub import HfApi
@@ -143,7 +143,7 @@ else:
143
  from llama_cpp import Llama
144
  # set GPU_LAYERS to 15 if you have a 8GB GPU so both models can fit in
145
  # else 35 full layers + XTTS works fine on T4 16GB
146
- GPU_LAYERS=int(os.environ.get("GPU_LAYERS", 15))
147
 
148
  LLAMA_VERBOSE=False
149
  print("Running LLM")
@@ -689,21 +689,21 @@ latent_map["AI Assistant"] = get_latents("examples/female.wav")
689
 
690
  #### GRADIO INTERFACE ####
691
  EXAMPLES = [
692
- [[],"What is 42?"],
693
- [[],"Speak in French, tell me how are you doing?"],
694
- [[],"Antworten Sie mir von nun an auf Deutsch"],
695
 
696
  ]
697
 
698
-
699
- OTHER_HTML=f"""<div>
700
- <a style="display:inline-block" href='https://github.com/coqui-ai/TTS'><img src='https://img.shields.io/github/stars/coqui-ai/TTS?style=social' /></a>
701
- <a style='display:inline-block' href='https://discord.gg/5eXr5seRrv'><img src='https://discord.com/api/guilds/1037326658807533628/widget.png?style=shield' /></a>
702
- <a href="https://huggingface.co/spaces/coqui/voice-chat-with-mistral?duplicate=true">
703
- <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
704
- <img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=0d00920c-8cc9-4bf3-90f2-a615797e5f59" />
705
- </div>
706
- """
707
  with gr.Blocks(title=title) as demo:
708
  gr.Markdown(DESCRIPTION)
709
  gr.Markdown(OTHER_HTML)
@@ -780,17 +780,17 @@ with gr.Blocks(title=title) as demo:
780
 
781
  file_msg.then(lambda: (gr.update(interactive=True),gr.update(interactive=True,value=None)), None, [txt, btn], queue=False)
782
 
783
- gr.Markdown(
784
- """
785
- This Space demonstrates how to speak to a chatbot, based solely on open-source models.
786
- It relies on 3 stage models:
787
- - Speech to Text : [Whisper-large-v2](https://sanchit-gandhi-whisper-large-v2.hf.space/) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).
788
- - LLM Model : [Mistral-7b-instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) as the chat model, GGUF Q5_K_M quantized version used locally via llama_cpp[huggingface_hub](TheBloke/Mistral-7B-Instruct-v0.1-GGUF).
789
- - Text to Speech : [Coqui's XTTS](https://huggingface.co/spaces/coqui/xtts) as a Multilingual TTS model, to generate the chatbot answers. This time, the model is hosted locally.
790
- Note:
791
- - By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml
792
- - Responses generated by chat model should not be assumed correct or taken serious, as this is a demonstration example only
793
- - iOS (Iphone/Ipad) devices may not experience voice due to autoplay being disabled on these devices by Vendor"""
794
- )
795
  demo.queue()
796
  demo.launch(debug=True)
 
77
 
78
  title = f"Voice chat with {llm_model.capitalize()} and Coqui XTTS"
79
 
80
+ DESCRIPTION = f"""# Voice/text chat with Vikk AI"""
81
  css = """.toast-wrap { display: none !important } """
82
 
83
  from huggingface_hub import HfApi
 
143
  from llama_cpp import Llama
144
  # set GPU_LAYERS to 15 if you have a 8GB GPU so both models can fit in
145
  # else 35 full layers + XTTS works fine on T4 16GB
146
+ GPU_LAYERS=int(os.environ.get("GPU_LAYERS", 35))
147
 
148
  LLAMA_VERBOSE=False
149
  print("Running LLM")
 
689
 
690
  #### GRADIO INTERFACE ####
691
  EXAMPLES = [
692
+ [[],"What are my rights if I've been arrested?"],
693
+ [[],"What should I do if I've been injured in an accident due to someone else's negligence?"],
694
+ [[],"How can I protect my assets and plan for the future through estate planning?"],
695
 
696
  ]
697
 
698
+ OTHER_HTML= ''
699
+ # OTHER_HTML=f"""<div>
700
+ # <a style="display:inline-block" href='https://github.com/coqui-ai/TTS'><img src='https://img.shields.io/github/stars/coqui-ai/TTS?style=social' /></a>
701
+ # <a style='display:inline-block' href='https://discord.gg/5eXr5seRrv'><img src='https://discord.com/api/guilds/1037326658807533628/widget.png?style=shield' /></a>
702
+ # <a href="https://huggingface.co/spaces/coqui/voice-chat-with-mistral?duplicate=true">
703
+ # <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
704
+ # <img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=0d00920c-8cc9-4bf3-90f2-a615797e5f59" />
705
+ # </div>
706
+ # """
707
  with gr.Blocks(title=title) as demo:
708
  gr.Markdown(DESCRIPTION)
709
  gr.Markdown(OTHER_HTML)
 
780
 
781
  file_msg.then(lambda: (gr.update(interactive=True),gr.update(interactive=True,value=None)), None, [txt, btn], queue=False)
782
 
783
+ # gr.Markdown(
784
+ # """
785
+ # This Space demonstrates how to speak to a chatbot, based solely on open-source models.
786
+ # It relies on 3 stage models:
787
+ # - Speech to Text : [Whisper-large-v2](https://sanchit-gandhi-whisper-large-v2.hf.space/) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).
788
+ # - LLM Model : [Mistral-7b-instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) as the chat model, GGUF Q5_K_M quantized version used locally via llama_cpp[huggingface_hub](TheBloke/Mistral-7B-Instruct-v0.1-GGUF).
789
+ # - Text to Speech : [Coqui's XTTS](https://huggingface.co/spaces/coqui/xtts) as a Multilingual TTS model, to generate the chatbot answers. This time, the model is hosted locally.
790
+ # Note:
791
+ # - By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml
792
+ # - Responses generated by chat model should not be assumed correct or taken serious, as this is a demonstration example only
793
+ # - iOS (Iphone/Ipad) devices may not experience voice due to autoplay being disabled on these devices by Vendor"""
794
+ # )
795
  demo.queue()
796
  demo.launch(debug=True)