jbilcke-hf HF staff committed on
Commit
43740c8
1 Parent(s): 00202bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -1,5 +1,8 @@
1
  from __future__ import annotations
2
  import os
 
 
 
3
  # we need to compile a CUBLAS version
4
  # Or get it from https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/
5
  os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python==0.2.11')
@@ -25,7 +28,7 @@ import langid
25
  import uuid
26
  import emoji
27
  import pathlib
28
- import base64
29
  import datetime
30
 
31
  from scipy.io.wavfile import write
@@ -52,8 +55,6 @@ import numpy as np
52
  from gradio_client import Client
53
  from huggingface_hub import InferenceClient
54
 
55
- SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
56
-
57
  # This will trigger downloading model
58
  print("Downloading if not downloaded Coqui XTTS V2")
59
 
@@ -63,6 +64,7 @@ ModelManager().download_model(model_name)
63
  model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
64
  print("XTTS downloaded")
65
 
 
66
  print("Loading XTTS")
67
  config = XttsConfig()
68
  config.load_json(os.path.join(model_path, "config.json"))
@@ -78,9 +80,12 @@ model.load_checkpoint(
78
  model.cuda()
79
  print("Done loading TTS")
80
 
81
- title = "Voice chat with Zephyr and Coqui XTTS"
 
 
82
 
83
- DESCRIPTION = """# Voice chat with Zephyr and Coqui XTTS"""
 
84
 
85
  from huggingface_hub import HfApi
86
 
@@ -88,9 +93,11 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
88
  # will use api to restart space on a unrecoverable error
89
  api = HfApi(token=HF_TOKEN)
90
 
91
- repo_id = "jbilcke-hf/zephyr-xtts"
92
-
 
93
  SENTENCE_SPLIT_LENGTH=250
 
94
 
95
  default_system_message = f"""
96
  You're the storyteller, crafting a short tale for young listeners. Please abide by these guidelines:
 
1
  from __future__ import annotations
2
  import os
3
+ #download for mecab
4
+ os.system('python -m unidic download')
5
+
6
  # we need to compile a CUBLAS version
7
  # Or get it from https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/
8
  os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python==0.2.11')
 
28
  import uuid
29
  import emoji
30
  import pathlib
31
+
32
  import datetime
33
 
34
  from scipy.io.wavfile import write
 
55
  from gradio_client import Client
56
  from huggingface_hub import InferenceClient
57
 
 
 
58
  # This will trigger downloading model
59
  print("Downloading if not downloaded Coqui XTTS V2")
60
 
 
64
  model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
65
  print("XTTS downloaded")
66
 
67
+
68
  print("Loading XTTS")
69
  config = XttsConfig()
70
  config.load_json(os.path.join(model_path, "config.json"))
 
80
  model.cuda()
81
  print("Done loading TTS")
82
 
83
+ #####llm_model = os.environ.get("LLM_MODEL", "mistral") # or "zephyr"
84
+
85
+ title = "Voice chat with Zephyr/Mistral and Coqui XTTS"
86
 
87
+ DESCRIPTION = """# Voice chat with Zephyr/Mistral and Coqui XTTS"""
88
+ css = """.toast-wrap { display: none !important } """
89
 
90
  from huggingface_hub import HfApi
91
 
 
93
  # will use api to restart space on a unrecoverable error
94
  api = HfApi(token=HF_TOKEN)
95
 
96
+ # config changes by Julian ---------------
97
+ repo_id = "jbilcke-hf/ai-bedtime-story-server"
98
+ SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
99
  SENTENCE_SPLIT_LENGTH=250
100
+ # ----------------------------------------
101
 
102
  default_system_message = f"""
103
  You're the storyteller, crafting a short tale for young listeners. Please abide by these guidelines: