freddyaboulton (HF staff) committed
Commit e85fa31 · 1 Parent(s): 5d86b2a
Files changed (2)
  1. app.py +1 -1
  2. requirements.txt +1 -1
app.py CHANGED
@@ -13,7 +13,7 @@ from parler_tts import ParlerTTSForConditionalGeneration
 from pydub import AudioSegment
 from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
 from transformers.generation.streamers import BaseStreamer
-from huggingface_hub import InferrenceClient
+from huggingface_hub import InferenceClient
 
 device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
 torch_dtype = torch.float16 if device != "cpu" else torch.float32
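
For context, a minimal sketch of how the corrected InferenceClient import is typically used; the model id, message, and parameters below are illustrative placeholders and are not taken from this Space's app.py:

from huggingface_hub import InferenceClient

# Hypothetical usage sketch: query a hosted chat model through the Inference API.
# The model id and prompt are placeholders, not values from this Space.
client = InferenceClient(model="meta-llama/Meta-Llama-3-8B-Instruct")
response = client.chat_completion(
    messages=[{"role": "user", "content": "Describe a calm, low-pitched speaking voice."}],
    max_tokens=64,
)
print(response.choices[0].message.content)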
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-"gradio-client @ git+https://github.com/gradio-app/gradio@bed454c3d22cfacedc047eb3b0ba987b485ac3fd#subdirectory=client/python"
+git+https://github.com/gradio-app/gradio@bed454c3d22cfacedc047eb3b0ba987b485ac3fd#subdirectory=client/python
 https://gradio-builds.s3.amazonaws.com/bed454c3d22cfacedc047eb3b0ba987b485ac3fd/gradio-4.40.0-py3-none-any.whl
 git+https://github.com/huggingface/parler-tts.git
 accelerate