kevinwang676 committed on
Commit
6638745
1 Parent(s): 6d7e145

Create Docker

Browse files
Files changed (1) hide show
  1. Docker +97 -0
Docker ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# syntax=docker/dockerfile:1
# Copyright (c) 2023 Agung Wijaya
# Gradio RVC-inference image. Built for Hugging Face Spaces, which run the
# container as an arbitrary non-root UID — hence the world-writable cache
# directories under /content below.

FROM python:3.8.16-slim-bullseye

# OS packages: aria2 for parallel downloads, ffmpeg for audio I/O,
# build-essential for pip packages that compile native extensions.
# apt-get (not apt — apt's CLI is not script-stable), no recommends,
# and cache/log cleanup in the SAME layer so the image stays small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        aria2 \
        build-essential \
        curl \
        ffmpeg \
        tree \
        unzip \
        wget \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
        /etc/machine-id /var/lib/dbus/machine-id \
    && find /var/log -name "*.log" -type f -delete

# Writable directories for the arbitrary runtime UID:
#  - /content/tmp replaces /tmp (symlinked) so temp files work without root
#  - /content/mplconfig fixes matplotlib's MPLCONFIGDIR permission warning
#  - /content/numbacache fixes numba "cannot cache function ... no locator
#    available" (librosa) by giving numba a writable cache
#  - /content/demucscache -> /.cache fixes demucs
#    "PermissionError: [Errno 13] Permission denied: '/.cache'"
RUN mkdir -p /content/tmp /content/mplconfig /content/numbacache /content/demucscache \
    && chmod -R 777 /content \
    && rm -rf /tmp \
    && ln -s /content/tmp /tmp \
    && ln -s /content/demucscache /.cache

WORKDIR /content

# PYTORCH_NO_CUDA_MEMORY_CACHING=1 helps hosts with very little RAM (Demucs);
# the two cache vars point matplotlib and numba at the writable dirs above.
ENV PYTORCH_NO_CUDA_MEMORY_CACHING=1 \
    MPLCONFIGDIR=/content/mplconfig \
    NUMBA_CACHE_DIR=/content/numbacache

# Python tooling and core libraries. NOTE: the original installed gradio
# twice; once is enough. Versions are pinned where upstream requires them
# (numpy/numba/librosa must match for librosa 0.9.x).
RUN python -m pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir --upgrade gradio \
    && pip install --no-cache-dir \
        faiss-gpu \
        fairseq \
        ffmpeg \
        ffmpeg-python \
        praat-parselmouth \
        pyworld \
        numpy==1.23.5 \
        numba==0.56.4 \
        librosa==0.9.2

# Copy only the manifest first so the dependency layer is cached across
# source-only changes, then copy the application code.
COPY requirements.txt /content/requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt
COPY . .

# hubert_base feature-extractor checkpoint used by RVC inference.
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
        https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt \
        -d /content -o hubert_base.pt

# infer_pack library, fetched file-by-file from the upstream repo.
# `|| exit 1` makes any single failed download fail the whole layer
# (a plain for-loop would only propagate the LAST command's status).
RUN mkdir -p /content/infer_pack \
    && for f in attentions.py commons.py models.py models_onnx.py \
                models_onnx_moess.py modules.py transforms.py; do \
        aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
            "https://raw.githubusercontent.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/main/infer_pack/$f" \
            -d /content/infer_pack -o "$f" || exit 1; \
    done

# Inference pipeline plus config/util helpers from the reference Space.
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
        https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/vc_infer_pipeline.py \
        -d /content -o vc_infer_pipeline.py \
    && aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
        https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/config.py \
        -d /content -o config.py \
    && aria2c --console-log-level=error -c -x 16 -s 16 -k 1M \
        https://huggingface.co/spaces/DJQmUKV/rvc-inference/raw/main/util.py \
        -d /content -o util.py

# Sanity check: /tmp should be a symlink into the writable /content/tmp.
RUN ls -l /tmp

# Gradio's default port (documentation only; publish with -p at run time).
EXPOSE 7860

# Exec form so the Python process is PID 1 and receives SIGTERM directly.
CMD ["python", "app.py"]

# Enjoy run Gradio!