import json
import logging
import subprocess
import tempfile
from pathlib import Path
from typing import Literal, Optional

import fastapi
import fastapi.middleware.cors
import torch
import tyro
import uvicorn
from attr import dataclass
from fastapi import Request
from fastapi.responses import Response
from huggingface_hub import snapshot_download

from fam.llm.sample import (
    InferenceConfig,
    get_first_stage_path,
    get_second_stage_path,
)
from fam.llm.fast_inference import TTS

logger = logging.getLogger(__name__)


# Set up the FastAPI server.
app = fastapi.FastAPI()


@dataclass
class ServingConfig:
    huggingface_repo_id: str
    """Hugging Face Hub repo ID of the model to serve (not a local path)."""

    max_new_tokens: int = 864 * 2
    """Maximum number of new tokens to generate from the first stage model."""

    temperature: float = 1.0
    """Temperature for sampling applied to both models."""

    top_k: int = 200
    """Top k for sampling applied to both models."""

    seed: int = 1337
    """Random seed for sampling."""

    dtype: Literal["bfloat16", "float16", "float32", "tfloat32"] = "bfloat16"
    """Data type to use for sampling."""

    enhancer: Optional[Literal["df"]] = "df"
    """Enhancer to use for post-processing."""

    port: int = 58003
    """Port to bind the HTTP server to."""
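
# Example launch (a sketch: assumes this file is saved as serving.py, the repo
# ID shown is illustrative, and the exact flag spelling depends on how tyro
# renders field names):
#
#   python serving.py --huggingface_repo_id metavoiceio/metavoice-1B-v0.1 --port 58003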


# Process-wide singleton holding the parsed config and the loaded TTS model.
class _GlobalState:
    config: ServingConfig
    tts: TTS


GlobalState = _GlobalState()

@dataclass(frozen=True)
class TTSRequest:
    text: str
    guidance: Optional[float] = 3.0
    top_p: Optional[float] = 0.95
    speaker_ref_path: Optional[str] = None
    top_k: Optional[int] = None
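
# Example `X-Payload` contents for the /tts endpoint below (a sketch; every
# field except `text` is optional and falls back to the defaults above):
#
#   {"text": "Hello world", "guidance": 3.0, "top_p": 0.95, "top_k": 200}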


def sample_utterance(
    text: str,
    spk_cond_path: str | None,
    guidance_scale,
    max_new_tokens,
    top_k,
    top_p,
    temperature,
) -> str:
    # NOTE: max_new_tokens and top_k are accepted for interface compatibility
    # but are not consumed by the fast inference path.
    return GlobalState.tts.synthesise(
        text,
        spk_cond_path,
        top_p=top_p,
        guidance_scale=guidance_scale,
        temperature=temperature,
    )


@app.post("/tts", response_class=Response)
async def text_to_speech(req: Request):
    audiodata = await req.body()
    payload = None
    wav_out_path = None

    try:
        # Request metadata arrives as JSON in the X-Payload header; the body
        # carries optional reference-speaker audio bytes.
        payload = req.headers["X-Payload"]
        payload = json.loads(payload)
        tts_req = TTSRequest(**payload)
        with tempfile.NamedTemporaryFile(suffix=".wav") as wav_tmp:
            if tts_req.speaker_ref_path is None:
                wav_path = _convert_audiodata_to_wav_path(audiodata, wav_tmp)
            else:
                wav_path = tts_req.speaker_ref_path
            wav_out_path = sample_utterance(
                tts_req.text,
                wav_path,
                guidance_scale=tts_req.guidance,
                max_new_tokens=GlobalState.config.max_new_tokens,
                temperature=GlobalState.config.temperature,
                top_k=tts_req.top_k,
                top_p=tts_req.top_p,
            )
        with open(wav_out_path, "rb") as f:
            return Response(content=f.read(), media_type="audio/wav")
    except Exception:
        logger.exception("Error processing request %s", payload)
        return Response(
            content="Something went wrong. Please try again in a few mins or contact us on Discord",
            status_code=500,
        )
    finally:
        if wav_out_path is not None:
            Path(wav_out_path).unlink(missing_ok=True)
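
# Example client call (a sketch, assuming the server runs locally on the
# default port; `speaker_ref.wav` is a placeholder file). The JSON payload
# travels in the X-Payload header; the raw body carries the reference audio
# unless `speaker_ref_path` is set in the payload.
#
#   import json
#   import requests
#
#   with open("speaker_ref.wav", "rb") as f:
#       resp = requests.post(
#           "http://127.0.0.1:58003/tts",
#           headers={"X-Payload": json.dumps({"text": "Hello world"})},
#           data=f.read(),
#       )
#   with open("out.wav", "wb") as f:
#       f.write(resp.content)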


def _convert_audiodata_to_wav_path(audiodata, wav_tmp):
    with tempfile.NamedTemporaryFile() as unknown_format_tmp:
        assert unknown_format_tmp.write(audiodata) > 0, "received empty reference audio"
        unknown_format_tmp.flush()

        # Transcode whatever container the client sent into WAV, with an
        # arbitrary 2 minute cutoff. Passing an argument list (rather than a
        # shlex-split string) keeps paths with spaces safe.
        subprocess.check_output(
            ["ffmpeg", "-t", "120", "-y", "-i", unknown_format_tmp.name, "-f", "wav", wav_tmp.name]
        )

        return wav_tmp.name


if __name__ == "__main__":
    # This import has to happen here to avoid some weird audiocraft shenanigans
    # messing up matplotlib.
    from fam.llm.enhancers import get_enhancer  # noqa: F401

    for name in logging.root.manager.loggerDict:
        logging.getLogger(name).setLevel(logging.INFO)
    logging.root.setLevel(logging.INFO)

    GlobalState.config = tyro.cli(ServingConfig)
    app.add_middleware(
        fastapi.middleware.cors.CORSMiddleware,
        # NOTE: "*" already matches every origin, so the explicit localhost
        # entries below are redundant while it is present.
        allow_origins=["*", f"http://localhost:{GlobalState.config.port}", "http://localhost:3000"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    device = "cuda" if torch.cuda.is_available() else "cpu"
    common_config = dict(
        num_samples=1,
        seed=1337,
        device=device,
        dtype=GlobalState.config.dtype,
        compile=False,
        init_from="resume",
        output_dir=tempfile.mkdtemp(),
    )
    model_dir = snapshot_download(repo_id=GlobalState.config.huggingface_repo_id)
    config1 = InferenceConfig(
        ckpt_path=get_first_stage_path(model_dir),
        **common_config,
    )
    config2 = InferenceConfig(
        ckpt_path=get_second_stage_path(model_dir),
        **common_config,
    )

    # NOTE: config1/config2 are built but not currently consumed; the fast
    # inference path below loads its own weights. The snapshot_download above
    # still pre-fetches the configured repo.
    GlobalState.tts = TTS()

    # start server
    uvicorn.run(
        app,
        host="127.0.0.1",
        port=GlobalState.config.port,
        log_level="info",
    )