File size: 1,239 Bytes
8b90ad5 ecd9090 2eb1363 967efaf 0250d76 ee6e9e2 efbaaff 8647971 029e32c b9f4a2a 2eb1363 efbaaff 2eb1363 fac22d0 fce8087 b9c73b1 696db06 a6b442f 8b90ad5 a6b442f 8b90ad5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
from typing import Dict, List

from fastapi import FastAPI, Query
from fastapi.middleware.cors import CORSMiddleware

import Linlada
app = FastAPI()

# Permit cross-origin requests from any origin so browser-based clients
# can call this API directly.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Load the Stable Diffusion v1.5 text-to-image pipeline.
# NOTE(review): `StableDiffusionPipeline` and `auth_token` are not defined
# anywhere in this file — presumably `from diffusers import
# StableDiffusionPipeline` and a Hugging Face access token are expected;
# confirm the missing imports/definitions.
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token)
# Run inference on CPU; attention slicing lowers peak memory at some speed cost.
pipe = pipe.to("cpu")
pipe.enable_attention_slicing()
def dummy(images, **_ignored):
    """No-op safety checker: return *images* untouched and report no NSFW hit.

    Extra keyword arguments passed by the pipeline are accepted and discarded.
    """
    return (images, False)
# Disable the pipeline's NSFW safety checker by swapping in the pass-through.
pipe.safety_checker = dummy
@app.get("/")
def hello():
    """Root endpoint: a simple liveness greeting."""
    greeting = "Hello, I'm Artist"
    return greeting
@app.post('/generate_completion')
async def generate_completion(
    model: str = Query('gpt-4', description='The model to use for generating the completion'),
    messages: List[Dict[str, str]] = Query(..., description='The list of messages to generate the completion for'),
    stream: bool = Query(False, description='Whether to stream the response')
):
    """Generate a chat completion and return its messages as a list.

    Parameters (unchanged interface):
        model: model identifier forwarded to the completion backend.
        messages: chat history to complete.
        stream: whether the backend should stream its response.

    Returns:
        A list containing every message yielded by the backend.
    """
    # NOTE(review): `index` is not defined in this file — possibly the
    # `Linlada` module imported at the top was intended; confirm.
    response = index._create_completion(model=model, messages=messages, stream=stream)
    # Materialize the (possibly streaming) iterator in one pass instead of a
    # manual append loop. Note that even with stream=True the whole response
    # is collected before returning; true streaming would require returning a
    # StreamingResponse instead.
    return list(response)
|