Raumkommander committed on
Commit
63b12f2
·
1 Parent(s): 3f61ba7

initial deployment1

Browse files
Files changed (1) hide show
  1. app.py +0 -16
app.py CHANGED
@@ -6,22 +6,6 @@ from diffusers import StableDiffusionPipeline,AutoPipelineForImage2Image,Autoenc
6
  from transformers import AutoProcessor, AutoModel, AutoTokenizer
7
  from PIL import Image
8
 
9
- # Load the Real-Time Latent Consistency Model
10
- #device = "cuda" if torch.cuda.is_available() else "cpu"
11
- ##realtime_pipe = StableDiffusionPipeline.from_pretrained("radames/Real-Time-Latent-Consistency-Model").to(device)
12
-
13
- # Load the model (optimized for inference)#
14
- #model_id = "radames/Real-Time-Latent-Consistency-Model"
15
-
16
- # model_id = "stabilityai/sd-turbo"
17
- # AutoPipelineForImage2Image.from_pretrained(base_model)
18
- #
19
- # tokenizer = AutoTokenizer.from_pretrained(model_id)
20
- #
21
- # realtime_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
22
- # realtime_pipe.to("cuda") # Use GPU for faster inference
23
-
24
-
25
  #
26
  #
27
  # def predict(prompt, frame):
 
6
  from transformers import AutoProcessor, AutoModel, AutoTokenizer
7
  from PIL import Image
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  #
10
  #
11
  # def predict(prompt, frame):