alexfremont committed on
Commit
d3f8823
·
1 Parent(s): 9c984c7

testing parallelism

Browse files
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. main.py +65 -28
Dockerfile CHANGED
@@ -30,4 +30,4 @@ EXPOSE 7860
30
  # git clone $(cat /run/secrets/api_read)
31
 
32
  # Commande pour lancer l'application
33
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "2"]
 
30
  # git clone $(cat /run/secrets/api_read)
31
 
32
  # Commande pour lancer l'application
33
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py CHANGED
@@ -126,41 +126,78 @@ class BatchPredictRequest(BaseModel):
126
  modelName: str
127
 
128
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
@app.post("/batch_predict")
async def batch_predict(request: BatchPredictRequest):
    """Score a batch of image URLs with the requested model, sequentially.

    Each entry of the returned ``results`` list carries either a
    ``confidence`` score or an ``error`` message for its URL, in input order.
    """
    model_name = request.modelName

    # Only models loaded at startup can be served.
    if model_name not in model_pipelines:
        raise HTTPException(status_code=404, detail="Model not found")

    model = model_pipelines[model_name]
    results = []

    # Download and score the images one at a time over a shared HTTP client.
    async with httpx.AsyncClient() as client:
        for image_url in request.imageUrls:
            try:
                response = await client.get(image_url)
                image = Image.open(BytesIO(response.content))
            except Exception:
                # Best-effort: record the failure and move on to the next URL.
                results.append({"imageUrl": image_url, "error": "Invalid image URL"})
                continue

            # Resize/normalise, then add the batch dimension the model expects.
            processed = process_image(image, size=image_size)
            image_tensor = transforms.ToTensor()(processed).unsqueeze(0)

            # Inference without gradient tracking; class index 1 is reported
            # as the confidence score, rounded to two decimals.
            with torch.no_grad():
                outputs = model(image_tensor)
                probabilities = torch.nn.functional.softmax(outputs, dim=1)
                confidence = round(probabilities.numpy().tolist()[0][1], 2)

            results.append({"imageUrl": image_url, "confidence": confidence})

    return JSONResponse(content={"results": results})
 
126
  modelName: str
127
 
128
 
129
+ # @app.post("/batch_predict")
130
+ # async def batch_predict(request: BatchPredictRequest):
131
+ # model_name = request.modelName
132
+ # results = []
133
+
134
+ # # Verify if the model is loaded
135
+ # if model_name not in model_pipelines:
136
+ # raise HTTPException(status_code=404, detail="Model not found")
137
+
138
+ # model = model_pipelines[model_name]
139
+
140
+ # # Asynchronously process each image
141
+ # async with httpx.AsyncClient() as client:
142
+ # for image_url in request.imageUrls:
143
+ # try:
144
+ # response = await client.get(image_url)
145
+ # image = Image.open(BytesIO(response.content))
146
+ # except Exception as e:
147
+ # results.append({"imageUrl": image_url, "error": "Invalid image URL"})
148
+ # continue
149
+
150
+ # # Preprocess the image
151
+ # processed_image = process_image(image, size=image_size)
152
+
153
+ # # Convert to tensor
154
+ # image_tensor = transforms.ToTensor()(processed_image).unsqueeze(0)
155
+
156
+ # # Perform inference
157
+ # with torch.no_grad():
158
+ # outputs = model(image_tensor)
159
+ # probabilities = torch.nn.functional.softmax(outputs, dim=1)
160
+ # predicted_probabilities = probabilities.numpy().tolist()
161
+ # confidence = round(predicted_probabilities[0][1], 2)
162
+
163
+ # results.append({"imageUrl": image_url, "confidence": confidence})
164
+
165
+ # # Return the results as JSON
166
+ # return JSONResponse(content={"results": results})
167
+
168
+ from concurrent.futures import ProcessPoolExecutor
169
+
170
+
171
def process_single_image(image_url, model):
    """Download one image and run it through *model*.

    Returns ``{"imageUrl": ..., "confidence": ...}`` on success, or
    ``{"imageUrl": ..., "error": ...}`` when download, decoding, or
    inference fails — callers rely on always getting a dict back.
    """
    try:
        # Time-box the download so one dead URL cannot stall the batch, and
        # fail fast on non-2xx responses instead of feeding an HTML error
        # page to the image decoder.
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))

        # Resize/normalise, then add the batch dimension the model expects.
        processed_image = process_image(image, size=image_size)
        image_tensor = transforms.ToTensor()(processed_image).unsqueeze(0)

        # Inference without gradient tracking; class index 1 is reported as
        # the confidence, rounded to two decimals.
        with torch.no_grad():
            outputs = model(image_tensor)
            probabilities = torch.nn.functional.softmax(outputs, dim=1)
            predicted_probabilities = probabilities.numpy().tolist()
            confidence = round(predicted_probabilities[0][1], 2)
        return {"imageUrl": image_url, "confidence": confidence}
    except Exception as e:
        return {"imageUrl": image_url, "error": str(e)}
186
+
187
+
188
@app.post("/batch_predict")
async def batch_predict(request: BatchPredictRequest):
    """Score a batch of image URLs concurrently with the requested model.

    Returns ``{"results": [...]}`` where each entry (in input order) holds
    either a ``confidence`` score or an ``error`` message for its URL.

    Raises:
        HTTPException: 404 when ``modelName`` was not loaded at startup.
    """
    import asyncio  # local import: keeps this fix self-contained in the file

    model_name = request.modelName

    if model_name not in model_pipelines:
        raise HTTPException(status_code=404, detail="Model not found")

    model = model_pipelines[model_name]

    # BUG FIX: the previous version used ProcessPoolExecutor.map with a
    # lambda. executor.map pickles the callable to send it to worker
    # processes, and lambdas are not picklable, so every request failed at
    # runtime (the torch model is a poor pickling candidate too, and the
    # synchronous map blocked the event loop). Threads are the right tool
    # here: the work is dominated by network I/O and torch inference, both
    # of which release the GIL, and nothing needs to be pickled.
    # asyncio.gather preserves input order in its results.
    results = await asyncio.gather(
        *(
            asyncio.to_thread(process_single_image, url, model)
            for url in request.imageUrls
        )
    )

    return JSONResponse(content={"results": list(results)})