I think a Windows update broke it.

#4
by Zuluknob - opened

It was working fine via Pinokio, but now it just errors after a few seconds with "ERROR: Exception in ASGI application" in the terminal.
I cleared the cache in Pinokio (which re-downloads the dependencies) and reinstalled Hallo; same result.
Verbose output:

Microsoft Windows [Version 10.0.22621.3880]
(c) Microsoft Corporation. All rights reserved.

C:\Users***\pinokio\api\hallo.git\app>conda_hook && conda deactivate && conda deactivate && conda deactivate && conda activate base && C:\Users***\pinokio\api\hallo.git\app\env\Scripts\activate C:\Users***\pinokio\api\hallo.git\app\env && python scripts/app.py
A matching Triton is not available, some optimizations will not be enabled
Traceback (most recent call last):
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\xformers_init_.py", line 57, in _is_triton_available
import triton # noqa
ModuleNotFoundError: No module named 'triton'
Running on local URL: http://127.0.0.1:7860

[Start proxy] Local Sharing http://127.0.0.1:7860
Proxy Started {"target":"http://127.0.0.1:7860","proxy":"http://10.25.188.186:8001"}
INFO:httpx:HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 "
To create a public link, set share=True in launch().
INFO:httpx:HTTP Request: GET http://127.0.0.1:7860/startup-events "HTTP/1.1 200 OK"
INFO:httpx:HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK"
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\uvicorn\protocols\http\httptools_impl.py", line 399, in run_asgi
result = await app( # type: ignore[func-returns-value]
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\uvicorn\middleware\proxy_headers.py", line 70, in call
return await self.app(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\fastapi\applications.py", line 1054, in call
await super().call(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\applications.py", line 123, in call
await self.middleware_stack(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\middleware\errors.py", line 186, in call
raise exc
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\middleware\errors.py", line 164, in call
await self.app(scope, receive, _send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\route_utils.py", line 714, in call
await self.app(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\middleware\exceptions.py", line 65, in call
await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 64, in wrapped_app
raise exc
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\routing.py", line 756, in call
await self.middleware_stack(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\routing.py", line 776, in app
await route.handle(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\routing.py", line 297, in handle
await self.app(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\routing.py", line 77, in app
await wrap_app_handling_exceptions(app, request)(scope, receive, send)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 64, in wrapped_app
raise exc
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "C:\Users**\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\routing.py", line 75, in app
await response(scope, receive, send)
File "C:\Users**\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\responses.py", line 352, in call
await send(
File "C:\Users**\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 50, in sender
await send(message)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette_exception_handler.py", line 50, in sender
await send(message)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\starlette\middleware\errors.py", line 161, in _send
await send(message)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\uvicorn\protocols\http\httptools_impl.py", line 526, in send
raise RuntimeError("Response content longer than Content-Length")
RuntimeError: Response content longer than Content-Length
WARNING:py.warnings:C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\onnxruntime\capi\onnxruntime_inference_collection.py:69: UserWarning: Specified provider 'CUDAExecutionProvider' is not in available provider names.Available providers: 'AzureExecutionProvider, CPUExecutionProvider'
warnings.warn(

Applied providers: ['CPUExecutionProvider'], with options: {'CPUExecutionProvider': {}}
find model: ./pretrained_models/face_analysis\models\1k3d68.onnx landmark_3d_68 ['None', 3, 192, 192] 0.0 1.0
Applied providers: ['CPUExecutionProvider'], with options: {'CPUExecutionProvider': {}}
find model: ./pretrained_models/face_analysis\models\2d106det.onnx landmark_2d_106 ['None', 3, 192, 192] 0.0 1.0
Applied providers: ['CPUExecutionProvider'], with options: {'CPUExecutionProvider': {}}
find model: ./pretrained_models/face_analysis\models\genderage.onnx genderage ['None', 3, 96, 96] 0.0 1.0
Applied providers: ['CPUExecutionProvider'], with options: {'CPUExecutionProvider': {}}
find model: ./pretrained_models/face_analysis\models\glintr100.onnx recognition ['None', 3, 112, 112] 127.5 127.5
Applied providers: ['CPUExecutionProvider'], with options: {'CPUExecutionProvider': {}}
find model: ./pretrained_models/face_analysis\models\scrfd_10g_bnkps.onnx detection [1, 3, '?', '?'] 127.5 128.0
set det-size: (640, 640)
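
(Side note on the UserWarning above: it means this onnxruntime build cannot see CUDA at all, so insightface silently falls back to the CPU provider. A quick diagnostic sketch you could run inside the same Pinokio env, nothing Hallo-specific, just plain onnxruntime:)

import onnxruntime as ort

# List the execution providers this onnxruntime build actually exposes.
# If only AzureExecutionProvider / CPUExecutionProvider show up, the CUDA
# provider is missing - usually because plain `onnxruntime` is installed
# instead of `onnxruntime-gpu`, or its CUDA/cuDNN runtime can't be found.
print(ort.get_available_providers())
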
Traceback (most recent call last):
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\queueing.py", line 541, in process_events
response = await route_utils.call_process_api(
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\route_utils.py", line 276, in call_process_api
output = await app.get_blocks().process_api(
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\blocks.py", line 1928, in process_api
result = await self.call_function(
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\blocks.py", line 1514, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\anyio_backends_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "C:\Users**\pinokio\api\hallo.git\app\env\lib\site-packages\anyio_backends_asyncio.py", line 859, in run
result = context.run(func, *args)
File "C:\Users***\pinokio\api\hallo.git\app\env\lib\site-packages\gradio\utils.py", line 833, in wrapper
response = f(*args, **kwargs)
File "C:\Users***\pinokio\api\hallo.git\app\scripts\app.py", line 47, in predict
return inference_process(args)
File "C:\Users***\pinokio\api\hallo.git\app\scripts\inference.py", line 162, in inference_process
source_image_lip_mask = image_processor.preprocess(
File "C:\Users***\pinokio\api\hallo.git\app\hallo\datasets\image_processor.py", line 124, in preprocess
face = sorted(faces, key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]))[-1]
IndexError: list index out of range
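
That last IndexError is the actual failure: the line in hallo\datasets\image_processor.py shown above sorts the detected faces by bounding-box area and takes the largest, so it crashes when insightface returns an empty list (no face found in the source image). A minimal sketch of that logic with a guard, purely illustrative and not the project's actual code:

def largest_face(faces):
    # Hypothetical helper mirroring the failing line: pick the detected face
    # with the largest bounding-box area, but raise a readable error instead
    # of an IndexError when the detector found nothing.
    if not faces:
        raise ValueError(
            "No face detected in the source image - try a clearer, "
            "front-facing photo, or check which detection provider is in use."
        )
    return sorted(
        faces,
        key=lambda f: (f["bbox"][2] - f["bbox"][0]) * (f["bbox"][3] - f["bbox"][1]),
    )[-1]
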

Same here. It worked once and now it only gives this error. Reinstalling Hallo does nothing.
