import spaces
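# `spaces` is the Hugging Face Spaces package; its @spaces.GPU decorator (used below)
# requests GPU time on ZeroGPU hardware for the duration of a call.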
import gradio as gr
import platform
import os
import socket
import torch
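
# The target device is hard-coded to "cuda"; the check below fails fast when no GPU is visible.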
device = "cuda";
if not torch.cuda.is_available() and device == "cuda":
raise RuntimeError("CUDA device unavailable, please use Dockerfile.cpu instead.")

RandomTensor = torch.randn(1, 2)  # Example audio tensor
print("Moving tensor to device ...", device)
RandomTensor = RandomTensor.to(device)  # .to() is not in-place; reassign so the tensor actually moves
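
# Build a small tensor on the requested device and return a human-readable report
# of hostname and device placement.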
def sysinfo(newdev=device):
    currentDevice = RandomTensor.get_device()
    tensorExample = RandomTensor.to(newdev).norm(p=2, dim=1, keepdim=True).unsqueeze(-1).to(newdev)
    tocpu = tensorExample.cpu().squeeze().half().tolist()
    return f"""
hostname: {platform.node()} {socket.gethostname()}
dev cur: {currentDevice}
dev ini: {device}
dev new: {newdev}
tensor: {tensorExample}
toCpu: {tocpu}
"""
@spaces.GPU
def gpu():
    return sysinfo()


def nogpu():
    return sysinfo("cpu")
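
# Minimal Gradio UI: one button/textbox pair for the GPU-decorated call, one for the CPU-only call.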
with gr.Blocks() as demo:
    outgpu = gr.Textbox(lines=5)
    outnpu = gr.Textbox(lines=5)
    btngpu = gr.Button(value="gpu")
    btngpun = gr.Button(value="ngpu")
    btngpu.click(gpu, None, [outgpu])
    btngpun.click(nogpu, None, [outnpu])
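
# 7860 is Gradio's default port; binding to 0.0.0.0 makes the app reachable from outside the container.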
if __name__ == "__main__":
    demo.launch(
        share=False,
        debug=False,
        server_port=7860,
        server_name="0.0.0.0",
    )