|
import spaces
import gradio as gr
import platform
import os
import socket
import torch

# Target accelerator; this app variant is CUDA-only (use Dockerfile.cpu otherwise).
device = "cuda"

# Fail fast at startup with a clear message instead of erroring later on the
# first attempt to move a tensor onto a missing GPU.
if not torch.cuda.is_available() and device == "cuda":
    raise RuntimeError("CUDA device unavailable, please use Dockerfile.cpu instead.")

print("DEVICE: ", device)

# Small probe tensor used by sysinfo() to demonstrate device placement.
RandomTensor = torch.randn(1, 2)
# BUG FIX: Tensor.to() is NOT in-place -- the original call discarded the
# returned tensor, so RandomTensor silently stayed on the CPU. Rebind the
# name so the module-level tensor actually lives on the target device.
RandomTensor = RandomTensor.to(device)
|
|
|
|
def sysinfo(device):
    """Return a human-readable report: host name, target device, the probe
    tensor moved onto that device, and its half-precision CPU round-trip."""
    sample = RandomTensor.to(device)
    # Bring the values back to the CPU as a plain Python list (fp16 precision).
    cpu_values = sample.cpu().squeeze().half().tolist()
    return f"""
hostname: {platform.node()} {socket.gethostname()}
device: {device}
tensor: {sample}
toCpu: {cpu_values}
"""
|
|
|
|
@spaces.GPU
def gpu():
    """Report system/tensor info as seen from the first CUDA device.

    The @spaces.GPU decorator requests a GPU slice on ZeroGPU Spaces
    for the duration of this call.
    """
    target = "cuda:0"
    return sysinfo(target)
|
|
|
|
|
|
def nogpu():
    """Report system/tensor info as seen from the CPU (no GPU requested)."""
    target = "cpu"
    return sysinfo(target)
|
|
|
|
|
|
|
|
# Minimal UI: two output boxes and two buttons that render sysinfo()
# through the GPU path and the plain-CPU path respectively.
with gr.Blocks() as demo:
    out_gpu = gr.Textbox(lines=5)
    out_cpu = gr.Textbox(lines=5)
    run_on_gpu = gr.Button(value="gpu")
    run_on_cpu = gr.Button(value="ngpu")

    run_on_gpu.click(fn=gpu, inputs=None, outputs=[out_gpu])
    run_on_cpu.click(fn=nogpu, inputs=None, outputs=[out_cpu])
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Bind to all interfaces on the standard Spaces port (7860) so the app
    # is reachable from outside the container; no public share link.
    launch_options = {
        "share": False,
        "debug": False,
        "server_port": 7860,
        "server_name": "0.0.0.0",
    }
    demo.launch(**launch_options)
|
|
|