Doğukan Tuna
committed on
Commit ad09f1c
1 Parent(s): f373725
add: launch application
Files changed:
- .DS_Store +0 -0
- QKTCC_simPennylane-26032022174332.pth +3 -0
- README.md +5 -5
- app.py +111 -0
- requirements.txt +7 -0
.DS_Store
ADDED
Binary file (6.15 kB)
QKTCC_simPennylane-26032022174332.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35f5b1f9bdf4513efe780a87620c18df40824c8c33d1c3c0502be9cf075a63e3
size 44791559
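The file above is only a Git LFS pointer; the ~45 MB checkpoint itself is stored out of band. As a minimal sketch (editorial, not part of the commit), it could be fetched programmatically with huggingface_hub from requirements.txt; the repo id below is a placeholder for this Space's actual id:

from huggingface_hub import hf_hub_download

# Placeholder repo id; substitute the actual Space id, e.g. "<user>/<space-name>".
checkpoint_path = hf_hub_download(
    repo_id="<user>/<space-name>",
    repo_type="space",
    filename="QKTCC_simPennylane-26032022174332.pth",
)
print(checkpoint_path)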
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: QTL
+emoji: ⚡
+colorFrom: gray
+colorTo: red
 sdk: gradio
-sdk_version: 2.
+sdk_version: 2.8.14
 app_file: app.py
 pinned: false
 license: mit
app.py
ADDED
@@ -0,0 +1,111 @@
import torch
import torchvision
import gradio as gr
import torch.nn as nn
import pennylane as qml
import matplotlib.pyplot as plt

from pennylane import numpy as np
from torchvision import transforms

qubits = 4
batch_size = 8
depth = 6
delta = 0.01

is_cuda_available = torch.cuda.is_available()
device = torch.device("cuda:0" if is_cuda_available else "cpu")

if is_cuda_available:
    print("CUDA is available, selected:", device)
else:
    print("CUDA not available, selected:", device)

dev = qml.device("default.qubit", wires=qubits)
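For orientation (an editorial note, not part of the committed file): "default.qubit" is PennyLane's built-in state-vector simulator, and the constants above size the variational circuit used below. A quick check of the trainable-angle count:

# Editorial sketch: the quantum layers use depth * qubits rotation angles,
# matching the size of q_params defined in QuantumNet further down.
print(depth * qubits)  # 6 * 4 = 24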
def H_layer(nqubits):
    for idx in range(nqubits):
        qml.Hadamard(wires=idx)

def RY_layer(w):
    for idx, element in enumerate(w):
        qml.RY(element, wires=idx)

def entangling_layer(nqubits):
    for i in range(0, nqubits - 1, 2):
        qml.CNOT(wires=[i, i + 1])
    for i in range(1, nqubits - 1, 2):
        qml.CNOT(wires=[i, i + 1])

@qml.qnode(dev, interface="torch")
def quantum_net(q_input_features, q_weights_flat):
    q_weights = q_weights_flat.reshape(depth, qubits)
    H_layer(qubits)
    RY_layer(q_input_features)

    for k in range(depth):
        entangling_layer(qubits)
        RY_layer(q_weights[k])

    exp_vals = [qml.expval(qml.PauliZ(position)) for position in range(qubits)]
    return tuple(exp_vals)
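To see what quantum_net builds, a small sketch (editorial; assumes a PennyLane release that provides qml.draw) prints the circuit for random angles, with argument shapes mirroring how QuantumNet calls the QNode below:

# Editorial sketch: draw the 4-qubit circuit for random inputs.
sample_features = torch.rand(qubits)          # one embedding angle per qubit
sample_weights = torch.rand(depth * qubits)   # flattened variational angles
print(qml.draw(quantum_net)(sample_features, sample_weights))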
class QuantumNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.pre_net = nn.Linear(512, qubits)
        self.q_params = nn.Parameter(delta * torch.randn(depth * qubits))
        self.post_net = nn.Linear(qubits, 2)

    def forward(self, input_features):
        pre_out = self.pre_net(input_features)
        q_in = torch.tanh(pre_out) * np.pi / 2.0
        q_out = torch.Tensor(0, qubits)
        q_out = q_out.to(device)
        for elem in q_in:
            q_out_elem = quantum_net(elem, self.q_params).float().unsqueeze(0)
            q_out = torch.cat((q_out, q_out_elem))
        return self.post_net(q_out)
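A minimal shape check for the hybrid head (editorial, not in the commit): pass a dummy batch of 512-dimensional ResNet-18 features through QuantumNet and confirm two logits per sample come back; each sample triggers one simulated circuit evaluation, so large batches are slow.

# Editorial sketch: dummy forward pass through the dressed quantum layer.
head = QuantumNet().to(device)
dummy_features = torch.randn(2, 512).to(device)
print(head(dummy_features).shape)  # expected: torch.Size([2, 2])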
def classify(image):
    mhModel = torch.load("QKTCC_simPennylane-26032022174332.pth", map_location=device)
    mMModel = torchvision.models.resnet18(pretrained=True)
    for param in mMModel.parameters():
        param.requires_grad = False
    mMModel.fc = QuantumNet()
    mMModel = mMModel.to(device)
    qModel = mMModel
    qModel.load_state_dict(mhModel)

    from PIL import Image

    data_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    PIL_img = image
    img = data_transforms(PIL_img)
    img_input = img.unsqueeze(0)

    qModel.eval()
    with torch.no_grad():
        outputs = qModel(img_input)
        base_labels = (("mask", outputs[0, 0]), ("no_mask", outputs[0, 1]))
        expvals, preds = torch.max(outputs, 1)
        expvals_min, preds_min = torch.min(outputs, 1)
        if expvals == base_labels[0][1]:
            labels = base_labels[0][0]
        else:
            labels = base_labels[1][0]
        outp = "Classified with output: " + labels + ", Tensor: " + str(expvals) + " (" + str(expvals_min) + ")"
        return outp

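classify rebuilds the ResNet-18 backbone with the QuantumNet head and reloads the checkpoint on every call, then applies standard ImageNet preprocessing before inference. A hedged local usage example, bypassing the Gradio UI (the image path is hypothetical):

# Editorial sketch: call the handler directly on a local image.
from PIL import Image
print(classify(Image.open("example_face.jpg")))  # hypothetical test image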
out = gr.outputs.Label(label='Result: ', type='auto')
iface = gr.Interface(classify, gr.inputs.Image(type="pil"), outputs=out,
                     title="Quantum Layered TL RN-18 Face Mask Detector",
                     description="🤗 This proof-of-concept quantum machine learning model takes a face image as input and detects whether the face has a mask or no mask.")

iface.launch(debug=True)
requirements.txt
ADDED
@@ -0,0 +1,7 @@
torch
Pillow
gradio
pennylane
matplotlib
torchvision
huggingface_hub