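# Gradio Space: run an uploaded or URL-loaded image through several Hugging Face
# image-classification models and report, per model, whether the image looks
# real or AI-generated.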
import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline
from numpy import exp
import pandas as pd
from PIL import Image
import urllib.request
import uuid
uid=uuid.uuid4()

def softmax(vector):
    # Convert raw model logits into probabilities that sum to 1
    e = exp(vector)
    return e / e.sum()
models=[
    "Nahrawy/AIorNot",
    "umm-maybe/AI-image-detector",
    "arnolfokam/ai-generated-image-detector",
    "Binyamin/Hybrid_1",
    "HuggingSara/model_soups",
    "psyne/AIResnetClone",
]
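# Note: the checkpoints do not share a label order. aiornot0/1/3/4 below treat
# index 0 as "Real" and index 1 as "AI"; aiornot2 and aiornot5 use the reverse,
# which is why their label lists and probability indices are swapped.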
# Collects each model's per-image results so tot_prob() can combine them
fin_sum=[]
#fin_res={f'{uid}':''}
#fin_sum.append(fin_res)
#tmp_res=
def aiornot0(image):
    # Model 0: index 0 = Real, index 1 = AI
    labels = ["Real", "AI"]
    mod=models[0]
    feature_extractor0 = AutoFeatureExtractor.from_pretrained(mod)
    model0 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor0(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model0(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    #results[labels['label']] = result['score']
    tmp_res={f'{uid}-0':results}
    fin_sum.append(tmp_res)
    return gr.HTML.update(html_out),results
def aiornot1(image):
    # Model 1: index 0 = Real, index 1 = AI
    labels = ["Real", "AI"]
    mod=models[1]
    feature_extractor1 = AutoFeatureExtractor.from_pretrained(mod)
    model1 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor1(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model1(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    #results[labels['label']] = result['score']
    tmp_res={f'{uid}-1':results}
    fin_sum.append(tmp_res)
    return gr.HTML.update(html_out),results
def aiornot2(image):
    # Model 2 uses the reverse label order: index 0 = AI, index 1 = Real
    labels = ["AI", "Real"]
    mod=models[2]
    feature_extractor2 = AutoFeatureExtractor.from_pretrained(mod)
    model2 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor2(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model2(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[1][0]}<br>
    AI: {px[0][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    tmp_res={f'{uid}-2':results}
    fin_sum.append(tmp_res)
    return gr.HTML.update(html_out),results
def aiornot3(image):
    # Model 3 (not wired to the Detect button below, so nothing is appended to fin_sum)
    labels = ["Real", "AI"]
    mod=models[3]
    feature_extractor3 = AutoFeatureExtractor.from_pretrained(mod)
    model3 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor3(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model3(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    #results[labels['label']] = result['score']
    return gr.HTML.update(html_out),results
def aiornot4(image):
    # Model 4 (not wired to the Detect button below)
    labels = ["Real", "AI"]
    mod=models[4]
    feature_extractor4 = AutoFeatureExtractor.from_pretrained(mod)
    model4 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor4(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model4(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[0][0]}<br>
    AI: {px[1][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    #results[labels['label']] = result['score']
    return gr.HTML.update(html_out),results
def aiornot5(image):
    # Model 5 uses the reverse label order: index 0 = AI, index 1 = Real (not wired to the Detect button below)
    labels = ["AI", "Real"]
    mod=models[5]
    feature_extractor5 = AutoFeatureExtractor.from_pretrained(mod)
    model5 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor5(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model5(**inputs)
        logits = outputs.logits
        probability = softmax(logits)
        px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {px[1][0]}<br>
    AI: {px[0][0]}"""
    results = {}
    for idx,result in enumerate(px):
        results[labels[idx]] = px[idx][0]
    #results[labels['label']] = result['score']
    return gr.HTML.update(html_out),results
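# The six aiornotN functions above repeat the same steps; a hypothetical
# consolidated helper (a sketch only -- it is not used by the app below, and
# the name and signature are not part of the original Space) could look like:
def aiornot_generic(image, model_index, labels):
    # Load the chosen checkpoint, classify the image, and return the same
    # (HTML, label-confidence dict) pair the per-model functions return.
    mod = models[model_index]
    extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    inputs = extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = logits.softmax(dim=-1)[0]
    results = {labels[i]: float(probs[i]) for i in range(len(labels))}
    label = labels[int(logits.argmax(-1).item())]
    html_out = f"<h1>This image is likely: {label}</h1>"
    return gr.HTML.update(html_out), results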
def load_url(url):
    # Download the image at `url` to a uid-prefixed temp file and open it with PIL
    try:
        urllib.request.urlretrieve(
            f'{url}',
            f"{uid}tmp_im.png")
        image = Image.open(f"{uid}tmp_im.png")
        mes = "Image Loaded"
    except Exception as e:
        image=None
        mes=f"Image not Found<br>Error: {e}"
    return image,mes
def tot_prob():
    # fin_sum is a list of single-entry dicts keyed f'{uid}-<model index>';
    # sum the "Real" scores reported by the models wired to the Detect button.
    try:
        keys = [f'{uid}-0', f'{uid}-1', f'{uid}-2']
        fin_out = sum(d[k]['Real'] for d in fin_sum for k in keys if k in d)
        print(fin_out)
    except Exception as e:
        print(f'ERROR :: {e}')
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            in_url=gr.Textbox(label="Image URL")
            with gr.Row():
                load_btn=gr.Button("Load URL")
                btn = gr.Button("Detect AI")
            mes = gr.HTML("""""")
        inp = gr.Pil()
    with gr.Group():
        with gr.Row():
            with gr.Box():
                lab0 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[0]}'>{models[0]}</a></b>""")
                n_out0=gr.Label(label="Output")
                outp0 = gr.HTML("""""")
            with gr.Box():
                lab1 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[1]}'>{models[1]}</a></b>""")
                n_out1=gr.Label(label="Output")
                outp1 = gr.HTML("""""")
            with gr.Box():
                lab2 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[2]}'>{models[2]}</a></b>""")
                n_out2=gr.Label(label="Output")
                outp2 = gr.HTML("""""")
        with gr.Row():
            with gr.Box():
                lab3 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[3]}'>{models[3]}</a></b>""")
                n_out3=gr.Label(label="Output")
                outp3 = gr.HTML("""""")
            with gr.Box():
                lab4 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[4]}'>{models[4]}</a></b>""")
                n_out4=gr.Label(label="Output")
                outp4 = gr.HTML("""""")
            with gr.Box():
                lab5 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[5]}'>{models[5]}</a></b>""")
                n_out5=gr.Label(label="Output")
                outp5 = gr.HTML("""""")

    load_btn.click(load_url,in_url,[inp,mes])
    btn.click(aiornot0,[inp],[outp0,n_out0]).then(tot_prob,None,None)
    btn.click(aiornot1,[inp],[outp1,n_out1]).then(tot_prob,None,None)
    btn.click(aiornot2,[inp],[outp2,n_out2]).then(tot_prob,None,None)
    #btn.click(aiornot3,[inp],[outp3,n_out3])
    #btn.click(aiornot4,[inp],[outp4,n_out4])
    #btn.click(aiornot5,[inp],[outp5,n_out5])

app.queue(concurrency_count=20).launch()