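"""Gradio demo: text-to-image search over Flickr8k with a trained CLIP model.

The training/inference utilities live in assignment23.py; this app unpacks the
dataset, builds per-image caption ids, precomputes image embeddings from a
saved checkpoint, and serves a text-prompt-to-gallery interface.
"""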
import subprocess
import zipfile

import gradio as gr
import pandas as pd

from assignment23 import get_image_embeddings, inference_CLIP, make_train_valid_dfs
# Dataset locations after extraction (the archive unpacks into ./Images and
# a captions file in the working directory).
image_path = "./Images"
captions_path = "."
data_source = "flickr8k.zip"

# Unpack the Flickr8k archive into the working directory.
with zipfile.ZipFile(data_source, "r") as zip_ref:
    zip_ref.extractall(".")
cmd = "pwd"
output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
cmd = "ls -l"
output1 = subprocess.check_output(cmd, shell=True).decode("utf-8")
df = pd.read_csv("captions.txt")
df['id'] = [id_ for id_ in range(df.shape[0] // 5) for _ in range(5)]
df.to_csv("captions.csv", index=False)
df = pd.read_csv("captions.csv")
_, valid_df = make_train_valid_dfs()
model, image_embeddings = get_image_embeddings(valid_df, "best.pt")
examples = ["man and women on road"]
def greet(query_text):
print("Going to invoke inference_CLIP")
return inference_CLIP(query_text)
gallery = gr.Gallery(
    label="CLIP result images",
    show_label=True,
    elem_id="gallery",
    columns=[3],
    rows=[3],
    object_fit="contain",
    height="auto",
)

demo = gr.Interface(
    fn=greet,
    inputs=gr.Dropdown(choices=examples, label="Search Image by text prompt"),
    outputs=gallery,
    title="OpenAI CLIP: Contrastive Language-Image Pre-training",
)
demo.launch("debug")