### -------------------------------- ###
###            libraries             ###
### -------------------------------- ###
import gradio as gr
import numpy as np
import os
import tensorflow as tf

### -------------------------------- ###
###          model loading           ###
### -------------------------------- ###
model = tf.keras.models.load_model('model.h5')

### -------------------------------- ###
###      reading: categories.txt     ###
### -------------------------------- ###
# placeholder labels, shown until categories.txt is provided
labels = ['please upload categories.txt' for _ in range(10)]
if os.path.isfile("categories.txt"):
    # read the space-separated class names from the first line of categories.txt
    with open("categories.txt", "r") as categories:
        labels = categories.readline().split()

### -------------------------------- ###
###         page description         ###
### -------------------------------- ###
title = "Cast parts: Defective or Okay?"
description = "A Hugging Face Spaces demo created by datasith!"
article = '''
#### Defective or Okay? Demo app including a binary classification model for cast parts

This is a test project to get familiar with Hugging Face!

The space includes the necessary files for everything to run smoothly on HF's Spaces:
- app.py
- reader.py
- requirements.txt
- model.h5 (TensorFlow/Keras)
- categories.txt
- info.txt

The data used to train the model is available as a
[Kaggle dataset](https://www.kaggle.com/datasets/ravirajsinh45/real-life-industrial-dataset-of-casting-product).
The space was inspired by @Isabel's wonderful
[cat or pug](https://huggingface.co/spaces/isabel/pug-or-cat-image-classifier) one.

If you enjoy my work feel free to follow me here on HF and/or on:
- [GitHub](https://github.com/datasith)
- [Kaggle](https://kaggle.com/datasith)
- [Twitter](https://twitter.com/datasith)
- [LinkedIn](https://linkedin.com/in/datasith)

Either way, enjoy!
'''

### -------------------------------- ###
###        interface creation        ###
### -------------------------------- ###
samples = ['defective.jpeg', 'okay.jpeg']

def preprocess(image):
    # keep a single channel: the casting photos are effectively grayscale,
    # and the model expects input of shape (1, 300, 300, 1)
    img_grayscale = image[:, :, 1]
    img_array = tf.keras.utils.img_to_array(img_grayscale)
    img_array = tf.expand_dims(img_array, 0)  # add the batch dimension
    return img_array

def predict_image(image):
    # run the model and map each softmax score to its label
    predictions = model.predict(preprocess(image))
    scores = tf.nn.softmax(predictions[0])
    results = {}
    for idx, res in enumerate(scores):
        results[labels[idx]] = float(res)
    return results

# generate img input and text label output
image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
label = gr.outputs.Label(num_top_classes=len(labels))

# generate and launch interface
interface = gr.Interface(
    fn=predict_image,
    inputs=image,
    outputs=label,
    article=article,
    theme='default',
    title=title,
    allow_flagging='never',
    description=description,
    examples=samples,
)
interface.launch()
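
### -------------------------------- ###
###   optional: offline sanity check ###
### -------------------------------- ###
# A minimal sketch, not part of the original space, for exercising predict_image
# without the web UI. It assumes Pillow is available (Gradio depends on it) and
# that the bundled sample image 'defective.jpeg' sits next to app.py. Because
# interface.launch() above blocks, comment out the launch call and uncomment
# these lines to run the check instead:
#
# from PIL import Image
# sample = np.array(Image.open("defective.jpeg").convert("RGB").resize((300, 300)))
# print(predict_image(sample))  # prints a dict mapping each label to its score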