import gradio as gr
import joblib
import numpy as np
import pandas as pd
from huggingface_hub import hf_hub_download
from sklearn.preprocessing import LabelEncoder

# Load the trained model and scaler objects from the Hugging Face Hub

REPO_ID = "Hemg/marketpredict"       # Hugging Face repo ID
MODEL_FILENAME = "stx.joblib"        # model file name
SCALER_FILENAME = "scaler.joblib"    # scaler file name

model = joblib.load(hf_hub_download(repo_id=REPO_ID, filename=MODEL_FILENAME))
scaler = joblib.load(hf_hub_download(repo_id=REPO_ID, filename=SCALER_FILENAME))
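# Note: hf_hub_download fetches each file from the Hub on the first run and
# reuses the local Hugging Face cache on subsequent runs.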
 
def encode_categorical_columns(df):
    # Label-encode every object (string) column in place
    label_encoder = LabelEncoder()
    ordinal_columns = df.select_dtypes(include=['object']).columns

    for col in ordinal_columns:
        df[col] = label_encoder.fit_transform(df[col])

    # One-hot encode any object columns that were not label-encoded above
    # (with the current numeric inputs there are none, so this is effectively a no-op)
    nominal_columns = df.select_dtypes(include=['object']).columns.difference(ordinal_columns)
    df = pd.get_dummies(df, columns=nominal_columns, drop_first=True)

    return df

# Define the prediction function
def predict_performance(Year, Instagram_Advertising, Facebook_Advertising, Event_Expenses, Internet_Expenses):
    # Prepare the input data: the independent variables for the admission forecast
    input_data = [[Year, Instagram_Advertising, Facebook_Advertising, Event_Expenses, Internet_Expenses]]

    # Feature names must match the columns used when the model and scaler were fitted
    feature_names = ["Year", "Instagram_Advertising", "Facebook_Advertising", "Event_Expenses", "Internet_Expenses"]

    # Create a pandas DataFrame with the input data and feature names
    input_df = pd.DataFrame(input_data, columns=feature_names)

    # Encode categorical columns (a no-op here, since every input is numeric)
    df = encode_categorical_columns(input_df)

    # Scale the input data using the loaded scaler
    scaled_input = scaler.transform(df)

    # Make a prediction using the loaded model
    prediction = model.predict(scaled_input)[0]

    return f"Forecast number of admitted students: {prediction:,.2f}"
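
# Quick local check with hypothetical budget values, bypassing the Gradio UI:
#   print(predict_performance(2024, 20000, 15000, 30000, 10000))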

# Create the Gradio app
iface = gr.Interface(
    fn=predict_performance,
    inputs=[
        gr.Slider(minimum=2024, maximum=2025, step=1, label="Year"),
        gr.Slider(minimum=10000, maximum=50000, step=500, label="Instagram_Advertising"),
        gr.Slider(minimum=10000, maximum=50000, step=500, label="Facebook_Advertising"),
        gr.Slider(minimum=20000, maximum=50000, step=500, label="Event_Expenses"),
        gr.Slider(minimum=5000, maximum=25000, step=500, label="Internet_Expenses")
    ],
    outputs="text",
    title="Student Admission Forecast",
    description="Forecast the number of admitted students from advertising and expense budgets."
)
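# gr.Interface passes the slider values to predict_performance positionally, in the order listed above.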

# Run the app
if __name__ == "__main__":
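    # share=True also creates a temporary public gradio.live link alongside the local server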
    iface.launch(share=True)