import numpy as np
import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import gradio as gr
from sklearn import datasets
from sklearn import linear_model
from sklearn.svm import l1_min_c


def train_it(solver, intercept_scaling, tol, max_iter):
    # Reduce Iris to a binary problem: keep classes 0 and 1 only.
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    X = X[y != 2]
    y = y[y != 2]

    X /= X.max()

    # Regularization path: start at the smallest C that yields a non-empty
    # model (l1_min_c) and sweep over seven orders of magnitude.
    cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)

    clf = linear_model.LogisticRegression(
        penalty="l1",
        solver=solver,
        tol=tol,
        max_iter=int(max_iter),
        warm_start=True,  # reuse the previous solution as each fit's starting point
        intercept_scaling=intercept_scaling,
    )
    coefs_ = []
    for c in cs:
        clf.set_params(C=c)
        clf.fit(X, y)
        coefs_.append(clf.coef_.ravel().copy())
    coefs_ = np.array(coefs_)

    # Build a fresh figure on every call so repeated clicks don't draw on top
    # of each other, and return it for gr.Plot to render.
    fig, ax = plt.subplots()
    ax.plot(np.log10(cs), coefs_, marker="o")
    ax.set_xlabel("log(C)")
    ax.set_ylabel("Coefficients")
    ax.set_title("Logistic Regression Path")
    ax.axis("tight")
    return fig


with gr.Blocks() as demo:
    gr.Markdown("# Regularization path of L1- Logistic Regression")
    gr.Markdown(
        """
        This interactive demo is based on the [Regularization path of L1- Logistic Regression](https://scikit-learn.org/stable/auto_examples/linear_model/plot_logistic_path.html)
        example. It shows how to fit L1-penalized logistic regression models on a binary
        classification problem derived from the Iris dataset. The regularization path plots
        how the coefficients move from exactly 0 to non-zero values as the regularization
        becomes progressively looser.
        """
    )
    gr.Markdown(
        """
        `Solver`: the algorithm used for the optimization problem. You can choose between
        "liblinear" and "saga"; "liblinear" is well suited to small datasets, while "saga"
        is faster on large ones.

        `Intercept Scaling`: a scaling factor applied to the intercept term (used only by
        the "liblinear" solver). A larger value lessens the effect of the L1 penalty on
        the intercept.

        `Tolerance`: the tolerance for the stopping criterion. A smaller value yields a
        more precise solution but may increase training time.

        `Maximum Iterations`: the maximum number of iterations the solver may perform. A
        higher value gives the solver more room to converge but may increase training time.

        By adjusting these parameters interactively, you can explore how they influence
        the regularization path of the L1-penalized logistic regression model.
        """
    )
    with gr.Row():
        with gr.Column():
            solver = gr.Radio(["liblinear", "saga"], label="Solver", value="liblinear")
            intercept_scaling = gr.Slider(
                value=10000.0,
                minimum=1.0,
                maximum=100000.0,
                step=1.0,
                label="Intercept Scaling",
            )
            tol = gr.Slider(
                value=1e-5, minimum=1e-6, maximum=1e-2, step=1e-6, label="Tolerance"
            )
            max_iter = gr.Slider(
                value=1_000_000,
                minimum=1000,
                maximum=1_000_000,
                step=1000,
                label="Maximum Iterations",
            )
            train_button = gr.Button("Train")
        with gr.Column():
            plot = gr.Plot(label="Regularization Path")

    train_button.click(
        train_it,
        inputs=[solver, intercept_scaling, tol, max_iter],
        outputs=plot,
    )

demo.launch()
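

# --- Optional headless sanity check (a minimal sketch, not part of the
# original demo) -------------------------------------------------------------
# Recomputes the path with the default widget settings and reports how many
# coefficients are non-zero at each C; the count should grow as C increases,
# since a larger C means a weaker L1 penalty. The helper name
# `path_sparsity_check` is made up for illustration. It is only defined here,
# after demo.launch(), because launch() blocks until the server is stopped;
# call it manually (e.g. from a REPL) to inspect the numbers.
def path_sparsity_check():
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    X, y = X[y != 2], y[y != 2]
    X = X / X.max()
    cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
    clf = linear_model.LogisticRegression(
        penalty="l1",
        solver="liblinear",
        tol=1e-5,
        max_iter=1_000_000,
        warm_start=True,
        intercept_scaling=10000.0,
    )
    for c in cs:
        clf.set_params(C=c)
        clf.fit(X, y)
        # Count features with non-zero weights at this regularization strength.
        n_active = np.count_nonzero(clf.coef_)
        print(f"C={c:.4g}: {n_active} non-zero coefficient(s)")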