sulpha commited on
Commit
578504d
·
1 Parent(s): b0a12a5

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -0
app.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ===========================================================================
3
+ Gradio Demo to Plot Ridge coefficients as a function of the regularization
4
+ ===========================================================================
5
+
6
+ Shows the effect of collinearity in the coefficients of an estimator.
7
+
8
+ .. currentmodule:: sklearn.linear_model
9
+
10
+ :class:`Ridge` Regression is the estimator used in this example.
11
+ Each color represents a different feature of the
12
+ coefficient vector, and this is displayed as a function of the
13
+ regularization parameter.
14
+
15
+ This example also shows the usefulness of applying Ridge regression
16
+ to highly ill-conditioned matrices. For such matrices, a slight
17
+ change in the target variable can cause huge variances in the
18
+ calculated weights. In such cases, it is useful to set a certain
19
+ regularization (alpha) to reduce this variation (noise).
20
+
21
+ When alpha is very large, the regularization effect dominates the
22
+ squared loss function and the coefficients tend to zero.
23
+ At the end of the path, as alpha tends toward zero
24
+ and the solution tends towards the ordinary least squares, coefficients
25
+ exhibit big oscillations. In practice it is necessary to tune alpha
26
+ in such a way that a balance is maintained between both.
27
+ """
28
+
29
+ # Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
30
+ # License: BSD 3 clause
31
+
32
+ import numpy as np
33
+ import matplotlib.pyplot as plt
34
+ from sklearn import linear_model
35
+ import gradio as gr
36
+
37
def make_plot(size_X, min_alpha, max_alpha):
    """Plot Ridge coefficients as a function of the regularization strength.

    Parameters
    ----------
    size_X : int
        Size of the square Hilbert design matrix, shape (size_X, size_X).
    min_alpha : float
        Base-10 exponent of the smallest regularization strength.
    max_alpha : float
        Base-10 exponent of the largest regularization strength.

    Returns
    -------
    matplotlib.figure.Figure
        Figure with one line per coefficient plotted against alpha on a
        reversed, log-scaled x-axis.
    """
    # X is the size_X x size_X Hilbert matrix — a classically ill-conditioned
    # matrix, which makes the fitted coefficients very sensitive to alpha.
    # (The original comment said "10x10", but the size is a parameter.)
    X = 1.0 / (np.arange(1, size_X + 1) + np.arange(0, size_X)[:, np.newaxis])
    y = np.ones(size_X)

    # Compute the regularization path: one Ridge fit per alpha value.
    n_alphas = 200
    alphas = np.logspace(min_alpha, max_alpha, n_alphas)

    coefs = []
    for a in alphas:
        ridge = linear_model.Ridge(alpha=a, fit_intercept=False)
        ridge.fit(X, y)
        coefs.append(ridge.coef_)

    # Display results. Use the object-oriented API on a dedicated figure
    # instead of the implicit pyplot "current figure" state (plt.gca/plt.xlabel),
    # which is not safe when this function is called concurrently — e.g. from
    # several simultaneous Gradio sessions.
    fig, ax = plt.subplots()
    ax.plot(alphas, coefs)
    ax.set_xscale("log")
    ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis: strongest regularization first
    ax.set_xlabel("alpha")
    ax.set_ylabel("weights")
    ax.set_title("Ridge coefficients as a function of the regularization")
    ax.axis("tight")
    return fig
70
+
71
# Page title shown in the browser tab / Blocks header.
title = 'Plot Ridge coefficients as a function of the regularization'

# Markdown description rendered in the UI. Plain triple-quoted string: the
# original used an f-string with no placeholders, so the prefix was dropped.
# Also fixes the user-visible typo "practise" -> "practice".
model_card = """
## Description
Shows the effect of collinearity in the coefficients of an estimator.

This example also shows the usefulness of applying Ridge regression to highly ill-conditioned matrices.
For such matrices, a slight change in the target variable can cause huge variances in the calculated weights. In such cases, it is useful to set a certain regularization (alpha) to reduce this variation (noise).

When alpha is very large, the regularization effect dominates the squared loss function and the coefficients tend to zero. At the end of the path, as alpha tends toward zero and the solution tends towards the ordinary least squares, coefficients exhibit big oscillations. In practice it is necessary to tune alpha in such a way that a balance is maintained between both.

## Model
currentmodule: sklearn.linear_model

class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the coefficient vector, and this is displayed as a function of the regularization parameter.

"""
89
+
90
# Build the Gradio UI: heading, description, controls, and the output plot.
with gr.Blocks(title=title) as demo:
    gr.Markdown('''
            <div>
            <h1 style='text-align: center'>Plot Ridge coefficients as a function of the regularization</h1>
            </div>
        ''')
    gr.Markdown(model_card)
    gr.Markdown("Author: <a href=\"https://huggingface.co/sulpha\">sulpha</a>")
    # Size of the (square) Hilbert design matrix used by make_plot.
    d0 = gr.Slider(1, 101, value=10, step=10, label='Select Size of Training Set')
    with gr.Column():
        with gr.Tab('Select Alpha Range'):
            # Bug fix: both sliders originally had label='', so the user had
            # no way to tell the min exponent from the max exponent. The
            # values are base-10 exponents fed to np.logspace in make_plot.
            d1 = gr.Slider(-20, 20, value=-10, step=1, label='Min alpha exponent (10^x)')
            d2 = gr.Slider(-20, 20, value=-2, step=1, label='Max alpha exponent (10^x)')

    btn = gr.Button(value='Submit')

    # Name the Plot component instead of creating it anonymously inside the
    # click() call; it is instantiated in the Blocks context, so it renders
    # below the button and receives the figure returned by make_plot.
    plot_output = gr.Plot()
    btn.click(make_plot, inputs=[d0, d1, d2], outputs=[plot_output])

# Guard the launch so importing this module (e.g. in tests) does not start
# the server; Hugging Face Spaces runs app.py as __main__, so behavior there
# is unchanged.
if __name__ == "__main__":
    demo.launch()