durrani committed on
Commit 4437549
1 Parent(s): 61769f0
Files changed (1)
  1. app.py +62 -12
app.py CHANGED
@@ -1,12 +1,62 @@
- import numpy as np
- import gradio as gr
- #function to predict the input hours
- def predict_score(hours):
-     #hours = np.array(hours)
-     pred_score = -0.5738734424645411 + 2.1659122905141825*hours
-     return pred_score #np.round(pred_score[0], 2)
- input = gr.inputs.Number(label='Number of Hours studied')
- output = gr.outputs.Textbox(label='Predicted Score')
- gr.Interface( fn=predict_score,
-               inputs=input,
-               outputs=output).launch();
+ import torch
+
+ # Input data
+ x1 = torch.tensor([50, 60, 70, 80, 90])
+ x2 = torch.tensor([20, 21, 22, 23, 24])
+ y_actual = torch.tensor([30, 35, 40, 45, 50])
+
+ # Learning rate and maximum number of iterations.
+ # Note: with these unscaled features (x1 runs up to 90), a rate of 0.01 makes
+ # the updates overshoot and the cost diverge to inf/nan; 1e-4 keeps the
+ # descent stable.
+ alpha = 1e-4
+ max_iters = 1000
+
+ # Initial values for Theta0, Theta1, and Theta2
+ Theta0 = torch.tensor(0.0, requires_grad=True)
+ Theta1 = torch.tensor(0.0, requires_grad=True)
+ Theta2 = torch.tensor(0.0, requires_grad=True)
+
+ # Start the iteration counter
+ iter_count = 0
+
+ # Loop until convergence or the maximum number of iterations
+ while iter_count < max_iters:
+     # Compute the predicted output
+     y_pred = Theta0 + Theta1 * x1 + Theta2 * x2
+
+     # Compute the errors
+     errors = y_pred - y_actual
+
+     # Compute the cost function
+     cost = torch.sum(errors ** 2) / (2 * len(x1))
+
+     # Print the cost function every 100 iterations
+     if iter_count % 100 == 0:
+         print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(
+             iter_count, cost.item(), Theta0.item(), Theta1.item(), Theta2.item()))
+
+     # Check for convergence (cost decreasing by less than 0.0001)
+     if iter_count > 0 and abs(cost.item() - prev_cost) < 0.0001:
+         print("Converged after {} iterations".format(iter_count))
+         break
+
+     # Perform automatic differentiation to compute gradients
+     cost.backward()
+
+     # Update Theta0, Theta1, and Theta2 using gradient descent
+     with torch.no_grad():
+         Theta0 -= alpha * Theta0.grad
+         Theta1 -= alpha * Theta1.grad
+         Theta2 -= alpha * Theta2.grad
+
+     # Reset gradients for the next iteration
+     Theta0.grad.zero_()
+     Theta1.grad.zero_()
+     Theta2.grad.zero_()
+
+     # Update the iteration counter and the previous cost
+     # (.item() stores a plain float so the autograd graph is not retained)
+     iter_count += 1
+     prev_cost = cost.item()
+
+ # Print the final values of Theta0, Theta1, and Theta2
+ print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
+ print("Final Cost: Cost = {}".format(cost.item()))
+ print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))