import torch
import gradio as gr

# Predict the score from the number of new students (x1) and the temperature (x2)
def predict_score(x1, x2):
    # Pre-trained coefficients, hard-coded so the demo works without retraining
    Theta0 = torch.tensor(-0.5738734424645411)
    Theta1 = torch.tensor(2.1659122905141825)
    Theta2 = torch.tensor(0.0)
    pred_score = Theta0 + Theta1 * x1 + Theta2 * x2
    return pred_score.item()
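
# Example: predict_score(75, 22)
#   = -0.5738734424645411 + 2.1659122905141825 * 75 + 0.0 * 22 ≈ 161.87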

# UI components (the old gr.inputs / gr.outputs API was removed in Gradio 3+)
input1 = gr.Number(label="Number of new students")
input2 = gr.Number(label="Temperature")

output = gr.Textbox(label="Predicted Score")

# Gradio interface for the prediction function. It is launched at the very end
# of the script, because launch() blocks and the training demo below must run first.
demo = gr.Interface(fn=predict_score, inputs=[input1, input2], outputs=output)

# Training data
x1 = torch.tensor([50.0, 60.0, 70.0, 80.0, 90.0])
x2 = torch.tensor([20.0, 21.0, 22.0, 23.0, 24.0])
y_actual = torch.tensor([30.0, 35.0, 40.0, 45.0, 50.0])
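# Note: this toy data is exactly linear (y_actual = 0.5 * x1 + 5), and x2 is a
# linear function of x1 (x2 = x1 / 10 + 15), so the two features are perfectly
# collinear and many (Theta1, Theta2) pairs fit the data equally well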

# Learning rate and maximum number of iterations (the features are unscaled,
# so the step size has to be small for gradient descent to stay stable)
alpha = 0.0001
max_iters = 1000
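# For a quadratic cost such as this one, gradient descent converges only if
# alpha < 2 / lambda_max, where lambda_max is the largest eigenvalue of the
# Hessian (1/n) * X^T X; with x1 values up to 90 that bound is far below 0.01,
# which is why a larger step size makes the updates blow up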

# Initial values for Theta0, Theta1, and Theta2
Theta0 = torch.tensor(0.0, requires_grad=True)
Theta1 = torch.tensor(0.0, requires_grad=True)
Theta2 = torch.tensor(0.0, requires_grad=True)

# Start the iteration counter
iter_count = 0

# Loop until convergence or maximum number of iterations
while iter_count < max_iters:
    # Compute the predicted output
    y_pred = Theta0 + Theta1 * x1 + Theta2 * x2
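    # x1 and x2 hold all five samples, so this evaluates the hypothesis
    # h(x) = Theta0 + Theta1 * x1 + Theta2 * x2 for the whole training set at once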

    # Compute the errors
    errors = y_pred - y_actual

    # Compute the cost function
    cost = torch.sum(errors ** 2) / (2 * len(x1))
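    # This is the standard least-squares cost J = (1 / (2n)) * sum((h(x_i) - y_i)^2);
    # the extra factor of 1/2 simply cancels the 2 produced by differentiation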

    # Print the cost function every 100 iterations
    if iter_count % 100 == 0:
        print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(
            iter_count, cost.item(), Theta0.item(), Theta1.item(), Theta2.item()))

    # Check for convergence (if the cost is decreasing by less than 0.0001)
    if iter_count > 0 and torch.abs(cost - prev_cost) < 0.0001:
        print("Converged after {} iterations".format(iter_count))
        break

    # Perform automatic differentiation to compute gradients
    cost.backward()
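    # backward() fills each Theta_j.grad with dJ/dTheta_j, e.g.
    # dJ/dTheta1 = (1 / n) * sum((y_pred_i - y_actual_i) * x1_i)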

    # Update Theta0, Theta1, and Theta2 using gradient descent
    with torch.no_grad():
        Theta0 -= alpha * Theta0.grad
        Theta1 -= alpha * Theta1.grad
        Theta2 -= alpha * Theta2.grad

        # Reset gradients for the next iteration
        Theta0.grad.zero_()
        Theta1.grad.zero_()
        Theta2.grad.zero_()

    # Update the iteration counter and remember the cost for the convergence check
    # (detached so the comparison does not keep the old computation graph alive)
    iter_count += 1
    prev_cost = cost.detach()

# Print the final parameter values, cost, and predictions
print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
print("Final cost: {}".format(cost.item()))
print("Final predictions: y_pred = {}, y_actual = {}".format(y_pred.detach(), y_actual))