import numpy as np
import torch
import torch.nn as nn


def ba_activation(x, weights, a, epsilon):
    # Ensure x is a torch tensor
    x = torch.as_tensor(x, dtype=torch.float32)
    # Modulate inputs based on the learnable weights for the activation
    x = weights * x
    # Apply the Ba-inspired operation:
    # clamp x to [-1, 1] to stabilize the power/log terms below
    x_normalized = torch.clamp(x, -1, 1)
    fractional_inspired = torch.pow(torch.abs(x_normalized), x_normalized)
    activation_result = epsilon * torch.cos(
        np.pi * a * fractional_inspired
        * torch.log(torch.abs(fractional_inspired) + 1e-7)
    )
    # Apply an additional non-linearity to keep the output bounded and stable
    activation_result = torch.tanh(activation_result)
    return activation_result


# Define a custom model using the Ba-inspired activation function
class CustomModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(CustomModel, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)
        # Per-unit modulation weights used inside the activation
        self.weights = nn.Parameter(torch.randn(hidden_size))
        self.a = 0.5        # Parameter for the Ba-inspired activation
        self.epsilon = 0.1  # Parameter for the Ba-inspired activation

    def forward(self, x):
        x = self.linear1(x)
        # Use the Ba-inspired activation between the two linear layers
        x = ba_activation(x, self.weights, self.a, self.epsilon)
        x = self.linear2(x)
        return x
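
# A minimal usage sketch, not part of the original listing: the dimensions
# (input_size=4, hidden_size=8, output_size=2) and the random batch below are
# illustrative assumptions, chosen only to show one forward pass end to end.
if __name__ == "__main__":
    torch.manual_seed(0)  # reproducible random weights and inputs
    model = CustomModel(input_size=4, hidden_size=8, output_size=2)
    batch = torch.randn(16, 4)   # 16 samples, 4 features each
    outputs = model(batch)       # forward pass through the Ba-inspired activation
    print(outputs.shape)         # torch.Size([16, 2])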