How do I construct a network whose input is a coordinate on a cross-section circle and whose output is the sum of the squares of that coordinate? Inputs = [x, y, 1], outputs = [x^2 + y^2].
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
import torch.optim
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
features = torch.tensor([[8.3572,-11.3008,1],[6.2795,-12.5886,1],[4.0056,-13.4958,1]
,[1.6219,-13.9933,1],[-0.8157,-14.0706,1],[-3.2280,-13.7250,1]
,[-5.5392,-12.9598,1],[-7.6952,-11.8073,1],[-9.6076,-10.3035,1],
[-11.2532,-8.4668,1],[-12.5568,-6.3425,1],[-13.4558,-4.0691,1],
[-13.9484,-1.7293,1],[-14.0218,0.7224,1],[-13.6791,3.1211,1],
[-12.9064,5.4561,1],[-11.7489,7.6081,1],[-10.2251,9.5447,1],
[5.4804,12.8044,1],[7.6332,11.6543,1],[9.5543,10.1454,1],
[11.1890,8.3117,1],[12.4705,6.2460,1],[13.3815,3.9556,1],
[13.8733,1.5884,1],[13.9509,-0.8663,1],[13.6014,-3.2793,1],
[12.8572,-5.5526,1],[11.7042,-7.7191,1],[10.1761,-9.6745,1],
[-8.4301,11.1605,1],[-6.3228,12.4433,1],[-4.0701,13.3401,1],
[-1.6816,13.8352,1],[0.7599,13.9117,1],[3.1672,13.5653,1]]).to(device)
labels = []
for i in range(features.shape[0]):
    label = features[i][0] ** 2 + features[i][1] ** 2   # target: x^2 + y^2
    labels.append(label)
labels = torch.stack(labels).to(device)
num_input, num_hidden, num_output = 3, 64, 1
net = nn.Sequential(
    nn.Linear(num_input, num_hidden),
    nn.Linear(num_hidden, num_output)
).to(device)
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.xavier_normal_(m.weight)
net.apply(init_weights)

def init_bias(m):
    if type(m) == nn.Linear:
        nn.init.zeros_(m.bias)   # xavier_normal_ requires a 2-D tensor, so use zeros_ for the 1-D bias
net.apply(init_bias)
loss = nn.MSELoss()
num_epochs = 10
batch_size = 6
lr=0.001
trainer = torch.optim.RAdam(net.parameters(),lr=lr)
dataset = TensorDataset(features,labels)
data_loader = DataLoader(dataset,batch_size=batch_size,shuffle=True)
for i in range(num_epochs):
    for X, y in data_loader:
        y_hat = net(X)
        l = loss(y_hat, y.reshape(y_hat.shape))
        trainer.zero_grad()
        l.backward()
        trainer.step()
    with torch.no_grad():
        print(l.item())   # loss on the last batch of the epoch
I made a network with one hidden layer, but the loss stays very high and the model is almost impossible to fit.
To improve the convergence of your code, try the following changes:
# standardize x and y; the constant third column is left untouched
mean = features[:,:2].mean(dim=0)
std = features[:,:2].std(dim=0)
features[:,:2] = (features[:,:2] - mean) / std
num_epochs = 100
batch_size = 2
I ran your code with the above changes, and it converges.
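For reference, here is a minimal, self-contained sketch with those changes applied. The coordinates are regenerated synthetically as 36 points on a circle of radius 14 (an assumed stand-in for the hard-coded list above); the network, loss function, and optimizer settings mirror the original code, so treat it as a sketch rather than a drop-in replacement.

import math
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Synthetic stand-in for the hard-coded coordinates: 36 points on a circle of radius 14.
theta = torch.linspace(0, 2 * math.pi, 36)
features = torch.stack([14 * torch.cos(theta),
                        14 * torch.sin(theta),
                        torch.ones_like(theta)], dim=1).to(device)
labels = features[:, 0] ** 2 + features[:, 1] ** 2   # targets: x^2 + y^2

# Standardize x and y only; the third column is a constant 1, so its std is zero.
mean = features[:, :2].mean(dim=0)
std = features[:, :2].std(dim=0)
features[:, :2] = (features[:, :2] - mean) / std

net = nn.Sequential(nn.Linear(3, 64), nn.Linear(64, 1)).to(device)
loss = nn.MSELoss()
trainer = torch.optim.RAdam(net.parameters(), lr=0.001)

data_loader = DataLoader(TensorDataset(features, labels), batch_size=2, shuffle=True)
for epoch in range(100):
    epoch_loss = 0.0
    for X, y in data_loader:
        y_hat = net(X)
        l = loss(y_hat, y.reshape(y_hat.shape))
        trainer.zero_grad()
        l.backward()
        trainer.step()
        epoch_loss += l.item() * X.shape[0]
    print(f"epoch {epoch + 1}: mean loss {epoch_loss / len(features):.4f}")

The only substantive differences from the original code are the standardized inputs, the smaller batch size, and the larger epoch count.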