'list' object has no attribute 'to' when calling to(self.device) in a PyTorch Trainer class
```python
import os
import time

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader


class Trainer:
    def __init__(self, modelName, model, train_dataset, val_dataset,
                 loss_function, optimizer, path_save_best_model, device):
        self.train_dataset = train_dataset
        self.test_dataset = val_dataset
        self.model = model
        self.modelName = modelName
        self.loss = loss_function
        self.optimizer = optimizer
        self.path_save_best_model = path_save_best_model
        self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)
        print('using device: ', self.device)

    def _train(self, train_dataset, num_epochs=1, clip=True):
        # batch_size and num_dataset_workers are globals set under __main__
        train_dataloader = DataLoader(self.train_dataset, batch_size=batch_size,
                                      shuffle=False, num_workers=num_dataset_workers)
        val_dataloader = DataLoader(self.test_dataset, batch_size=batch_size,
                                    shuffle=False, num_workers=num_dataset_workers)
        best_val_f1 = -1
        torch.cuda.empty_cache()
        start_time = time.time()
        for epoch in range(num_epochs):
            self.model.train()
            train_losses = []
            print("epoch is ", epoch)
            for batch_idx, batch in enumerate(train_dataloader):
                # AttributeError raised here: at least one value v in the
                # batch dict is a plain Python list, which has no .to()
                batch = {k: v.to(self.device) for k, v in batch.items()}
                logits, _ = self.model(batch)
                cost = self.loss(logits, batch['targets'].float().to(self.device))
                self.optimizer.zero_grad()
                cost.backward()
                if clip:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                self.optimizer.step()
                train_losses.append(cost.item())
            print('train loss is ', np.average(train_losses))
            with torch.set_grad_enabled(False):
                matrix = compute_binary_accuracy(self.model, val_dataloader, self.device)
                curr_val_f1 = matrix["f1_score"]
                curr_val_precision = matrix["precision"]
                curr_val_recall = matrix["recall"]
                if curr_val_f1 > best_val_f1:
                    best_val_f1 = curr_val_f1
                    print("Saving Best Matrix", matrix)
                    torch.save(self.model.state_dict(), self.path_save_best_model)
                    SaveBestResult(matrix, ResultfilePath, self.modelName)
                else:
                    print("F1-Score has not improved and it is : ", best_val_f1)
        print('Time elapsed: {:.2f} min'.format((time.time() - start_time) / 60))
```
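The failing line is the dictionary comprehension marked above: `DataLoader`'s default `collate_fn` only produces a stacked tensor for fields the `Dataset` already returns as tensors (or numbers); a field returned as a plain Python list or string typically comes back as a list, and a list has no `.to()` method. A throwaway sketch to confirm which keys are affected (the name `probe_loader` is made up; it assumes `train_dataset` yields dicts, as the comprehension over `batch.items()` implies):

```python
from torch.utils.data import DataLoader

# Inspect one collated batch: any value printed as <class 'list'>
# is what makes v.to(self.device) fail.
probe_loader = DataLoader(train_dataset, batch_size=4)
first_batch = next(iter(probe_loader))
for key, value in first_batch.items():
    print(key, type(value))
```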
```python
if __name__ == "__main__":
    train_dataset = ReviewDataset(Clean_Train_Data, preprocessed=True, vocab=vocab)
    test_dataset = ReviewDataset(Clean_Test_Data, preprocessed=True, vocab=vocab)
    val_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=20)
    batch_size = 128
    num_dataset_workers = 20
    model = MultiHeadAttBiLSTM(weights, embedding_length, hidden_size,
                               output_size, batch_size)
    path_save_best_model = os.path.join(BestModelDir, modelName + ".pt")
    loss_function = F.binary_cross_entropy_with_logits
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
    trainer = Trainer(modelName, model, train_dataset, test_dataset,
                      loss_function, optimizer, path_save_best_model, DEVICE)
    # AttributeError: 'list' object has no attribute 'to' is raised inside this call
    trainer._train(train_dataset)
    result_list = result_dataset_generator(val_dataloader)
```
Inside the `Trainer` class's `_train` method, one of the values `v` in the batch dictionary is a list, so `v.to(self.device)` fails with `AttributeError: 'list' object has no attribute 'to'`. What is the mistake in the above code?
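For reference, a minimal sketch of the usual workaround, assuming the list-valued fields are not needed on the GPU: guard the transfer with `torch.is_tensor` so only tensor values are moved. The helper name `batch_to_device` is hypothetical, introduced here for illustration:

```python
import torch

def batch_to_device(batch, device):
    """Move tensor-valued entries of a batch dict to `device`;
    pass non-tensor entries (lists, strings, ...) through unchanged."""
    return {k: v.to(device) if torch.is_tensor(v) else v
            for k, v in batch.items()}
```

The failing comprehension would then become `batch = batch_to_device(batch, self.device)`. The cleaner long-term fix is to have `ReviewDataset.__getitem__` return tensors (e.g. via `torch.tensor(...)`) for every field the model consumes, or to pass a custom `collate_fn` to the `DataLoader`, so the default collation never yields lists.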
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow