How to plot loss and accuracy with TensorBoard

#python #pytorch #cross-validation #tensorboard #loss-function

Question:

I have three datasets (train, test, and validation). I concatenate the train set and the test set to run k-fold cross-validation; the validation set is currently unused. I am new to TensorBoard. From a previous question I can already plot loss and accuracy during training for each epoch. How can I also plot loss and accuracy on the test data for each epoch, since I want to see the performance at every epoch? Should I use the validation set for this, and if so, how?
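As I understand it (an assumption on my part, since I am new to TensorBoard), the scalar charts are grouped by the part of the tag before the slash, so logging train and test metrics like this should put both curves into shared "Loss" and "Accuracy" sections:

writer.add_scalar('Loss/train', train_loss, epoch)
writer.add_scalar('Loss/test', test_loss, epoch)
writer.add_scalar('Accuracy/train', train_acc, epoch)
writer.add_scalar('Accuracy/test', test_acc, epoch)

Here train_loss, test_loss, train_acc and test_acc are placeholder names for the per-epoch values. My full loop so far: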

# Imports the snippet needs; CustomDataset, PPS, params, device, writer,
# results, k_folds, N_EPOCHES, LEARNING_RATE, path and name_file are
# defined elsewhere in my script.
import torch
import torch.optim as optim
from torch.utils.data import ConcatDataset
from torch.utils.tensorboard import SummaryWriter
from sklearn.model_selection import KFold

# Prepare dataset by concatenating Train/Test part; we split later.
training_set = CustomDataset('one_hot_train_data.txt', 'train_3states_target.txt')
training_generator = torch.utils.data.DataLoader(training_set, **params)
val_set = CustomDataset('one_hot_val_data.txt', 'val_3states_target.txt')
test_set = CustomDataset('one_hot_test_data.txt', 'test_3states_target.txt')
testloader_ = torch.utils.data.DataLoader(test_set, **params)
dataset = ConcatDataset([training_set, test_set])
kfold = KFold(n_splits=k_folds, shuffle=True)

# Start print
print('--------------------------------')

# K-fold Cross Validation model evaluation
for fold, (train_ids, test_ids) in enumerate(kfold.split(dataset)):
    # Print
    print(f'FOLD {fold}')
    print('--------------------------------')
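    # Note (my assumption, not from the original code): with one global
    # `writer`, every fold logs to the same tags at the same epoch steps,
    # so curves from different folds overlap in TensorBoard. A per-fold
    # writer would keep them separate, e.g.:
    # writer = SummaryWriter(log_dir='runs/fold_' + str(fold))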

    # Sample elements randomly from a given list of ids, no replacement.
    train_subsampler = torch.utils.data.SubsetRandomSampler(train_ids)
    test_subsampler = torch.utils.data.SubsetRandomSampler(test_ids)

    # Define data loaders for training and testing data in this fold
    trainloader = torch.utils.data.DataLoader(
        dataset, **params, sampler=train_subsampler)
    testloader = torch.utils.data.DataLoader(
        dataset, **params, sampler=test_subsampler)
    # Init the neural network
    model = PPS()
    model.to(device)
    # Initialize optimizer
    optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
    # Run the training loop for defined number of epochs
    for epoch in range(0, N_EPOCHES):
        # Print epoch
        print(f'Starting epoch {epoch + 1}')
        # Set current loss value
        # Reset per-epoch statistics
        running_loss = 0.0
        epoch_loss = 0.0
        a = []  # per-batch counts of correct predictions
        # Iterate over the DataLoader for training data
        for i, data in enumerate(trainloader, 0):
            inputs, targets = data
            inputs = inputs.unsqueeze(-1)
            inputs = inputs.to(device)
            targets = targets.to(device)
            # Zero the gradients
            optimizer.zero_grad()
            # Forward pass; the model returns (loss, outputs)
            loss, outputs = model(inputs, targets)

            # Perform backward pass
            loss.backward()
            # Perform optimization
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            epoch_loss += loss.item()  # .item() avoids keeping the graph alive across batches
            a.append((outputs == targets).sum().item())
            # print(outputs.shape,outputs.shape[0])

            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000), "acc",
                      (outputs == targets).sum().item() / float(outputs.shape[0]))
                running_loss = 0.0
        print("epoch", epoch   1, "acc", sum(a) / len(train_subsampler), "loss", epoch_loss / len(trainloader))
        accuracy = 100 * sum(a) / len(training_set)
        avg_loss = sum(a) / len(training_set)
        writer.add_scalar('train/loss',
                          avg_loss.item(),
                          epoch)
        writer.add_scalar('accuracy/loss',
                          accuracy,
                          epoch)
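        # --- Sketch (not in my original code): what I think the per-epoch
        # test logging could look like, so the test curves line up with the
        # train curves above. Assumes model(inputs, targets) returns
        # (loss, outputs) as elsewhere in this script.
        model.eval()
        test_loss, test_correct, test_total = 0.0, 0, 0
        with torch.no_grad():
            for inputs, targets in testloader:
                inputs = inputs.unsqueeze(-1).to(device)
                targets = targets.to(device)
                loss, outputs = model(inputs, targets)
                test_loss += loss.item()
                test_correct += (outputs == targets).sum().item()
                test_total += targets.size(0)
        writer.add_scalar('Loss/test', test_loss / len(testloader), epoch)
        writer.add_scalar('Accuracy/test', 100.0 * test_correct / test_total, epoch)
        model.train()  # back to training mode for the next epoch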
    # Save an end-of-fold checkpoint (epoch holds the last epoch index here)
    state = {'epoch': epoch + 1, 'state_dict': model.state_dict(),
             'optimizer': optimizer.state_dict()}
    torch.save(state, path + name_file + "model_epoch_i_" + str(epoch) + str(fold) + ".cnn")
    # Print about testing
    print('Starting testing')

    # Evaluation for this fold
    correct, total = 0, 0
    model.eval()
    with torch.no_grad():
        # Iterate over the test data and generate predictions
        for i, data in enumerate(testloader, 0):
            # Get inputs and move them to the same device as the model
            inputs, targets = data
            inputs = inputs.unsqueeze(-1)
            inputs = inputs.to(device)
            targets = targets.to(device)  # needed so (outputs == targets) compares on one device
            # Generate outputs; the model returns (loss, outputs)
            loss, outputs = model(inputs, targets)

            # Update total and correct counts
            total += targets.size(0)
            correct += (outputs == targets).sum().item()

    # Print accuracy for this fold
    print('Accuracy for fold %d: %.2f %%' % (fold, 100.0 * correct / total))
    print('--------------------------------')
    results[fold] = 100.0 * correct / total

# Print fold results
print(f'K-FOLD CROSS VALIDATION RESULTS FOR {k_folds} FOLDS')
print('--------------------------------')
total_acc = 0.0  # renamed from `sum`, which shadowed the built-in
for key, value in results.items():
    print(f'Fold {key}: {value} %')
    total_acc += value
print(f'Average: {total_acc / len(results)} %')
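For the validation part of the question, the sketch below is my current idea (a rough sketch; valloader is a name I am introducing here, not something already in my code): since val_set is untouched by the k-fold concatenation above, evaluate on it with the same pattern and log it under a third tag, once per epoch or once per fold:

valloader = torch.utils.data.DataLoader(val_set, **params)

model.eval()
val_loss, val_correct, val_total = 0.0, 0, 0
with torch.no_grad():
    for inputs, targets in valloader:
        inputs = inputs.unsqueeze(-1).to(device)
        targets = targets.to(device)
        loss, outputs = model(inputs, targets)
        val_loss += loss.item()
        val_correct += (outputs == targets).sum().item()
        val_total += targets.size(0)
writer.add_scalar('Loss/val', val_loss / len(valloader), epoch)
writer.add_scalar('Accuracy/val', 100.0 * val_correct / val_total, epoch)
model.train()

Is this the right way to bring the validation set into this setup?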