Skip to content

超参数调优（Hyperparameter Tuning）

使用 optuna 库进行自动化超参数搜索

示例:

import optuna

# Select the compute device once at module level: use the GPU when CUDA is
# available, otherwise fall back to CPU. Model and batches below are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def objective(trial, n_epochs=5):
    """Optuna objective: train a fresh MyCNN with a sampled learning rate
    and return its test-set accuracy (the study maximizes this value).

    Args:
        trial: Optuna trial object used to sample hyperparameters.
        n_epochs: Number of training epochs. Defaults to 5, matching the
            previously hard-coded value, so existing callers are unchanged.

    Returns:
        float: Classification accuracy on ``testloader`` in [0, 1].
    """
    # Sample lr on a log scale — learning-rate effects are multiplicative,
    # so log-uniform sampling explores orders of magnitude evenly.
    lr = trial.suggest_float("lr", 1e-4, 1e-2, log=True)

    model = MyCNN().to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    # --- training loop ---
    for epoch in range(n_epochs):
        model.train()
        for inputs, labels in trainloader:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()

    # --- evaluation on the held-out test set ---
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed: saves memory and time
        for inputs, labels in testloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            # argmax over the class dimension gives the predicted label
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    return correct / total

def print_progress(study, trial):
    """Optuna callback: log each finished trial's learning rate and accuracy.

    Args:
        study: The Optuna study (unused; required by the callback signature).
        trial: The just-finished FrozenTrial.
    """
    # Bug fix: trial.value is None for pruned/failed trials, and "lr" may be
    # absent from trial.params if the trial failed before sampling it.
    # Formatting None with ``:.6f``/``:.4f`` raises TypeError and would abort
    # the whole study from inside the callback — fall back to "n/a" instead.
    lr = trial.params.get("lr")
    lr_text = f"{lr:.6f}" if lr is not None else "n/a"
    acc_text = f"{trial.value:.4f}" if trial.value is not None else "n/a"
    print(f"Trial {trial.number}: lr={lr_text}, acc={acc_text}")

# Run the hyperparameter search: the study maximizes the accuracy returned
# by ``objective``, and ``print_progress`` logs every trial as it finishes.
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10, callbacks=[print_progress])

# Report the best learning rate found across all trials.
print(f"Best lr: {study.best_params}")


评论 #