-
Notifications
You must be signed in to change notification settings - Fork 0
/
later.py
65 lines (65 loc) · 3.04 KB
/
later.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# NOTE(review): this entire file is commented-out code — a stashed copy of the
# canonical PyTorch MNIST example's main() (the argparse description below
# reads 'PyTorch MNIST Example'; filename "later.py" suggests it was parked
# for later use). Before ever re-enabling it, the following must be fixed:
#   - No imports are present: argparse, torch, torch.optim (as optim),
#     torchvision.datasets / torchvision.transforms, and
#     torch.optim.lr_scheduler.StepLR are all required by the body below.
#   - `use_cuda` is read (in the kwargs branch) but never defined here — the
#     upstream example derives it from args.no_cuda and
#     torch.cuda.is_available(); TODO confirm against the original.
#   - `device` is read (model.to(device)) but never defined here.
#   - `Net`, `train`, and `test` are referenced but not defined in this file.
#   - `--no-mps` is parsed but args.no_mps is never consulted afterward —
#     presumably the macOS/MPS device-selection branch was dropped; verify.
#
# def main():
#     # Training settings
#     parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
#     parser.add_argument('--batch-size', type=int, default=64, metavar='N',
#                         help='input batch size for training (default: 64)')
#     parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
#                         help='input batch size for testing (default: 1000)')
#     parser.add_argument('--epochs', type=int, default=14, metavar='N',
#                         help='number of epochs to train (default: 14)')
#     parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
#                         help='learning rate (default: 1.0)')
#     parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
#                         help='Learning rate step gamma (default: 0.7)')
#     parser.add_argument('--no-cuda', action='store_true', default=False,
#                         help='disables CUDA training')
#     parser.add_argument('--no-mps', action='store_true', default=False,
#                         help='disables macOS GPU training')
#     parser.add_argument('--dry-run', action='store_true', default=False,
#                         help='quickly check a single pass')
#     parser.add_argument('--seed', type=int, default=1, metavar='S',
#                         help='random seed (default: 1)')
#     parser.add_argument('--log-interval', type=int, default=10, metavar='N',
#                         help='how many batches to wait before logging training status')
#     parser.add_argument('--save-model', action='store_true', default=False,
#                         help='For Saving the current Model')
#     args = parser.parse_args()
#
#     torch.manual_seed(args.seed)
#
#     train_kwargs = {'batch_size': args.batch_size}
#     test_kwargs = {'batch_size': args.test_batch_size}
#     if use_cuda:
#         cuda_kwargs = {'num_workers': 1,
#                        'pin_memory': True,
#                        'shuffle': True}
#         train_kwargs.update(cuda_kwargs)
#         test_kwargs.update(cuda_kwargs)
#
#     transform=transforms.Compose([
#         transforms.ToTensor(),
#         transforms.Normalize((0.1307,), (0.3081,))
#         ])
#     dataset1 = datasets.MNIST('../data', train=True, download=True,
#                        transform=transform)
#     dataset2 = datasets.MNIST('../data', train=False,
#                        transform=transform)
#     train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
#     test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
#
#     model = Net().to(device)
#     optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
#
#     scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
#     for epoch in range(1, args.epochs + 1):
#         train(args, model, device, train_loader, optimizer, epoch)
#         test(model, device, test_loader)
#         scheduler.step()
#
#     if args.save_model:
#         torch.save(model.state_dict(), "mnist_cnn.pt")
#
#
# if __name__ == '__main__':
#     main()