# Easiest solution is to wrap your model in DataParallel, like so:
import torch
from torch import nn

# Prefer the GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# `Model`, `input_size`, and `output_size` are assumed to be defined
# elsewhere in the surrounding project — TODO confirm.
model = Model(input_size, output_size)

if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # DataParallel replicates the module and splits each input batch
    # across all visible GPUs on this machine.
    # NOTE(review): PyTorch recommends DistributedDataParallel over
    # DataParallel for multi-GPU training — consider migrating.
    model = nn.DataParallel(model)

# Move the (possibly wrapped) model's parameters/buffers to the device.
model.to(device)