In [1]: import torch

In [2]: torch.cuda.current_device()
Out[2]: 0

In [3]: torch.cuda.device(0)
Out[3]: <torch.cuda.device at 0x7efce0b03be0>

In [4]: torch.cuda.device_count()
Out[4]: 1

In [5]: torch.cuda.get_device_name(0)
Out[5]: 'GeForce GTX 950M'

In [6]: torch.cuda.is_available()
Out[6]: True

17
import torch

torch.cuda.is_available()
>>> True

torch.cuda.current_device()
>>> 0

torch.cuda.device(0)
>>> <torch.cuda.device at 0x7efce0b03be0>

torch.cuda.device_count()
>>> 1

torch.cuda.get_device_name(0)
>>> 'GeForce GTX 950M'
17
1import torch
2import torch.nn as nn
3dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
4t1 = torch.randn(1,2)
5t2 = torch.randn(1,2).to(dev)
6print(t1) # tensor([[-0.2678, 1.9252]])
7print(t2) # tensor([[ 0.5117, -3.6247]], device='cuda:0')
8t1.to(dev)
9print(t1) # tensor([[-0.2678, 1.9252]])
10print(t1.is_cuda) # False
11t1 = t1.to(dev)
12print(t1) # tensor([[-0.2678, 1.9252]], device='cuda:0')
13print(t1.is_cuda) # True
14
15class M(nn.Module):
16 def __init__(self):
17 super().__init__()
18 self.l1 = nn.Linear(1,2)
19
20 def forward(self, x):
21 x = self.l1(x)
22 return x
23model = M() # not on cuda
24model.to(dev) # is on cuda (all parameters)
25print(next(model.parameters()).is_cuda) # True
1device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
2#或device = torch.device("cuda:0")
3device1 = torch.device("cuda:1")
4for batch_idx, (img, label) in enumerate(train_loader):
5 img=img.to(device)
6 label=label.to(device)
7