1import torch
2import torch.nn as nn
# Pick the GPU when one is present, otherwise fall back to the CPU.
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Two random tensors: t1 stays on the CPU, t2 is moved right away.
t1 = torch.randn(1, 2)
t2 = torch.randn(1, 2).to(dev)
print(t1)  # e.g. tensor([[-0.2678, 1.9252]])
print(t2)  # e.g. tensor([[ 0.5117, -3.6247]], device='cuda:0') when CUDA is available

# Tensor.to is NOT in place: it returns a moved copy, which is
# deliberately discarded here, so t1 itself does not move.
t1.to(dev)
print(t1)          # unchanged — still the original CPU tensor
print(t1.is_cuda)  # False

# Rebinding the name to the returned tensor is what actually moves it.
t1 = t1.to(dev)
print(t1)          # same values, now on dev
print(t1.is_cuda)  # True when dev is a CUDA device
14
class M(nn.Module):
    """Minimal module: a single 1 -> 2 linear layer."""

    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(1, 2)

    def forward(self, x):
        # Apply the linear layer and hand its result straight back.
        return self.l1(x)
# A freshly constructed module lives on the CPU.
model = M()
# Unlike Tensor.to, nn.Module.to moves all parameters IN PLACE —
# no rebinding needed.
model.to(dev)
first_param = next(model.parameters())
print(first_param.is_cuda)  # True iff dev is a CUDA device