import torch
# Check the installed PyTorch version
print(torch.__version__)
1.8.1+cu101
# Check if a CUDA-compatible device is available
print(torch.cuda.is_available())
False
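# An optional sketch: on a machine where is_available() returns True, the
# visible hardware can be inspected further (these are real torch.cuda
# calls, but they only return useful values when a GPU is present)
if torch.cuda.is_available():
    print(torch.cuda.device_count())      # number of visible GPUs
    print(torch.cuda.get_device_name(0))  # name of the first GPU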
# Allocate an uninitialized tensor (torch.empty does not zero the memory,
# so the values are whatever happened to be there)
x = torch.empty(5, 3)
print(x)
tensor([[3.8677e-29, 3.0848e-41, 3.3631e-44],
        [0.0000e+00,        nan, 6.4460e-44],
        [1.1578e+27, 1.1362e+30, 7.1547e+22],
        [4.5828e+30, 1.2121e+04, 7.1846e+22],
        [9.2198e-39, 7.0374e+22, 6.3300e-30]])
# Initialize a zero tensor
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
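# For comparison, a minimal sketch of the analogous ones-filled constructor
x = torch.ones(5, 3, dtype=torch.long)
print(x)  # same shape, every entry is 1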
# Initialize a random tensor
x = torch.rand(5, 3)
print(x)
tensor([[0.4805, 0.5674, 0.6114],
        [0.3374, 0.2846, 0.4141],
        [0.5330, 0.1123, 0.6748],
        [0.9518, 0.6749, 0.7500],
        [0.6629, 0.1617, 0.6080]])
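# A related sketch: torch.rand draws uniformly from [0, 1), while
# torch.randn (used later in this section) draws from a standard normal,
# so its values can be negative or exceed 1
x = torch.randn(5, 3)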
# Initialize a tensor directly from data
x = torch.tensor([5.5, 3])
print(x)
tensor([5.5000, 3.0000])
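# Note: torch.tensor infers the dtype from the data (float32 above);
# a sketch of pinning it explicitly instead
x = torch.tensor([5.5, 3], dtype=torch.float64)
print(x.dtype)  # torch.float64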
# Create a separate copy of an existing tensor using clone
x = torch.zeros(2, 3, dtype=torch.long)
y = x.clone()
x = x + 1
print(x)
print(y)
tensor([[1, 1, 1],
        [1, 1, 1]])
tensor([[0, 0, 0],
        [0, 0, 0]])
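# For contrast, an illustrative sketch: plain assignment does not copy;
# both names refer to the same tensor, so in-place updates show through
a = torch.zeros(2, 3, dtype=torch.long)
b = a        # alias, not a copy
a.add_(1)
print(b)     # reflects the in-place update: all ones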
# Initialize with previous tensor's attributes
x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes
print(x)
x = torch.randn_like(x, dtype=torch.float) # override dtype!
print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
tensor([[-0.5087,  0.8621, -0.7676],
        [ 0.9275,  1.4946, -0.2577],
        [-0.4062, -0.6010, -1.6853],
        [ 0.3337, -1.1937,  0.6211],
        [ 0.4633,  0.1690,  2.7207]])
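# The *_like family is broader than randn_like; these sketches keep the
# shape and dtype of x while resetting the values
print(torch.zeros_like(x))  # same shape as x, all zeros
print(torch.ones_like(x))   # same shape as x, all ones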
# Addition: operator syntax, functional syntax, writing into an
# existing output tensor, and in-place mutation
y = torch.rand(5, 3)
print(x + y)
print(torch.add(x, y))
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
y.add_(x)  # in-place: methods with a trailing _ mutate their tensor
print(y)
tensor([[ 0.2997,  1.2734, -0.6246],
        [ 1.1735,  2.1222,  0.6334],
        [-0.0203,  0.0986, -1.4308],
        [ 1.2026, -0.2370,  0.9991],
        [ 0.5754,  0.5744,  3.2674]])
tensor([[ 0.2997,  1.2734, -0.6246],
        [ 1.1735,  2.1222,  0.6334],
        [-0.0203,  0.0986, -1.4308],
        [ 1.2026, -0.2370,  0.9991],
        [ 0.5754,  0.5744,  3.2674]])
tensor([[ 0.2997,  1.2734, -0.6246],
        [ 1.1735,  2.1222,  0.6334],
        [-0.0203,  0.0986, -1.4308],
        [ 1.2026, -0.2370,  0.9991],
        [ 0.5754,  0.5744,  3.2674]])
tensor([[ 0.2997,  1.2734, -0.6246],
        [ 1.1735,  2.1222,  0.6334],
        [-0.0203,  0.0986, -1.4308],
        [ 1.2026, -0.2370,  0.9991],
        [ 0.5754,  0.5744,  3.2674]])
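# The same spellings exist for the other elementwise operations; a brief
# sketch with multiplication
print(x * y)            # operator form
print(torch.mul(x, y))  # functional form
y.mul_(x)               # in-place form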
# Get tensor size
print(x.size())
print(x.size(0), x.size()[1])
torch.Size([5, 3])
5 3
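# x.shape is an alias for x.size(); both return a torch.Size, which
# behaves like a Python tuple
print(x.shape)          # torch.Size([5, 3])
rows, cols = x.size()   # tuple-style unpacking works too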
# Get slice of a tensor
print(x[:, 1])
tensor([ 0.8621, 1.4946, -0.6010, -1.1937, 0.1690])
# Resize a tensor (i.e., change the view)
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
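# view requires contiguous memory; reshape is the forgiving variant that
# copies when needed (a small illustrative sketch)
t = x.t()             # transpose: same data, non-contiguous layout
print(t.reshape(16))  # works; t.view(16) would raise a RuntimeError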
# Get a single scalar from a tensor
x = torch.randn(1)
print(x)
print(x.item())
tensor([0.0680])
0.06802535057067871
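# .item() only works on one-element tensors; for anything larger,
# .tolist() converts to a plain Python list instead
v = torch.randn(3)
print(v.tolist())  # a list of three Python floats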
a = torch.ones(5)
print(a)
# Converting to a numpy array (uses the same memory location if on CPU)
b = a.numpy()
print(b)
a.add_(1)
print(a)
print(b)
tensor([1., 1., 1., 1., 1.])
[1. 1. 1. 1. 1.]
tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a) # Uses the same memory location (if on CPU)
np.add(a, 1, out=a)
print(a)
print(b)
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)
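# One caveat worth noting: NumPy defaults to float64, so from_numpy yields
# a torch.float64 tensor; casting gives PyTorch's usual float32 but breaks
# the memory sharing (the cast allocates a new tensor)
c = b.float()
print(c.dtype)  # torch.float32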
# The same checks, now on a machine with a CUDA-capable GPU
import torch
print(torch.__version__)
print(torch.cuda.is_available())
1.8.1+cu101
True
if torch.cuda.is_available():
    device = torch.device("cuda")
    x = torch.rand(5, 3)
    y = torch.ones_like(x, device=device)  # directly create a tensor on the GPU
    x = x.to(device)                       # or move an existing tensor with .to
    z = x + y
    print(z)
    print(z.to("cpu"))                     # move the result back to the CPU
tensor([[1.0612, 1.3529, 1.4719],
        [1.7699, 1.3789, 1.2179],
        [1.3048, 1.9944, 1.0399],
        [1.0478, 1.7676, 1.6748],
        [1.3807, 1.9231, 1.9396]], device='cuda:0')
tensor([[1.0612, 1.3529, 1.4719],
        [1.7699, 1.3789, 1.2179],
        [1.3048, 1.9944, 1.0399],
        [1.0478, 1.7676, 1.6748],
        [1.3807, 1.9231, 1.9396]])
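# .to can also change the dtype in the same call (this assumes the CUDA
# branch above ran, so z exists on the GPU)
print(z.to("cpu", torch.double))  # move to CPU and cast to float64 at once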
# Run on the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
y = torch.ones_like(x, device=device)
x = x.to(device)
z = x + y
print(z)
tensor([[1.6030, 1.0554, 1.8243],
        [1.6876, 1.5408, 1.2670],
        [1.8411, 1.4868, 1.9681],
        [1.8510, 1.2308, 1.2255],
        [1.7303, 1.8777, 1.6621]], device='cuda:0')
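# With a device handle in hand, factory functions can also build tensors
# on the right device directly, avoiding a separate copy (a small sketch)
w = torch.zeros(5, 3, device=device)
print(w.device)  # cuda:0 or cpu, depending on availability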