finish chapter 12

Joseph Hopfmüller
2022-10-17 16:25:41 +02:00
parent 563f0ff8ec
commit 4d121641d1
6 changed files with 215 additions and 0 deletions

02_tensors.py (new file, 166 lines added)

@@ -0,0 +1,166 @@
import torch
# empty, zeros, ones of different sizes, specify datatype
print('empty, zeros, ones of different sizes, specify datatype')
x = torch.empty(1)
print(x)
x = torch.empty(3)
print(x)
x = torch.zeros(2, 3)
print(x)
x = torch.ones(2,3,4)
print(x)
x = torch.ones(2, 5, dtype=torch.float64)
print(x.dtype)
print()
# from data
print('from data')
x = torch.tensor([2.5, 0.1])
print(x)
print()
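# (added note) dtype can also be set when building a tensor from data;
# size() and dtype report the shape and element type of any tensor
x = torch.tensor([[2.5, 0.1], [1.0, 2.0]], dtype=torch.float64)
print(x.dtype)
print(x.size())
print()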
# basic ops
print('basic ops')
x = torch.rand(2,2)
y = torch.rand(2,2)
print('add')
z1 = x + y
z2 = torch.add(x,y)
print(x)
print(y)
print(z1)
print(z2)
# in-place addition
x.add_(y)
print(x)
print('sub')
z1 = x - y
z2 = torch.sub(x,y)
print(x)
print(y)
print(z1)
print(z2)
# in-place subtraction
x.sub_(y)
print(x)
print('mul')
z1 = x * y
z2 = torch.mul(x,y)
print(x)
print(y)
print(z1)
print(z2)
# in-place multiplication
x.mul_(y)
print(x)
print('div')
z1 = x / y
z2 = torch.div(x,y)
print(x)
print(y)
print(z1)
print(z2)
# in-place division
x.div_(y)
print(x)
print()
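# (added note) * / torch.mul above are elementwise; matrix multiplication uses @ / torch.matmul
print('matmul')
a = torch.rand(2, 3)
b = torch.rand(3, 2)
print((a @ b).size())             # matrix product -> size (2, 2)
print(torch.matmul(a, b).size())  # same operation
print()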
# slicing
print('slicing, item')
x = torch.rand(2,3,2)
print(x)
print(x[:,2,:])
print(x[1, 2, 1])
print(x[1, 2, 1].item()) # for single element tensors only
print()
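# (added note) .item() is limited to single-element tensors;
# .tolist() converts a whole (sub-)tensor into nested Python lists
print(x[1, 2, :].tolist())
print()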
# reshaping
print('reshaping')
x = torch.rand(4,4)
print(x)
y = x.view(16)
print(y)
y = x.view(-1, 8)
print(y)
y = x.view(2, -1)
print(y)
# y = x.view(3, -1) # fails 'shape is invalid': 16 elements cannot be split into 3 equal rows
# print(y)
print()
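# (added note) view() requires contiguous memory; reshape() falls back to copying,
# so it also works on non-contiguous tensors
y = x.reshape(-1, 8)
print(y.size())
print()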
# transposing
print('transposing')
x = torch.rand(2, 3)
print(x.size())
x = torch.transpose(x, 0, 1)
print(x.size())
x = torch.t(x)
print(x.size())
x = torch.rand(2, 3, 4)
print(f'Original: {x.size()}')
x = torch.transpose(x, 0, 1)
print(f'01: {x.size()}')
x = torch.rand(2, 3, 4)
x = torch.transpose(x, 1, 2)
print(f'12: {x.size()}')
x = torch.rand(2, 3, 4)
x = torch.transpose(x, 0, 2)
print(f'02: {x.size()}')
print()
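# (added note) transpose() returns a view with swapped strides, no data is copied;
# the result is usually non-contiguous, so call .contiguous() before .view()
x = torch.rand(2, 3, 4)
y = torch.transpose(x, 0, 2)
print(y.is_contiguous())               # False
print(y.contiguous().view(-1).size())  # flattening works after .contiguous()
print()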
# numpy
import numpy as np
print('numpy')
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
print(type(b))
a.add_(1) # a and b share the same memory (CPU tensors only), so b changes too
print(a)
print(b)
c = np.ones(5)
print(c)
d = torch.from_numpy(c)
print(d)
e = d.to(dtype=torch.float32) # dtype conversion returns a copy, so e has its own memory
print(e)
c += 1 # c and d share same memory
print(c)
print(d)
print(e)
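# (added note) the memory sharing above only holds for CPU tensors;
# .clone() gives an independent copy that no longer follows c
f = d.clone()
c += 1 # changes c and d, but not the clone
print(d)
print(f)
print()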
# device
print('device')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
print(device)
x = torch.ones(5, device=device)
y = torch.ones(5)
y = y.to(device)
z = x+y
print(z)
z = z.to('cpu') # move tensor back to CPU; GPU tensors cannot be converted to numpy directly
print(z)
a = z.numpy()
print(a)
x = torch.ones(5, requires_grad=True) # enable grad for autograd
print(x)
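# (added sketch) what requires_grad enables: backward() fills x.grad
y = (x * 2).sum()
y.backward()
print(x.grad)  # d(y)/d(x) -> tensor of 2s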