import torch
import numpy as np

"""
Numpy vs. Torch comparison lesson
"""
# # Converting between torch tensors and numpy arrays
# np_data = np.arange(6).reshape((2,3))
# print(np_data)
#
# torch_data = torch.from_numpy(np_data)
# print('\n',torch_data)
#
# tensor2array = torch_data.numpy()
# print('\n',tensor2array)
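
# # (Added aside, not in the original lesson.) torch.from_numpy shares memory
# # with the source numpy array, so in-place edits are visible on both sides:
# np_data[0,0] = 99
# print(torch_data) # the tensor sees the change too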

# # abs (absolute value)
# data = [-1,-2,1,2]
# tensor = torch.FloatTensor(data) #32-bit
# print(abs(tensor))
# print('\n',np.abs(data))
# print('\n',torch.abs(tensor))
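#
# # (Added aside.) Python's built-in abs() works on tensors because they
# # implement __abs__, so abs(tensor) matches torch.abs(tensor):
# assert torch.equal(abs(tensor), torch.abs(tensor))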

# # Matrix multiplication
# data = [[1,2],[3,4]]
# tensor = torch.FloatTensor(data) #32-bit
# print('\n',np.matmul(data,data))
# print('\n',torch.mm(tensor,tensor))
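#
# # (Added aside.) In recent numpy/torch the @ operator is equivalent to
# # np.matmul / torch.mm for 2-D inputs:
# print('\n',tensor @ tensor)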
#
# # Note: on 2-D arrays numpy.dot performs matrix multiplication, while torch's tensor.dot computes a (1-D) dot product.
# data = np.array(data)
# print('\n',data.dot(data))
# print('\n',tensor.dot(tensor)) # Note: this raises an error in newer versions, which require 1-D inputs for dot; our tensor is 2-D. Older versions returned the dot product of the flattened tensors.
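#
# # (Added sketch, assuming a newer torch.) To recover the old flattened dot
# # product explicitly, make the inputs 1-D first:
# print('\n',tensor.flatten().dot(tensor.flatten())) # 1*1 + 2*2 + 3*3 + 4*4 = 30.0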

"""
Variable lesson
"""
# # I worked through this section, but it no longer runs as written: in newer torch versions Variable() simply returns a tensor, requires_grad has a default value and need not be passed, and var.backward() works directly on tensors.
# from torch.autograd import Variable
#
# tensor = torch.FloatTensor([[1,2],[3,4]])
# variable = Variable(tensor,requires_grad = True)
#
# t_out = torch.mean(tensor*tensor)
# v_out = torch.mean(variable*variable)
#
# v_out.backward()
# print(variable.grad)
# print(variable.data) # as a plain tensor
# print(variable.data.numpy())
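#
# # (Added sketch of the modern API described above, assuming torch >= 0.4;
# # kept commented out like the rest of this lesson.)
# t = torch.tensor([[1.,2.],[3.,4.]], requires_grad=True)
# out = torch.mean(t*t)
# out.backward()
# print(t.grad)             # d(mean(t*t))/dt = t/2
# print(t.detach().numpy()) # detach() replaces the old .data access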

"""
Activation function lesson
"""
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5,5,200)
x = Variable(x) # a no-op in newer torch: Variable() just returns the tensor
x_np = x.data.numpy() # convert to numpy for plotting

y_relu = F.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy() # F.sigmoid is deprecated in newer torch
y_tanh = torch.tanh(x).data.numpy()       # F.tanh is deprecated in newer torch
y_softplus = F.softplus(x).data.numpy()
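
# (Added aside.) softplus is the smooth version of relu: softplus(x) = log(1 + exp(x)).
# A quick sanity check of that identity on our data:
assert np.allclose(y_softplus, np.log1p(np.exp(x_np)), atol=1e-4)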

plt.subplot(221)
plt.plot(x_np,y_relu,c='red',label='relu')
plt.ylim((-1,5))
plt.legend(loc="best")

plt.subplot(222)
plt.plot(x_np,y_sigmoid,c='red',label='sigmoid')
plt.ylim((-0.2,1.2))
plt.legend(loc="best")

plt.subplot(223)
plt.plot(x_np,y_tanh,c='red',label='tanh')
plt.ylim((-1.2,1.2))
plt.legend(loc="best")

plt.subplot(224)
plt.plot(x_np,y_softplus,c='red',label='softplus')
plt.ylim((-0.2,6))
plt.legend(loc="best")

plt.show()