PyTorch Notes 9: CIFAR-10 Image Classification

1. Overview of the Image Classification Network Framework

  • Basic structure of a classification network
    • Data loading module: loads the training data
    • Data assembly: organizes the data into the form the network needs, e.g. preprocessing, augmentation, the forward pass through the network, and loss computation
    • Optimizer
  • Data loading module
    • Public datasets: torchvision.datasets
    • Custom datasets: Dataset and DataLoader from torch.utils.data
  • Data augmentation module
    • torchvision.transforms (a minimal end-to-end sketch follows this list)
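A minimal sketch of how these pieces connect (loading, augmentation, optimizer), using the public CIFAR-10 wrapper from torchvision.datasets; the ./data path and the stand-in linear model are placeholders, and the real dataset class and networks used in these notes are built in the sections below:

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# data augmentation / preprocessing module
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
# data loading module: a public dataset from torchvision.datasets
dataset = datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
loader = DataLoader(dataset, batch_size=6, shuffle=True)
# optimizer: a stand-in linear model just so the optimizer line is concrete
model = torch.nn.Linear(3 * 32 * 32, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)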

2. Reading the CIFAR-10 Data

CIFAR-10 dataset download link: https://pan.baidu.com/s/1Dc6eQ54CCLFdCA2ORuFChg (extraction code: 5279)

After extracting the downloaded archive, you get the cifar-10-batches-py folder containing the batch files data_batch_1 through data_batch_5 and test_batch.

Create two folders, dataTrain and dataTest, inside cifar-10-batches-py to hold the exported images.

The following code writes the training-set and test-set images from the dataset into these folders:

import os
import cv2
import numpy as np
import glob
def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict

label_name = ['airplane', 'automobile', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']

# train_list = glob.glob('cifar-10-batches-py/data_batch_*')  # use this line to export the training images
train_list = glob.glob('cifar-10-batches-py/test_batch*')
# save_path = 'cifar-10-batches-py/dataTrain'  # use this line to export the training images
save_path = 'cifar-10-batches-py/dataTest'

for l in train_list:
    l_dict = unpickle(l)
    for im_idx, im_data in enumerate(l_dict[b'data']):
        im_label = l_dict[b'labels'][im_idx]
        im_name = l_dict[b'filenames'][im_idx]
        im_label_name = label_name[im_label]
        im_data = np.reshape(im_data, [3, 32, 32])
        im_data = np.transpose(im_data, (1, 2, 0))
        # CIFAR-10 stores RGB, while cv2.imwrite expects BGR, so swap the channels before saving
        im_data = cv2.cvtColor(im_data, cv2.COLOR_RGB2BGR)
        if not os.path.exists("{}/{}".format(save_path, im_label_name)):
            os.mkdir("{}/{}".format(save_path, im_label_name))
        cv2.imwrite("{}/{}/{}".format(save_path, im_label_name, im_name.decode("utf-8")), im_data)
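An optional quick check, assuming the export above has been run once for dataTrain and once for dataTest, that the expected number of PNG files was written:

import glob
print(len(glob.glob('cifar-10-batches-py/dataTrain/*/*.png')))  # expected: 50000
print(len(glob.glob('cifar-10-batches-py/dataTest/*/*.png')))   # expected: 10000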

3. Loading the Custom Dataset

from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
import os
from PIL import Image
import numpy as np
import glob 
label_name = ['airplane', 'automobile', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']
label_dict = {}
for idx, name in enumerate(label_name):
    label_dict[name] = idx

def default_loader(path):
    return Image.open(path).convert('RGB')

# data augmentation for the training set
train_transform = transforms.Compose([
    transforms.RandomResizedCrop((28, 28)),      # random crop and resize to 28x28
    transforms.RandomHorizontalFlip(),           # random horizontal flip
    transforms.RandomVerticalFlip(),             # random vertical flip
    transforms.RandomRotation(90),               # random rotation
    transforms.RandomGrayscale(0.1),             # random grayscale conversion
    transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),  # random color jitter
    transforms.ToTensor()                        # convert to tensor
])

class MyDataset(Dataset):
    def __init__(self, im_list, transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []
        for im_item in im_list:
            # the parent folder name is the class label (normalize Windows separators first)
            im_label_name = im_item.replace("\\", "/").split("/")[-2]
            imgs.append([im_item, label_dict[im_label_name]])
        self.imgs = imgs
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        im_path, im_label = self.imgs[index]
        im_data = self.loader(im_path)
        if self.transform is not None:
            im_data = self.transform(im_data)
        return im_data, im_label

    def __len__(self):
        return len(self.imgs)

im_train_list = glob.glob("cifar-10-batches-py/dataTrain/*/*.png")  # training image path list
im_test_list = glob.glob("cifar-10-batches-py/dataTest/*/*.png")    # test image path list
train_dataset = MyDataset(im_train_list, transform=train_transform)      # training dataset
test_dataset = MyDataset(im_test_list, transform=transforms.ToTensor())  # test dataset
train_data_loader = DataLoader(dataset=train_dataset, batch_size=6, shuffle=True, num_workers=4)  # training data loader
test_data_loader = DataLoader(dataset=test_dataset, batch_size=6, shuffle=False, num_workers=4)   # test data loader
print("num_of_train:",len(train_dataset))
print("num_of_test:",len(test_dataset))

Output of running the code:

num_of_train: 50000
num_of_test: 10000
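As a quick sanity check on the loaders defined above (on Windows, the num_workers=4 loaders should be iterated under an if __name__ == "__main__": guard), one batch can be pulled to confirm the tensor shapes:

for images, labels in train_data_loader:
    print(images.shape, labels.shape)   # torch.Size([6, 3, 28, 28]) torch.Size([6])
    break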

4. Building the VGG Network

  • Model definition
import torch
import torch.nn as nn
import torch.nn.functional as F
# define the VGG-style network
class VGGbase(nn.Module):
    def __init__(self):
        super(VGGbase, self).__init__()
        # stage 1, input size 28x28
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)

        # stage 2, feature map size 14x14
        self.conv2_1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.conv2_2 = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # stage 3, feature map size 7x7
        self.conv3_1 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.conv3_2 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.max_pooling3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)

        # stage 4, feature map size 4x4
        self.conv4_1 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        self.conv4_2 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        self.max_pooling4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)

        # fully connected classifier: 512 channels x 3 x 3 = 4608 features
        self.fc = nn.Linear(4608, 10)

    def forward(self, x):
        batchsize = x.size(0)
        out = self.conv1(x)
        out = self.max_pooling1(out)
        out = self.conv2_1(out)
        out = self.conv2_2(out)
        out = self.max_pooling2(out)
        out = self.conv3_1(out)
        out = self.conv3_2(out)
        out = self.max_pooling3(out)
        out = self.conv4_1(out)
        out = self.conv4_2(out)
        out = self.max_pooling4(out)
        out = out.view(batchsize, -1)   # flatten the feature map to a vector
        out = self.fc(out)
        # return raw logits: nn.CrossEntropyLoss in the training script applies log-softmax internally
        return out

def VGGNet():
    return VGGbase()
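A small shape check, assuming the class above, confirming that a 28x28 input yields the 4608 flattened features expected by the fully connected layer:

import torch
net = VGGNet()
x = torch.randn(1, 3, 28, 28)   # dummy batch matching the 28x28 training crops
print(net(x).shape)             # torch.Size([1, 10])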
  • Model training
import torch
import torch.nn as nn
import torchvision
from vggnet import VGGNet
from cifar10Data import train_data_loader, test_data_loader
import os
import tensorboardX
# training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of training epochs
epoch_num = 200
# learning rate
lr = 0.01
# build the network
net = VGGNet().to(device)
# loss function
loss_func = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# learning-rate decay
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)

if not os.path.exists("log"):
    os.mkdir("log")
writer = tensorboardX.SummaryWriter("log")

step_n = 0
for epoch in range(epoch_num):
    print("epoch is:", epoch)

    # training
    net.train()
    for i, data in enumerate(train_data_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = loss_func(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar("train loss", loss.item(), global_step=step_n)
        im = torchvision.utils.make_grid(inputs)
        writer.add_image("train image", im, global_step=step_n)
        step_n += 1

    # save a checkpoint after every epoch
    if not os.path.exists("models"):
        os.mkdir("models")
    torch.save(net.state_dict(), "models/{}.pth".format(epoch + 1))
    scheduler.step()

    # evaluation on the test set: no gradient updates here
    net.eval()
    sum_loss = 0
    with torch.no_grad():
        for i, data in enumerate(test_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            sum_loss += loss.item()
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("test image", im, global_step=step_n)
    test_loss = sum_loss * 1.0 / len(test_data_loader)
    writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
    print('test_step:', i, 'loss is:', test_loss)
writer.close()
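The loss curves and image grids logged above can be viewed with TensorBoard (assuming the tensorboard package is installed): run tensorboard --logdir log from the project directory and open the printed URL in a browser. The same applies to the log1/log2/log3 directories used by the later training scripts.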
  • Training results
epoch is: 0
test_step: 8333 loss is: 2.306014501994137
epoch is: 1
test_step: 8333 loss is: 2.220694358253868
epoch is: 2
test_step: 8333 loss is: 2.1626519183618202
epoch is: 3
  • Chart results (TensorBoard curves)

5. Building the ResNet Network

  • Model definition
import torch
import torch.nn as nn
import torch.nn.functional as F
# basic residual block used inside the ResNet
class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1):
        super(ResBlock, self).__init__()
        # main branch
        self.layer = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(),
            nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channel),
        )
        # shortcut branch: only needs a projection when the shape or channel count changes
        self.shortcut = nn.Sequential()
        if in_channel != out_channel or stride > 1:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
                nn.BatchNorm2d(out_channel),
            )

    def forward(self, x):
        out1 = self.layer(x)
        out2 = self.shortcut(x)
        out = out1 + out2
        out = F.relu(out)
        return out

# the ResNet model itself
class ResNet(nn.Module):
    def __init__(self, ResBlock):
        super(ResNet, self).__init__()
        self.in_channel = 32
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResBlock, 64, 2, 2)
        self.layer2 = self.make_layer(ResBlock, 128, 2, 2)
        self.layer3 = self.make_layer(ResBlock, 256, 2, 2)
        self.layer4 = self.make_layer(ResBlock, 512, 2, 2)
        self.fc = nn.Linear(512, 10)

    def make_layer(self, block, out_channel, stride, num_block):
        layers_list = []
        for i in range(num_block):
            # only the first block of each stage downsamples
            in_stride = stride if i == 0 else 1
            layers_list.append(block(self.in_channel, out_channel, in_stride))
            self.in_channel = out_channel
        return nn.Sequential(*layers_list)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

def resnet():
    return ResNet(ResBlock)
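A quick shape check, assuming the classes above; both the 28x28 training crops and the raw 32x32 test images pass through cleanly:

import torch
net = resnet()
print(net(torch.randn(1, 3, 28, 28)).shape)   # torch.Size([1, 10])
print(net(torch.randn(1, 3, 32, 32)).shape)   # torch.Size([1, 10])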
  • Model training
import torch
import torch.nn as nn
import torchvision
from resnet import resnet
from cifar10Data import train_data_loader, test_data_loader
import os
import tensorboardX
# training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of training epochs
epoch_num = 200
# learning rate
lr = 0.01
# build the network
net = resnet().to(device)
# loss function
loss_func = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# learning-rate decay
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)

if not os.path.exists("log1"):
    os.mkdir("log1")
writer = tensorboardX.SummaryWriter("log1")

step_n = 0
for epoch in range(epoch_num):
    print("epoch is:", epoch)

    # training
    net.train()
    for i, data in enumerate(train_data_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = loss_func(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar("train loss", loss.item(), global_step=step_n)
        im = torchvision.utils.make_grid(inputs)
        writer.add_image("train image", im, global_step=step_n)
        step_n += 1

    # save a checkpoint after every epoch
    if not os.path.exists("models"):
        os.mkdir("models")
    torch.save(net.state_dict(), "models/{}.pth".format(epoch + 1))
    scheduler.step()

    # evaluation on the test set: no gradient updates here
    net.eval()
    sum_loss = 0
    with torch.no_grad():
        for i, data in enumerate(test_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            sum_loss += loss.item()
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("test image", im, global_step=step_n)
    test_loss = sum_loss * 1.0 / len(test_data_loader)
    writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
    print('test_step:', i, 'loss is:', test_loss)
writer.close()
  • Training results
epoch is: 0
test_step: 8333 loss is: 2.3071022295024948
epoch is: 1
test_step: 8333 loss is: 2.226925660673022
epoch is: 2
test_step: 8333 loss is: 2.155742327815656
epoch is: 3
test_step: 8333 loss is: 2.11763518281998
epoch is: 4
test_step: 8333 loss is: 2.0863706607283063
  • Chart results (TensorBoard curves)

6. Building the MobileNetV1 Network

  • Model definition
import torch
import torch.nn.functional as F
import torch.nn as nn
class mobilenet(nn.Module):
    def __init__(self):
        super(mobilenet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
        )
        self.conv_dw2 = self.conv_dw(32, 32, 1)
        self.conv_dw3 = self.conv_dw(32, 64, 2)
        self.conv_dw4 = self.conv_dw(64, 64, 1)
        self.conv_dw5 = self.conv_dw(64, 128, 2)
        self.conv_dw6 = self.conv_dw(128, 128, 1)
        self.conv_dw7 = self.conv_dw(128, 256, 2)
        self.conv_dw8 = self.conv_dw(256, 256, 1)
        self.conv_dw9 = self.conv_dw(256, 512, 2)
        self.fc = nn.Linear(512, 10)

    def conv_dw(self, in_channel, out_channel, stride):
        # depthwise 3x3 convolution (groups=in_channel) followed by a pointwise 1x1 convolution
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=3, stride=stride,
                      padding=1, groups=in_channel, bias=False),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(),
            nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=1, stride=1,
                      padding=0, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(),
        )

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv_dw2(out)
        out = self.conv_dw3(out)
        out = self.conv_dw4(out)
        out = self.conv_dw5(out)
        out = self.conv_dw6(out)
        out = self.conv_dw7(out)
        out = self.conv_dw8(out)
        out = self.conv_dw9(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(-1, 512)
        out = self.fc(out)
        return out

def mobilenetv1_small():
    return mobilenet()
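As a rough illustration of why the depthwise-separable block is cheap (a standalone sketch, not part of the course code): compare the parameter count of a standard 3x3 convolution with the depthwise 3x3 + pointwise 1x1 pair used in conv_dw above, for 128 -> 256 channels:

import torch.nn as nn
standard = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=128, bias=False),  # depthwise
    nn.Conv2d(128, 256, kernel_size=1, bias=False),                         # pointwise
)
count = lambda m: sum(p.numel() for p in m.parameters())
print(count(standard))   # 294912
print(count(separable))  # 33920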
  • Model training
import torch
import torch.nn as nn
import torchvision
from mobilenetv1 import mobilenetv1_small
from cifar10Data import train_data_loader, test_data_loader
import os
import tensorboardX
# training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of training epochs
epoch_num = 200
# learning rate
lr = 0.01
# build the network
net = mobilenetv1_small().to(device)
# loss function
loss_func = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# learning-rate decay
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)

if not os.path.exists("log2"):
    os.mkdir("log2")
writer = tensorboardX.SummaryWriter("log2")

step_n = 0
for epoch in range(epoch_num):
    print("epoch is:", epoch)

    # training
    net.train()
    for i, data in enumerate(train_data_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = loss_func(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar("train loss", loss.item(), global_step=step_n)
        im = torchvision.utils.make_grid(inputs)
        writer.add_image("train image", im, global_step=step_n)
        step_n += 1

    # save a checkpoint after every epoch
    if not os.path.exists("models"):
        os.mkdir("models")
    torch.save(net.state_dict(), "models/{}.pth".format(epoch + 1))
    scheduler.step()

    # evaluation on the test set: no gradient updates here
    net.eval()
    sum_loss = 0
    with torch.no_grad():
        for i, data in enumerate(test_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            sum_loss += loss.item()
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("test image", im, global_step=step_n)
    test_loss = sum_loss * 1.0 / len(test_data_loader)
    writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
    print('test_step:', i, 'loss is:', test_loss)
writer.close()
  • Training results
epoch is: 0
test_step: 8333 loss is: 2.3168991455678207
epoch is: 1
test_step: 8333 loss is: 58.0813152680072
epoch is: 2
test_step: 8333 loss is: 239.99653513472458
epoch is: 3
test_step: 8333 loss is: 1036.717976929159
epoch is: 4
test_step: 8333 loss is: 110.44223031090523
  • Chart results (TensorBoard curves)

7. Building the Inception Network

  • Model definition
import torch
import torch.nn as nn
import torch.nn.functional as F
def ConvBNRelu(in_channel, out_channel, kernel_size):
    # convolution + batch norm + ReLU, padded so the spatial size is preserved
    return nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel_size, padding=kernel_size // 2),
        nn.BatchNorm2d(out_channel),
        nn.ReLU(inplace=True),
    )

class BaseInception(nn.Module):
    def __init__(self, in_channel, out_channel_list, reduce_channel_list):
        super(BaseInception, self).__init__()
        # branch 1: 1x1 convolution
        self.branch1_conv = ConvBNRelu(in_channel, out_channel_list[0], 1)
        # branch 2: 1x1 reduction followed by a 3x3 convolution
        self.branch2_conv1 = ConvBNRelu(in_channel, reduce_channel_list[0], 1)
        self.branch2_conv2 = ConvBNRelu(reduce_channel_list[0], out_channel_list[1], 3)
        # branch 3: 1x1 reduction followed by a 5x5 convolution
        self.branch3_conv1 = ConvBNRelu(in_channel, reduce_channel_list[1], 1)
        self.branch3_conv2 = ConvBNRelu(reduce_channel_list[1], out_channel_list[2], 5)
        # branch 4: 3x3 max pooling followed by a convolution
        self.branch4_pool = nn.MaxPool2d(3, 1, padding=1)
        self.branch4_conv = ConvBNRelu(in_channel, out_channel_list[3], 3)

    def forward(self, x):
        out1 = self.branch1_conv(x)
        out2 = self.branch2_conv1(x)
        out2 = self.branch2_conv2(out2)
        out3 = self.branch3_conv1(x)
        out3 = self.branch3_conv2(out3)
        out4 = self.branch4_pool(x)
        out4 = self.branch4_conv(out4)
        # concatenate the four branches along the channel dimension
        out = torch.cat([out1, out2, out3, out4], 1)
        return out

class InceptionNet(nn.Module):
    def __init__(self):
        super(InceptionNet, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.block3 = nn.Sequential(
            BaseInception(in_channel=128, out_channel_list=[64, 64, 64, 64],
                          reduce_channel_list=[16, 16]),
            nn.MaxPool2d(3, stride=2, padding=1),
        )
        self.block4 = nn.Sequential(
            BaseInception(in_channel=256, out_channel_list=[96, 96, 96, 96],
                          reduce_channel_list=[32, 32]),
            nn.MaxPool2d(3, stride=2, padding=1),
        )
        self.fc = nn.Linear(384, 10)

    def forward(self, x):
        out = self.block1(x)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

def InceptionNetSmall():
    return InceptionNet()
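A small check, assuming the classes above, that BaseInception concatenates its four branches along the channel dimension while keeping the spatial size:

import torch
block = BaseInception(in_channel=128, out_channel_list=[64, 64, 64, 64], reduce_channel_list=[16, 16])
x = torch.randn(1, 128, 7, 7)
print(block(x).shape)   # torch.Size([1, 256, 7, 7]): four 64-channel branches concatenated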
  • Model training
import torch
import torch.nn as nn
import torchvision
from inception import InceptionNetSmall
from cifar10Data import train_data_loader, test_data_loader
import os
import tensorboardX
# training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# number of training epochs
epoch_num = 200
# learning rate
lr = 0.01
# build the network
net = InceptionNetSmall().to(device)
# loss function
loss_func = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# learning-rate decay
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)

if not os.path.exists("log3"):
    os.mkdir("log3")
writer = tensorboardX.SummaryWriter("log3")

step_n = 0
for epoch in range(epoch_num):
    print("epoch is:", epoch)

    # training
    net.train()
    for i, data in enumerate(train_data_loader):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss = loss_func(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar("train loss", loss.item(), global_step=step_n)
        im = torchvision.utils.make_grid(inputs)
        writer.add_image("train image", im, global_step=step_n)
        step_n += 1

    # save a checkpoint after every epoch
    if not os.path.exists("models"):
        os.mkdir("models")
    torch.save(net.state_dict(), "models/{}.pth".format(epoch + 1))
    scheduler.step()

    # evaluation on the test set: no gradient updates here
    net.eval()
    sum_loss = 0
    with torch.no_grad():
        for i, data in enumerate(test_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            sum_loss += loss.item()
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("test image", im, global_step=step_n)
    test_loss = sum_loss * 1.0 / len(test_data_loader)
    writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
    print('test_step:', i, 'loss is:', test_loss)
writer.close()
  • Training results
epoch is: 0
test_step: 8333 loss is: 2.1641721504324485
epoch is: 1
test_step: 8333 loss is: 2.106510695047678
epoch is: 2
test_step: 8333 loss is: 2.0794332600881478
epoch is: 3
test_step: 8333 loss is: 2.0550003183926497
  • Chart results (TensorBoard curves)

8. The ResNet-18 Model Provided by PyTorch

  • PyTorch ships many ready-made models under torchvision.models
  • The training code is the same as before: just import this model and swap it into the net assignment (see the usage sketch after the code below). The training results are also similar to the previous ones, so only the model code is given here
import torch.nn as nn
from torchvision import models
class resnet18(nn.Module):
    def __init__(self):
        super(resnet18, self).__init__()
        # load the ImageNet-pretrained ResNet-18 (newer torchvision versions prefer
        # models.resnet18(weights=models.ResNet18_Weights.DEFAULT))
        self.model = models.resnet18(pretrained=True)
        # replace the final fully connected layer with a 10-class head for CIFAR-10
        self.num_features = self.model.fc.in_features
        self.model.fc = nn.Linear(self.num_features, 10)

    def forward(self, x):
        out = self.model(x)
        return out

def pytorch_resnet18():
    return resnet18()
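To drop this into any of the earlier training scripts, only the import and the net assignment change; the module name below is an assumption, so adjust it to whatever file the code above is saved in:

from pt_resnet18 import pytorch_resnet18   # assumed file name for the code above
net = pytorch_resnet18().to(device)        # everything else in the training loop stays the same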

The overall file structure of the code:
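Reconstructed from the import statements in the scripts above (the training-script names and exact layout are assumptions, not taken from the original):

project/
    cifar-10-batches-py/        # extracted dataset plus the dataTrain/ and dataTest/ image folders
    cifar10Data.py              # custom dataset and DataLoaders (section 3)
    vggnet.py                   # VGGbase / VGGNet (section 4)
    resnet.py                   # ResBlock / ResNet / resnet (section 5)
    mobilenetv1.py              # mobilenet / mobilenetv1_small (section 6)
    inception.py                # BaseInception / InceptionNet / InceptionNetSmall (section 7)
    train_*.py                  # one training script per network (names assumed)
    log/ log1/ log2/ log3/      # TensorBoard logs
    models/                     # saved .pth checkpoints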

These knowledge points are summary notes from the Bilibili course "2025最新整合!公認B站講解最強【PyTorch】入門到進階教程,從環境配置到算法原理再到代碼實戰逐一解讀,比自學效果強得多!".

The course also goes on to cover detection and segmentation, but those parts walk through models trained by others and do not lend themselves to notes; check out the course yourself if you need them.

And with that, this PyTorch study series is complete. Confetti!!!
