Understand the MMAction2 Framework Design in 20 Minutes

This post records the fixes needed to get the official MMAction2 tutorial code running: a short note for each step whose code breaks as published, followed by the complete modified script.

Step 3: Build a Recognizer

# Change here: predictions[0].pred_score -> predictions[0].pred_scores.item
print('Scores of Sample[0]', predictions[0].pred_scores.item)
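In the MMAction2 release used here, the classification head stores its output in a LabelData wrapper, so the scores live under pred_scores.item; other releases of this tutorial expose a plain pred_score tensor instead. A minimal version-tolerant read might look like the sketch below (the hasattr fallback is my own assumption, not part of the tutorial):

# A minimal sketch: read scores whichever field layout the sample uses.
sample = predictions[0]
if hasattr(sample, 'pred_scores'):
    scores = sample.pred_scores.item  # LabelData wrapper, as in this post
else:
    scores = sample.pred_score        # plain tensor field in other releases
print('Scores of Sample[0]', scores)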

Step 4: Build an Evaluation Metric

# Change here: data_sample['pred_score'].cpu().numpy() -> data_sample['pred_scores']['item']
scores = data_sample['pred_scores']['item']
# And additionally add the following line below it
scores = np.array(scores)
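Because the predictions are converted with to_dict() before being handed to the metric, the LabelData wrapper becomes a nested dict and the scores sit under the 'item' key; np.array() then gives top_k_accuracy a plain array to index. A hedged sketch that also covers the flat pred_score layout (the fallback branch is an assumption on my part, not from the tutorial):

# A minimal sketch, assuming data_sample comes from ActionDataSample.to_dict()
pred = data_sample.get('pred_scores')
if pred is not None:
    scores = np.array(pred['item'])                   # nested LabelData layout
else:
    scores = data_sample['pred_score'].cpu().numpy()  # flat-tensor layout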

Step 5: Train and Test with Native PyTorch

'''
Change here: for data_batch in track_iter_progress(val_data_loader): ->
task_num = len(val_data_loader)
for data_batch in track_iter_progress((val_data_loader, task_num)):
'''
task_num = len(val_data_loader)
for data_batch in track_iter_progress((val_data_loader, task_num)):
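The reason for this change, as far as I can tell: mmengine's track_iter_progress accepts either a sequence or an (iterable, task_num) tuple, and a PyTorch DataLoader is not a sequence, so the total has to be passed explicitly. A small helper (hypothetical, not part of the tutorial) keeps the loops tidy:

from mmengine import track_iter_progress

def with_progress(loader):
    # Wrap any sized iterable so the progress bar gets an explicit total.
    return track_iter_progress((loader, len(loader)))

for data_batch in with_progress(val_data_loader):
    ...  # validation step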

The complete modified code

import mmaction
from mmaction.utils import register_all_modules

register_all_modules(init_default_scope=True)

print('************************** Step 0: Prepare the Data *****************************')
print('************************** Step 1: Build a Data Pipeline *****************************')

import mmcv
import decord
import numpy as np
from mmcv.transforms import TRANSFORMS, BaseTransform, to_tensor
from mmaction.structures import ActionDataSample


@TRANSFORMS.register_module()
class VideoInit(BaseTransform):
    def transform(self, results):
        container = decord.VideoReader(results['filename'])
        results['total_frames'] = len(container)
        results['video_reader'] = container
        return results


@TRANSFORMS.register_module()
class VideoSample(BaseTransform):
    def __init__(self, clip_len, num_clips, test_mode=False):
        self.clip_len = clip_len
        self.num_clips = num_clips
        self.test_mode = test_mode

    def transform(self, results):
        total_frames = results['total_frames']
        interval = total_frames // self.clip_len

        if self.test_mode:
            # Make sampling deterministic during testing
            np.random.seed(42)

        inds_of_all_clips = []
        for i in range(self.num_clips):
            bids = np.arange(self.clip_len) * interval
            offset = np.random.randint(interval, size=bids.shape)
            inds = bids + offset
            inds_of_all_clips.append(inds)

        results['frame_inds'] = np.concatenate(inds_of_all_clips)
        results['clip_len'] = self.clip_len
        results['num_clips'] = self.num_clips
        return results


@TRANSFORMS.register_module()
class VideoDecode(BaseTransform):
    def transform(self, results):
        frame_inds = results['frame_inds']
        container = results['video_reader']

        imgs = container.get_batch(frame_inds).asnumpy()
        imgs = list(imgs)

        results['video_reader'] = None
        del container

        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoResize(BaseTransform):
    def __init__(self, r_size):
        self.r_size = (np.inf, r_size)

    def transform(self, results):
        img_h, img_w = results['img_shape']
        new_w, new_h = mmcv.rescale_size((img_w, img_h), self.r_size)

        imgs = [mmcv.imresize(img, (new_w, new_h))
                for img in results['imgs']]
        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoCrop(BaseTransform):
    def __init__(self, c_size):
        self.c_size = c_size

    def transform(self, results):
        img_h, img_w = results['img_shape']
        center_x, center_y = img_w // 2, img_h // 2
        x1, x2 = center_x - self.c_size // 2, center_x + self.c_size // 2
        y1, y2 = center_y - self.c_size // 2, center_y + self.c_size // 2
        imgs = [img[y1:y2, x1:x2] for img in results['imgs']]
        results['imgs'] = imgs
        results['img_shape'] = imgs[0].shape[:2]
        return results


@TRANSFORMS.register_module()
class VideoFormat(BaseTransform):
    def transform(self, results):
        num_clips = results['num_clips']
        clip_len = results['clip_len']
        imgs = results['imgs']

        # [num_clips*clip_len, H, W, C]
        imgs = np.array(imgs)
        # [num_clips, clip_len, H, W, C]
        imgs = imgs.reshape((num_clips, clip_len) + imgs.shape[1:])
        # [num_clips, C, clip_len, H, W]
        imgs = imgs.transpose(0, 4, 1, 2, 3)

        results['imgs'] = imgs
        return results


@TRANSFORMS.register_module()
class VideoPack(BaseTransform):
    def __init__(self, meta_keys=('img_shape', 'num_clips', 'clip_len')):
        self.meta_keys = meta_keys

    def transform(self, results):
        packed_results = dict()
        inputs = to_tensor(results['imgs'])
        data_sample = ActionDataSample().set_gt_label(results['label'])
        metainfo = {k: results[k] for k in self.meta_keys if k in results}
        data_sample.set_metainfo(metainfo)
        packed_results['inputs'] = inputs
        packed_results['data_samples'] = data_sample
        return packed_results


import os.path as osp
from mmengine.dataset import Compose

pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=1, test_mode=False),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]

pipeline = Compose(pipeline_cfg)
data_prefix = 'data/kinetics400_tiny/train'
results = dict(filename=osp.join(data_prefix, 'D32_1gwq35E.mp4'), label=0)

packed_results = pipeline(results)

inputs = packed_results['inputs']
data_sample = packed_results['data_samples']

print('shape of the inputs: ', inputs.shape)

# Get metainfo of the inputs
print('image_shape: ', data_sample.img_shape)
print('num_clips: ', data_sample.num_clips)
print('clip_len: ', data_sample.clip_len)

# Get the label of the inputs
print('label: ', data_sample.gt_label)
print('************************** Step 2: Build a Dataset and DataLoader *****************************')

import os.path as osp
from mmengine.fileio import list_from_file
from mmengine.dataset import BaseDataset
from mmaction.registry import DATASETS


@DATASETS.register_module()
class DatasetZelda(BaseDataset):
    def __init__(self, ann_file, pipeline, data_root, data_prefix=dict(video=''),
                 test_mode=False, modality='RGB', **kwargs):
        self.modality = modality
        super(DatasetZelda, self).__init__(ann_file=ann_file, pipeline=pipeline,
                                           data_root=data_root,
                                           data_prefix=data_prefix,
                                           test_mode=test_mode,
                                           **kwargs)

    def load_data_list(self):
        data_list = []
        fin = list_from_file(self.ann_file)
        for line in fin:
            line_split = line.strip().split()
            filename, label = line_split
            label = int(label)
            filename = osp.join(self.data_prefix['video'], filename)
            data_list.append(dict(filename=filename, label=label))
        return data_list

    def get_data_info(self, idx: int) -> dict:
        data_info = super().get_data_info(idx)
        data_info['modality'] = self.modality
        return data_info


from mmaction.registry import DATASETS

train_pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=1, test_mode=False),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]

val_pipeline_cfg = [
    dict(type='VideoInit'),
    dict(type='VideoSample', clip_len=16, num_clips=5, test_mode=True),
    dict(type='VideoDecode'),
    dict(type='VideoResize', r_size=256),
    dict(type='VideoCrop', c_size=224),
    dict(type='VideoFormat'),
    dict(type='VideoPack')
]

train_dataset_cfg = dict(
    type='DatasetZelda',
    ann_file='kinetics_tiny_train_video.txt',
    pipeline=train_pipeline_cfg,
    data_root='data/kinetics400_tiny/',
    data_prefix=dict(video='train'))

val_dataset_cfg = dict(
    type='DatasetZelda',
    ann_file='kinetics_tiny_val_video.txt',
    pipeline=val_pipeline_cfg,
    data_root='data/kinetics400_tiny/',
    data_prefix=dict(video='val'))

train_dataset = DATASETS.build(train_dataset_cfg)

packed_results = train_dataset[0]

inputs = packed_results['inputs']
data_sample = packed_results['data_samples']

print('shape of the inputs: ', inputs.shape)

# Get metainfo of the inputs
print('image_shape: ', data_sample.img_shape)
print('num_clips: ', data_sample.num_clips)
print('clip_len: ', data_sample.clip_len)

# Get the label of the inputs
print('label: ', data_sample.gt_label)

from mmengine.runner import Runner

BATCH_SIZE = 2

train_dataloader_cfg = dict(
    batch_size=BATCH_SIZE,
    num_workers=0,
    persistent_workers=False,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=train_dataset_cfg)

val_dataloader_cfg = dict(
    batch_size=BATCH_SIZE,
    num_workers=0,
    persistent_workers=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=val_dataset_cfg)

train_data_loader = Runner.build_dataloader(dataloader=train_dataloader_cfg)
val_data_loader = Runner.build_dataloader(dataloader=val_dataloader_cfg)

batched_packed_results = next(iter(train_data_loader))

batched_inputs = batched_packed_results['inputs']
batched_data_sample = batched_packed_results['data_samples']

assert len(batched_inputs) == BATCH_SIZE
assert len(batched_data_sample) == BATCH_SIZE
print('************************** Step 3: Build a Recognizer *****************************')

import torch
from mmengine.model import BaseDataPreprocessor, stack_batch
from mmaction.registry import MODELS


@MODELS.register_module()
class DataPreprocessorZelda(BaseDataPreprocessor):
    def __init__(self, mean, std):
        super().__init__()

        self.register_buffer(
            'mean',
            torch.tensor(mean, dtype=torch.float32).view(-1, 1, 1, 1),
            False)
        self.register_buffer(
            'std',
            torch.tensor(std, dtype=torch.float32).view(-1, 1, 1, 1),
            False)

    def forward(self, data, training=False):
        data = self.cast_data(data)
        inputs = data['inputs']
        batch_inputs = stack_batch(inputs)  # batching
        batch_inputs = (batch_inputs - self.mean) / self.std  # normalization
        data['inputs'] = batch_inputs
        return data


from mmaction.registry import MODELS

data_preprocessor_cfg = dict(
    type='DataPreprocessorZelda',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375])

data_preprocessor = MODELS.build(data_preprocessor_cfg)

preprocessed_inputs = data_preprocessor(batched_packed_results)
print(preprocessed_inputs['inputs'].shape)

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import BaseModel, BaseModule, Sequential
from mmengine.structures import LabelData
from mmaction.registry import MODELS


@MODELS.register_module()
class BackBoneZelda(BaseModule):
    def __init__(self, init_cfg=None):
        if init_cfg is None:
            init_cfg = [
                dict(type='Kaiming', layer='Conv3d', mode='fan_out',
                     nonlinearity="relu"),
                dict(type='Constant', layer='BatchNorm3d', val=1, bias=0)
            ]

        super(BackBoneZelda, self).__init__(init_cfg=init_cfg)

        self.conv1 = Sequential(
            nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
                      padding=(1, 3, 3)),
            nn.BatchNorm3d(64), nn.ReLU())
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2),
                                    padding=(0, 1, 1))

        self.conv = Sequential(
            nn.Conv3d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm3d(128), nn.ReLU())

    def forward(self, imgs):
        # imgs: [batch_size*num_views, 3, T, H, W]
        # features: [batch_size*num_views, 128, T/2, H//8, W//8]
        features = self.conv(self.maxpool(self.conv1(imgs)))
        return features


@MODELS.register_module()
class ClsHeadZelda(BaseModule):
    def __init__(self, num_classes, in_channels, dropout=0.5,
                 average_clips='prob', init_cfg=None):
        if init_cfg is None:
            init_cfg = dict(type='Normal', layer='Linear', std=0.01)

        super(ClsHeadZelda, self).__init__(init_cfg=init_cfg)

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.average_clips = average_clips

        if dropout != 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None

        self.fc = nn.Linear(self.in_channels, self.num_classes)
        self.pool = nn.AdaptiveAvgPool3d(1)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        N, C, T, H, W = x.shape
        x = self.pool(x)
        x = x.view(N, C)
        assert x.shape[1] == self.in_channels

        if self.dropout is not None:
            x = self.dropout(x)

        cls_scores = self.fc(x)
        return cls_scores

    def loss(self, feats, data_samples):
        cls_scores = self(feats)
        labels = torch.stack([x.gt_label for x in data_samples])
        labels = labels.squeeze()

        if labels.shape == torch.Size([]):
            labels = labels.unsqueeze(0)

        loss_cls = self.loss_fn(cls_scores, labels)
        return dict(loss_cls=loss_cls)

    def predict(self, feats, data_samples):
        cls_scores = self(feats)
        num_views = cls_scores.shape[0] // len(data_samples)
        # assert num_views == data_samples[0].num_clips
        cls_scores = self.average_clip(cls_scores, num_views)

        for ds, sc in zip(data_samples, cls_scores):
            pred = LabelData(item=sc)
            ds.pred_scores = pred
        return data_samples

    def average_clip(self, cls_scores, num_views):
        if self.average_clips not in ['score', 'prob', None]:
            raise ValueError(f'{self.average_clips} is not supported. '
                             f'Currently supported ones are '
                             f'["score", "prob", None]')

        total_views = cls_scores.shape[0]
        cls_scores = cls_scores.view(total_views // num_views, num_views, -1)

        if self.average_clips is None:
            return cls_scores
        elif self.average_clips == 'prob':
            cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1)
        elif self.average_clips == 'score':
            cls_scores = cls_scores.mean(dim=1)

        return cls_scores


@MODELS.register_module()
class RecognizerZelda(BaseModel):
    def __init__(self, backbone, cls_head, data_preprocessor):
        super().__init__(data_preprocessor=data_preprocessor)

        self.backbone = MODELS.build(backbone)
        self.cls_head = MODELS.build(cls_head)

    def extract_feat(self, inputs):
        inputs = inputs.view((-1, ) + inputs.shape[2:])
        return self.backbone(inputs)

    def loss(self, inputs, data_samples):
        feats = self.extract_feat(inputs)
        loss = self.cls_head.loss(feats, data_samples)
        return loss

    def predict(self, inputs, data_samples):
        feats = self.extract_feat(inputs)
        predictions = self.cls_head.predict(feats, data_samples)
        return predictions

    def forward(self, inputs, data_samples=None, mode='tensor'):
        if mode == 'tensor':
            return self.extract_feat(inputs)
        elif mode == 'loss':
            return self.loss(inputs, data_samples)
        elif mode == 'predict':
            return self.predict(inputs, data_samples)
        else:
            raise RuntimeError(f'Invalid mode: {mode}')


import torch
import copy
from mmaction.registry import MODELS

model_cfg = dict(
    type='RecognizerZelda',
    backbone=dict(type='BackBoneZelda'),
    cls_head=dict(
        type='ClsHeadZelda',
        num_classes=2,
        in_channels=128,
        average_clips='prob'),
    data_preprocessor=dict(
        type='DataPreprocessorZelda',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375]))

model = MODELS.build(model_cfg)

# Train
model.train()
model.init_weights()
data_batch_train = copy.deepcopy(batched_packed_results)
data = model.data_preprocessor(data_batch_train, training=True)
loss = model(**data, mode='loss')
print('loss dict: ', loss)

# Validate
with torch.no_grad():
    model.eval()
    data_batch_test = copy.deepcopy(batched_packed_results)
    data = model.data_preprocessor(data_batch_test, training=False)
    predictions = model(**data, mode='predict')

print('Label of Sample[0]', predictions[0].gt_label)
print('Scores of Sample[0]', predictions[0].pred_scores.item)
print('************************** Step 4: Build an Evaluation Metric *****************************')

import copy
from collections import OrderedDict
from mmengine.evaluator import BaseMetric
from mmaction.evaluation import top_k_accuracy
from mmaction.registry import METRICS


@METRICS.register_module()
class AccuracyMetric(BaseMetric):
    def __init__(self, topk=(1, 5), collect_device='cpu', prefix='acc'):
        super().__init__(collect_device=collect_device, prefix=prefix)
        self.topk = topk

    def process(self, data_batch, data_samples):
        data_samples = copy.deepcopy(data_samples)
        for data_sample in data_samples:
            result = dict()
            scores = data_sample['pred_scores']['item']
            scores = np.array(scores)
            label = data_sample['gt_label'].item()
            result['scores'] = scores
            result['label'] = label
            self.results.append(result)

    def compute_metrics(self, results: list) -> dict:
        eval_results = OrderedDict()
        labels = [res['label'] for res in results]
        scores = [res['scores'] for res in results]
        topk_acc = top_k_accuracy(scores, labels, self.topk)
        for k, acc in zip(self.topk, topk_acc):
            eval_results[f'topk{k}'] = acc
        return eval_results


from mmaction.registry import METRICS

metric_cfg = dict(type='AccuracyMetric', topk=(1, 5))

metric = METRICS.build(metric_cfg)

data_samples = [d.to_dict() for d in predictions]

metric.process(batched_packed_results, data_samples)
acc = metric.compute_metrics(metric.results)
print(acc)
print('************************** Step 5: Train and Test with Native PyTorch *****************************')

import torch.optim as optim
from mmengine import track_iter_progress
from tqdm import tqdm

device = 'cuda'  # or 'cpu'
max_epochs = 10

optimizer = optim.Adam(model.parameters(), lr=0.01)

for epoch in range(max_epochs):
    model.train()
    losses = []
    task_num = len(train_data_loader)
    for data_batch in track_iter_progress((train_data_loader, task_num)):
        data = model.data_preprocessor(data_batch, training=True)
        loss_dict = model(**data, mode='loss')
        loss = loss_dict['loss_cls']

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())

    print(f'Epoch[{epoch}]: loss ', sum(losses) / len(train_data_loader))

    with torch.no_grad():
        model.eval()
        task_num = len(val_data_loader)
        for data_batch in track_iter_progress((val_data_loader, task_num)):
            data = model.data_preprocessor(data_batch, training=False)
            predictions = model(**data, mode='predict')
            data_samples = [d.to_dict() for d in predictions]
            metric.process(data_batch, data_samples)

        acc = metric.compute_metrics(metric.results)
        for name, topk in acc.items():
            print(f'{name}: ', topk)
print('************************** Step 6: Train and Test with MMEngine (Recommended) *****************************')
# from mmengine.runner import Runner
#
# train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=10, val_interval=1)
# val_cfg = dict(type='ValLoop')
#
# optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.01))
#
# runner = Runner(model=model_cfg, work_dir='./work_dirs/guide',
#                 train_dataloader=train_dataloader_cfg,
#                 train_cfg=train_cfg,
#                 val_dataloader=val_dataloader_cfg,
#                 val_cfg=val_cfg,
#                 optim_wrapper=optim_wrapper,
#                 val_evaluator=[metric_cfg],
#                 default_scope='mmaction')
# runner.train()
