YOLOv11: installation, model training, tensorrtx acceleration, and image prediction from Qt

Table of Contents

  • I. YOLOv11 Python environment setup
  • II. YOLOv11 tensorrtx inference acceleration on Windows 10
  • III. Calling tensorrtx-accelerated YOLOv11 from Qt on Windows 10 for detection

I. YOLOv11 Python environment setup

  1. Base environment
    CUDA: cuda_11.8.0_522.06_windows
    cuDNN: cudnn-windows-x86_64-8.6.0.163_cuda11-archive
  2. Create the Python environment
    conda create --name yolov11 python=3.10 -y
    
  3. Install PyTorch
    pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu118
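
    To confirm that the CUDA 11.8 build of PyTorch is actually active, a quick sanity check (a suggested verification, not part of the original steps) can be run inside the yolov11 environment:

    import torch

    # Expect something like "2.6.0+cu118" and True if the GPU driver/CUDA setup is correct
    print(torch.__version__)
    print(torch.cuda.is_available())
    print(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "no CUDA device visible")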
    
  4. Install YOLOv11 (the ultralytics package)
    pip install ultralytics -i https://pypi.mirrors.ustc.edu.cn/simple/
    
  5. Install the remaining required packages
    pip install -r requirements.txt -i https://pypi.mirrors.ustc.edu.cn/simple/
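
    For completeness, here is a minimal training/prediction sketch with the ultralytics Python API; the dataset YAML, image path, and hyperparameters below are placeholders rather than values from this article:

    from ultralytics import YOLO

    # Load the pretrained yolo11n weights (downloaded automatically if not present)
    model = YOLO("yolo11n.pt")

    # Train on a custom dataset described by a YOLO-format data YAML (placeholder path)
    model.train(data="my_dataset.yaml", epochs=100, imgsz=640, batch=16, device=0)

    # Validate, then run a quick prediction to sanity-check the trained weights
    metrics = model.val()
    results = model.predict("test.jpg", save=True)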
    

II. YOLOv11 tensorrtx inference acceleration on Windows 10

  1. Clone the tensorrtx repository

    git clone https://github.com/wang-xinyu/tensorrtx.git
    
  2. Enter the yolo11 directory under tensorrtx and convert the model from .pt to .wts

    python gen_wts.py -w D:\code\ultralytics-main\yolo11n.pt -o yolo11n.wts -t detect
    

    Note: with PyTorch 2.6 you need to modify the torch.load call inside gen_wts.py and add weights_only=False (see the sketch below).
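
    PyTorch 2.6 changed the default of torch.load to weights_only=True, which rejects the full Ultralytics checkpoint object. A sketch of the change is shown below; the variable names are illustrative and only the weights_only=False argument matters:

    import torch

    pt_file = "yolo11n.pt"            # path passed to gen_wts.py via -w (illustrative)
    device = torch.device("cpu")

    # Add weights_only=False so the complete checkpoint (model object, not just tensors) can be loaded
    ckpt = torch.load(pt_file, map_location=device, weights_only=False)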

  3. Edit CMakeLists.txt
    Adjust the following paths to match where your own OpenCV, TensorRT, and dirent installations live:

    cmake_minimum_required(VERSION 3.10)
    project(yolov11)

    add_definitions(-std=c++11)
    add_definitions(-DAPI_EXPORTS)
    add_compile_definitions(NOMINMAX)
    set(CMAKE_CXX_STANDARD 11)
    set(CMAKE_BUILD_TYPE Debug)

    set(CMAKE_CUDA_ARCHITECTURES 70 75 80 86)
    set(CMAKE_CUDA_COMPILER "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.8/bin/nvcc.exe")
    enable_language(CUDA)

    include_directories(${PROJECT_SOURCE_DIR}/include)
    include_directories(${PROJECT_SOURCE_DIR}/plugin)

    # include and link dirs of cuda and tensorrt, you need adapt them if yours are different
    if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
      message("embed_platform on")
      include_directories(/usr/local/cuda/targets/aarch64-linux/include)
      link_directories(/usr/local/cuda/targets/aarch64-linux/lib)
    else()
      message("embed_platform off")

      # cuda
      find_package(CUDA REQUIRED)
      include_directories(${CUDA_INCLUDE_DIRS})

      # tensorrt
      set(TRT_DIR "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\TensorRT-8.6.0.12")
      set(TRT_INCLUDE_DIRS ${TRT_DIR}\\include)
      set(TRT_LIB_DIRS ${TRT_DIR}\\lib)
      include_directories(${TRT_INCLUDE_DIRS})
      link_directories(${TRT_LIB_DIRS})

      # opencv
      set(OpenCV_DIR "D:\\Program Files\\opencv\\build")
      set(OpenCV_INCLUDE_DIRS ${OpenCV_DIR}\\include)
      set(OpenCV_LIB_DIRS ${OpenCV_DIR}\\x64\\vc16\\lib)
      set(OpenCV_Debug_LIBS "opencv_world4110d.lib")
      set(OpenCV_Release_LIBS "opencv_world4110.lib")
      include_directories(${OpenCV_INCLUDE_DIRS})
      link_directories(${OpenCV_LIB_DIRS})

      # dirent
      set(Dirent_INCLUDE_DIRS "D:\\Program Files\\dirent\\include")
      include_directories(${Dirent_INCLUDE_DIRS})
    endif()

    add_library(myplugins SHARED ${PROJECT_SOURCE_DIR}/plugin/yololayer.cu)
    target_link_libraries(myplugins nvinfer cudart)

    file(GLOB_RECURSE SRCS ${PROJECT_SOURCE_DIR}/src/*.cpp ${PROJECT_SOURCE_DIR}/src/*.cu)

    add_executable(yolo11_det ${PROJECT_SOURCE_DIR}/yolo11_det.cpp ${SRCS})
    target_link_libraries(yolo11_det nvinfer)
    target_link_libraries(yolo11_det cudart)
    target_link_libraries(yolo11_det myplugins)
    target_link_libraries(yolo11_det ${OpenCV_Debug_LIBS})
    target_link_libraries(yolo11_det ${OpenCV_Release_LIBS})

    add_executable(yolo11_cls ${PROJECT_SOURCE_DIR}/yolo11_cls.cpp ${SRCS})
    target_link_libraries(yolo11_cls nvinfer)
    target_link_libraries(yolo11_cls cudart)
    target_link_libraries(yolo11_cls myplugins)
    target_link_libraries(yolo11_cls ${OpenCV_Debug_LIBS})
    target_link_libraries(yolo11_cls ${OpenCV_Release_LIBS})

    add_executable(yolo11_seg ${PROJECT_SOURCE_DIR}/yolo11_seg.cpp ${SRCS})
    target_link_libraries(yolo11_seg nvinfer)
    target_link_libraries(yolo11_seg cudart)
    target_link_libraries(yolo11_seg myplugins)
    target_link_libraries(yolo11_seg ${OpenCV_Debug_LIBS})
    target_link_libraries(yolo11_seg ${OpenCV_Release_LIBS})

    add_executable(yolo11_pose ${PROJECT_SOURCE_DIR}/yolo11_pose.cpp ${SRCS})
    target_link_libraries(yolo11_pose nvinfer)
    target_link_libraries(yolo11_pose cudart)
    target_link_libraries(yolo11_pose myplugins)
    target_link_libraries(yolo11_pose ${OpenCV_Debug_LIBS})
    target_link_libraries(yolo11_pose ${OpenCV_Release_LIBS})

    add_executable(yolo11_obb ${PROJECT_SOURCE_DIR}/yolo11_obb.cpp ${SRCS})
    target_link_libraries(yolo11_obb nvinfer)
    target_link_libraries(yolo11_obb cudart)
    target_link_libraries(yolo11_obb myplugins)
    target_link_libraries(yolo11_obb ${OpenCV_Debug_LIBS})
    target_link_libraries(yolo11_obb ${OpenCV_Release_LIBS})
    
  4. Generate the project with CMake

    mkdir build
    cd build
    cmake ..
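
    If cmake does not pick up your Visual Studio toolchain on its own, the generator can be stated explicitly; the generator name below is only an example for VS 2019 and should be matched to your installed version:

    cmake .. -G "Visual Studio 16 2019" -A x64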
    

  5. Open the generated solution in Visual Studio and build it.

  6. Convert the .wts file to a .engine file
    Before converting, adjust the configuration to match your own model (for example the class count and the kInputH/kInputW input resolution); the configuration header (config.h) sits in the yolo11 source tree. Then pass the following arguments to the built yolo11_det executable; the trailing n selects the model scale (n/s/m/l/x) and must match the .pt you exported:

    -s ..\yolo11n.wts yolo11n.engine n
    
  7. Run inference with the converted .engine
    Pass the following arguments to yolo11_det; the last argument selects the post-processing mode (g = GPU decode and NMS, c = CPU):

    -d yolo11n.engine D:\code\yolov5-6.1\data\images g
    


III. Calling tensorrtx-accelerated YOLOv11 from Qt on Windows 10 for detection

  1. Copy the files
    Copy the tensorrtx yolo11 include, plugin, and src directories into the Qt project; they are referenced by the CMakeLists.txt below.
  2. Modify the Qt project's CMakeLists.txt as follows:
    cmake_minimum_required(VERSION 3.5)

    project(yolov11Test LANGUAGES CXX)

    add_definitions(-std=c++11)
    add_definitions(-DAPI_EXPORTS)
    add_compile_definitions(NOMINMAX)
    set(CMAKE_CXX_STANDARD 17)
    set(CMAKE_CXX_STANDARD_REQUIRED ON)

    set(CMAKE_CUDA_ARCHITECTURES 70 75 80 86)
    set(CMAKE_CUDA_COMPILER "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.8/bin/nvcc.exe")
    enable_language(CUDA)

    include_directories(${PROJECT_SOURCE_DIR}/include)
    include_directories(${PROJECT_SOURCE_DIR}/plugin)

    # cuda
    find_package(CUDA REQUIRED)
    include_directories(${CUDA_INCLUDE_DIRS})

    # tensorrt
    set(TRT_DIR "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\TensorRT-8.6.0.12")
    set(TRT_INCLUDE_DIRS ${TRT_DIR}\\include)
    set(TRT_LIB_DIRS ${TRT_DIR}\\lib)
    include_directories(${TRT_INCLUDE_DIRS})
    link_directories(${TRT_LIB_DIRS})

    # opencv
    set(OpenCV_DIR "D:\\Program Files\\opencv\\build")
    set(OpenCV_INCLUDE_DIRS ${OpenCV_DIR}\\include)
    set(OpenCV_LIB_DIRS ${OpenCV_DIR}\\x64\\vc16\\lib)
    set(OpenCV_Debug_LIBS "opencv_world4110d.lib")
    set(OpenCV_Release_LIBS "opencv_world4110.lib")
    include_directories(${OpenCV_INCLUDE_DIRS})
    link_directories(${OpenCV_LIB_DIRS})

    # dirent
    set(Dirent_INCLUDE_DIRS "D:\\Program Files\\dirent\\include")
    include_directories(${Dirent_INCLUDE_DIRS})

    add_library(myplugins SHARED ${PROJECT_SOURCE_DIR}/plugin/yololayer.cu)
    target_link_libraries(myplugins nvinfer cudart)

    file(GLOB_RECURSE SRCS ${PROJECT_SOURCE_DIR}/src/*.cpp ${PROJECT_SOURCE_DIR}/src/*.cu)

    add_executable(yolov11Test main.cpp ${SRCS})
    target_link_libraries(yolov11Test nvinfer)
    target_link_libraries(yolov11Test cudart)
    target_link_libraries(yolov11Test myplugins)
    target_link_libraries(yolov11Test ${OpenCV_Debug_LIBS})
    target_link_libraries(yolov11Test ${OpenCV_Release_LIBS})

    install(TARGETS yolov11Test
        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
    )
  3. The main.cpp code is as follows:
    #include <fstream>
    #include <iostream>
    #include <opencv2/opencv.hpp>
    #include "cuda_utils.h"
    #include "logging.h"
    #include "model.h"
    #include "postprocess.h"
    #include "preprocess.h"
    #include "utils.h"

    Logger gLogger;
    using namespace nvinfer1;
    const int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;

    void deserialize_engine(std::string& engine_name, IRuntime** runtime, ICudaEngine** engine, IExecutionContext** context) {
        std::ifstream file(engine_name, std::ios::binary);
        if (!file.good()) {
            std::cerr << "read " << engine_name << " error!" << std::endl;
            assert(false);
        }
        size_t size = 0;
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        char* serialized_engine = new char[size];
        assert(serialized_engine);
        file.read(serialized_engine, size);
        file.close();

        *runtime = createInferRuntime(gLogger);
        assert(*runtime);
        *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
        assert(*engine);
        *context = (*engine)->createExecutionContext();
        assert(*context);
        delete[] serialized_engine;
    }

    void prepare_buffer(ICudaEngine* engine, float** input_buffer_device, float** output_buffer_device,
                        float** output_buffer_host, float** decode_ptr_host, float** decode_ptr_device,
                        std::string cuda_post_process) {
        assert(engine->getNbBindings() == 2);
        // In order to bind the buffers, we need to know the names of the input and output tensors.
        // Note that indices are guaranteed to be less than IEngine::getNbBindings()
        const int inputIndex = engine->getBindingIndex(kInputTensorName);
        const int outputIndex = engine->getBindingIndex(kOutputTensorName);
        assert(inputIndex == 0);
        assert(outputIndex == 1);
        // Create GPU buffers on device
        CUDA_CHECK(cudaMalloc((void**)input_buffer_device, kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
        CUDA_CHECK(cudaMalloc((void**)output_buffer_device, kBatchSize * kOutputSize * sizeof(float)));
        if (cuda_post_process == "c") {
            *output_buffer_host = new float[kBatchSize * kOutputSize];
        } else if (cuda_post_process == "g") {
            if (kBatchSize > 1) {
                std::cerr << "Do not yet support GPU post processing for multiple batches" << std::endl;
                exit(0);
            }
            // Allocate memory for decode_ptr_host and copy to device
            *decode_ptr_host = new float[1 + kMaxNumOutputBbox * bbox_element];
            CUDA_CHECK(cudaMalloc((void**)decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element)));
        }
    }

    void infer(IExecutionContext& context, cudaStream_t& stream, void** buffers, float* output, int batchsize,
               float* decode_ptr_host, float* decode_ptr_device, int model_bboxes, std::string cuda_post_process) {
        // infer on the batch asynchronously, and DMA output back to host
        auto start = std::chrono::system_clock::now();
        context.enqueueV2(buffers, stream, nullptr);
        if (cuda_post_process == "c") {
            CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchsize * kOutputSize * sizeof(float),
                                       cudaMemcpyDeviceToHost, stream));
            auto end = std::chrono::system_clock::now();
            std::cout << "inference time: "
                      << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
        } else if (cuda_post_process == "g") {
            CUDA_CHECK(cudaMemsetAsync(decode_ptr_device, 0, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), stream));
            cuda_decode((float*)buffers[1], model_bboxes, kConfThresh, decode_ptr_device, kMaxNumOutputBbox, stream);
            cuda_nms(decode_ptr_device, kNmsThresh, kMaxNumOutputBbox, stream);  // cuda nms
            CUDA_CHECK(cudaMemcpyAsync(decode_ptr_host, decode_ptr_device,
                                       sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element),
                                       cudaMemcpyDeviceToHost, stream));
            auto end = std::chrono::system_clock::now();
            std::cout << "inference and gpu postprocess time: "
                      << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
        }
        CUDA_CHECK(cudaStreamSynchronize(stream));
    }

    int main(int argc, char** argv) {
        // yolo11_det -s ../models/yolo11n.wts ../models/yolo11n.fp32.trt n
        // yolo11_det -d ../models/yolo11n.fp32.trt ../images c
        cudaSetDevice(kGpuId);
        std::string engine_name = "D:\\code\\tensorrtx\\yolo11\\build\\yolo11n.engine";  // path to the converted engine file
        std::string img_dir = "D:\\code\\yolov5-6.1\\data\\images\\";                    // directory of images to predict
        std::string cuda_post_process = "g";
        int model_bboxes;
        float gd = 0, gw = 0;
        int max_channels = 0;

        // Deserialize the engine from file
        IRuntime* runtime = nullptr;
        ICudaEngine* engine = nullptr;
        IExecutionContext* context = nullptr;
        deserialize_engine(engine_name, &runtime, &engine, &context);
        cudaStream_t stream;
        CUDA_CHECK(cudaStreamCreate(&stream));
        cuda_preprocess_init(kMaxInputImageSize);
        auto out_dims = engine->getBindingDimensions(1);
        model_bboxes = out_dims.d[0];

        // Prepare cpu and gpu buffers
        float* device_buffers[2];
        float* output_buffer_host = nullptr;
        float* decode_ptr_host = nullptr;
        float* decode_ptr_device = nullptr;

        // Read images from directory
        std::vector<std::string> file_names;
        if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
            std::cerr << "read_files_in_dir failed." << std::endl;
            return -1;
        }

        prepare_buffer(engine, &device_buffers[0], &device_buffers[1], &output_buffer_host, &decode_ptr_host,
                       &decode_ptr_device, cuda_post_process);

        // Batch predict
        for (size_t i = 0; i < file_names.size(); i += kBatchSize) {
            // Read a batch of images with OpenCV
            std::vector<cv::Mat> img_batch;
            std::vector<std::string> img_name_batch;
            for (size_t j = i; j < i + kBatchSize && j < file_names.size(); j++) {
                cv::Mat img = cv::imread(img_dir + "/" + file_names[j]);
                img_batch.push_back(img);
                img_name_batch.push_back(file_names[j]);
            }
            // Preprocess
            cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
            // Run inference
            infer(*context, stream, (void**)device_buffers, output_buffer_host, kBatchSize, decode_ptr_host,
                  decode_ptr_device, model_bboxes, cuda_post_process);
            // Dump the first 100 values of output_buffer_host, one per line
            // std::ofstream out("../models/output.txt");
            // for (int j = 0; j < 100; j++) {
            //     out << output_buffer_host[j] << std::endl;
            // }
            // out.close();
            std::vector<std::vector<Detection>> res_batch;
            if (cuda_post_process == "c") {
                // CPU non-maximum suppression (NMS)
                batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
            } else if (cuda_post_process == "g") {
                // Process GPU decode and NMS results
                batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
            }
            // Draw bounding boxes
            draw_bbox(img_batch, res_batch);
            // Show the images
            for (size_t j = 0; j < img_batch.size(); j++) {
                cv::imshow("results", img_batch[j]);
                cv::waitKey(0);
            }
            // Save the images
            for (size_t j = 0; j < img_batch.size(); j++) {
                cv::imwrite("_" + img_name_batch[j], img_batch[j]);
            }
        }

        // Release stream and buffers
        cudaStreamDestroy(stream);
        CUDA_CHECK(cudaFree(device_buffers[0]));
        CUDA_CHECK(cudaFree(device_buffers[1]));
        CUDA_CHECK(cudaFree(decode_ptr_device));
        delete[] decode_ptr_host;
        delete[] output_buffer_host;
        cuda_preprocess_destroy();
        // Destroy the engine
        delete context;
        delete engine;
        delete runtime;

        // Print histogram of the output distribution
        // std::cout << "\nOutput:\n\n";
        // for (unsigned int i = 0; i < kOutputSize; i++) {
        //     std::cout << prob[i] << ", ";
        //     if (i % 10 == 0) std::cout << std::endl;
        // }
        // std::cout << std::endl;
        return 0;
    }
