【Robot + Camera Communication】Unitree Robotics Camera Communication

https://github.com/unitreerobotics/xr_teleoperate/blob/main/README_zh-CN.md


Camera driver and server side

https://github.com/unitreerobotics/xr_teleoperate/blob/main/teleop/image_server/image_server.py

If the camera is a RealSense, once the driver is installed you can list the connected devices and their serial numbers with:

rs-enumerate-devices
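
If you prefer to enumerate devices from Python instead, pyrealsense2 (which the server already imports) exposes the same information. A minimal sketch:

import pyrealsense2 as rs

# Print the product name and serial number of every connected RealSense device.
ctx = rs.context()
for dev in ctx.query_devices():
    print(dev.get_info(rs.camera_info.name), dev.get_info(rs.camera_info.serial_number))

The printed serial numbers are the strings that head_camera_id_numbers / wrist_camera_id_numbers expect when the camera type is 'realsense'.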

Server code

import cv2
import zmq
import time
import struct
from collections import deque
import numpy as np
import pyrealsense2 as rs
import logging_mp
logger_mp = logging_mp.get_logger(__name__, level=logging_mp.DEBUG)


class RealSenseCamera(object):
    def __init__(self, img_shape, fps, serial_number=None, enable_depth=False) -> None:
        """
        img_shape: [height, width]
        serial_number: RealSense device serial number
        """
        self.img_shape = img_shape
        self.fps = fps
        self.serial_number = serial_number
        self.enable_depth = enable_depth

        align_to = rs.stream.color
        self.align = rs.align(align_to)
        self.init_realsense()

    def init_realsense(self):
        self.pipeline = rs.pipeline()
        config = rs.config()
        if self.serial_number is not None:
            config.enable_device(self.serial_number)
        config.enable_stream(rs.stream.color, self.img_shape[1], self.img_shape[0], rs.format.bgr8, self.fps)
        if self.enable_depth:
            config.enable_stream(rs.stream.depth, self.img_shape[1], self.img_shape[0], rs.format.z16, self.fps)

        profile = self.pipeline.start(config)
        self._device = profile.get_device()
        if self._device is None:
            logger_mp.error('[Image Server] pipe_profile.get_device() is None.')
        if self.enable_depth:
            assert self._device is not None
            depth_sensor = self._device.first_depth_sensor()
            self.g_depth_scale = depth_sensor.get_depth_scale()

        self.intrinsics = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()

    def get_frame(self):
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        if not color_frame:
            # Return a tuple so callers can always unpack (color, depth).
            return None, None
        color_image = np.asanyarray(color_frame.get_data())
        depth_image = None
        if self.enable_depth:
            depth_frame = aligned_frames.get_depth_frame()
            depth_image = np.asanyarray(depth_frame.get_data())
        return color_image, depth_image

    def release(self):
        self.pipeline.stop()


class OpenCVCamera():
    def __init__(self, device_id, img_shape, fps):
        """
        device_id: /dev/video* index or path
        img_shape: [height, width]
        """
        self.id = device_id
        self.fps = fps
        self.img_shape = img_shape
        self.cap = cv2.VideoCapture(self.id, cv2.CAP_V4L2)
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.img_shape[0])
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.img_shape[1])
        self.cap.set(cv2.CAP_PROP_FPS, self.fps)

        # Test whether the camera can actually deliver frames.
        if not self._can_read_frame():
            logger_mp.error(f"[Image Server] Camera {self.id} Error: Failed to initialize the camera or read frames. Exiting...")
            self.release()

    def _can_read_frame(self):
        success, _ = self.cap.read()
        return success

    def release(self):
        self.cap.release()

    def get_frame(self):
        ret, color_image = self.cap.read()
        if not ret:
            return None
        return color_image


class ImageServer:
    def __init__(self, config, port=5555, Unit_Test=False):
        """
        config example 1:
        {
            'fps': 30,                                                     # frames per second
            'head_camera_type': 'opencv',                                  # 'opencv' or 'realsense'
            'head_camera_image_shape': [480, 1280],                        # head camera resolution [height, width]
            'head_camera_id_numbers': [0],                                 # '/dev/video0' (opencv)
            'wrist_camera_type': 'realsense',
            'wrist_camera_image_shape': [480, 640],                        # wrist camera resolution [height, width]
            'wrist_camera_id_numbers': ["218622271789", "241222076627"],   # RealSense serial numbers
        }

        config example 2:
        {
            'fps': 30,
            'head_camera_type': 'realsense',
            'head_camera_image_shape': [480, 640],
            'head_camera_id_numbers': ["218622271739"],                    # RealSense serial number
            'wrist_camera_type': 'opencv',
            'wrist_camera_image_shape': [480, 640],
            'wrist_camera_id_numbers': [0, 1],                             # '/dev/video0' and '/dev/video1' (opencv)
        }

        If you are not using the wrist cameras, comment out their entries:
        {
            'fps': 30,
            'head_camera_type': 'opencv',
            'head_camera_image_shape': [480, 1280],
            'head_camera_id_numbers': [0],
            # 'wrist_camera_type': 'realsense',
            # 'wrist_camera_image_shape': [480, 640],
            # 'wrist_camera_id_numbers': ["218622271789", "241222076627"],
        }
        """
        logger_mp.info(config)
        self.fps = config.get('fps', 30)
        self.head_camera_type = config.get('head_camera_type', 'opencv')
        self.head_image_shape = config.get('head_camera_image_shape', [480, 640])    # (height, width)
        self.head_camera_id_numbers = config.get('head_camera_id_numbers', [0])
        self.wrist_camera_type = config.get('wrist_camera_type', None)
        self.wrist_image_shape = config.get('wrist_camera_image_shape', [480, 640])  # (height, width)
        self.wrist_camera_id_numbers = config.get('wrist_camera_id_numbers', None)
        self.port = port
        self.Unit_Test = Unit_Test

        # Initialize head cameras
        self.head_cameras = []
        if self.head_camera_type == 'opencv':
            for device_id in self.head_camera_id_numbers:
                self.head_cameras.append(OpenCVCamera(device_id=device_id, img_shape=self.head_image_shape, fps=self.fps))
        elif self.head_camera_type == 'realsense':
            for serial_number in self.head_camera_id_numbers:
                self.head_cameras.append(RealSenseCamera(img_shape=self.head_image_shape, fps=self.fps, serial_number=serial_number))
        else:
            logger_mp.warning(f"[Image Server] Unsupported head_camera_type: {self.head_camera_type}")

        # Initialize wrist cameras if provided
        self.wrist_cameras = []
        if self.wrist_camera_type and self.wrist_camera_id_numbers:
            if self.wrist_camera_type == 'opencv':
                for device_id in self.wrist_camera_id_numbers:
                    self.wrist_cameras.append(OpenCVCamera(device_id=device_id, img_shape=self.wrist_image_shape, fps=self.fps))
            elif self.wrist_camera_type == 'realsense':
                for serial_number in self.wrist_camera_id_numbers:
                    self.wrist_cameras.append(RealSenseCamera(img_shape=self.wrist_image_shape, fps=self.fps, serial_number=serial_number))
            else:
                logger_mp.warning(f"[Image Server] Unsupported wrist_camera_type: {self.wrist_camera_type}")

        # Set up the ZeroMQ PUB socket
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PUB)
        self.socket.bind(f"tcp://*:{self.port}")

        if self.Unit_Test:
            self._init_performance_metrics()

        for cam in self.head_cameras:
            if isinstance(cam, OpenCVCamera):
                logger_mp.info(f"[Image Server] Head camera {cam.id} resolution: {cam.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)} x {cam.cap.get(cv2.CAP_PROP_FRAME_WIDTH)}")
            elif isinstance(cam, RealSenseCamera):
                logger_mp.info(f"[Image Server] Head camera {cam.serial_number} resolution: {cam.img_shape[0]} x {cam.img_shape[1]}")
            else:
                logger_mp.warning("[Image Server] Unknown camera type in head_cameras.")

        for cam in self.wrist_cameras:
            if isinstance(cam, OpenCVCamera):
                logger_mp.info(f"[Image Server] Wrist camera {cam.id} resolution: {cam.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)} x {cam.cap.get(cv2.CAP_PROP_FRAME_WIDTH)}")
            elif isinstance(cam, RealSenseCamera):
                logger_mp.info(f"[Image Server] Wrist camera {cam.serial_number} resolution: {cam.img_shape[0]} x {cam.img_shape[1]}")
            else:
                logger_mp.warning("[Image Server] Unknown camera type in wrist_cameras.")

        logger_mp.info("[Image Server] Image server has started, waiting for client connections...")

    def _init_performance_metrics(self):
        self.frame_count = 0           # total frames sent
        self.time_window = 1.0         # time window for FPS calculation (seconds)
        self.frame_times = deque()     # timestamps of frames sent within the window
        self.start_time = time.time()  # start time of streaming

    def _update_performance_metrics(self, current_time):
        self.frame_times.append(current_time)
        # Drop timestamps that fell out of the time window
        while self.frame_times and self.frame_times[0] < current_time - self.time_window:
            self.frame_times.popleft()
        self.frame_count += 1

    def _print_performance_metrics(self, current_time):
        if self.frame_count % 30 == 0:
            elapsed_time = current_time - self.start_time
            real_time_fps = len(self.frame_times) / self.time_window
            logger_mp.info(f"[Image Server] Real-time FPS: {real_time_fps:.2f}, Total frames sent: {self.frame_count}, Elapsed time: {elapsed_time:.2f} sec")

    def _close(self):
        for cam in self.head_cameras:
            cam.release()
        for cam in self.wrist_cameras:
            cam.release()
        self.socket.close()
        self.context.term()
        logger_mp.info("[Image Server] The server has been closed.")

    def send_process(self):
        try:
            while True:
                head_frames = []
                for cam in self.head_cameras:
                    if self.head_camera_type == 'opencv':
                        color_image = cam.get_frame()
                    else:  # realsense
                        color_image, _depth_image = cam.get_frame()
                    if color_image is None:
                        logger_mp.error("[Image Server] Head camera frame read error.")
                        break
                    head_frames.append(color_image)
                if len(head_frames) != len(self.head_cameras):
                    break
                head_color = cv2.hconcat(head_frames)

                if self.wrist_cameras:
                    wrist_frames = []
                    for cam in self.wrist_cameras:
                        if self.wrist_camera_type == 'opencv':
                            color_image = cam.get_frame()
                        else:  # realsense
                            color_image, _depth_image = cam.get_frame()
                        if color_image is None:
                            logger_mp.error("[Image Server] Wrist camera frame read error.")
                            break
                        wrist_frames.append(color_image)
                    if len(wrist_frames) != len(self.wrist_cameras):
                        break
                    wrist_color = cv2.hconcat(wrist_frames)
                    # Concatenate head and wrist frames side by side
                    full_color = cv2.hconcat([head_color, wrist_color])
                else:
                    full_color = head_color

                ret, buffer = cv2.imencode('.jpg', full_color)
                if not ret:
                    logger_mp.error("[Image Server] Frame imencode failed.")
                    continue
                jpg_bytes = buffer.tobytes()

                if self.Unit_Test:
                    timestamp = time.time()
                    frame_id = self.frame_count
                    header = struct.pack('dI', timestamp, frame_id)  # 8-byte double + 4-byte unsigned int
                    message = header + jpg_bytes
                else:
                    message = jpg_bytes

                self.socket.send(message)

                if self.Unit_Test:
                    current_time = time.time()
                    self._update_performance_metrics(current_time)
                    self._print_performance_metrics(current_time)
        except KeyboardInterrupt:
            logger_mp.warning("[Image Server] Interrupted by user.")
        finally:
            self._close()


if __name__ == "__main__":
    config = {
        'fps': 30,
        'head_camera_type': 'opencv',
        'head_camera_image_shape': [480, 1280],  # head camera resolution
        'head_camera_id_numbers': [0],
        'wrist_camera_type': 'opencv',
        'wrist_camera_image_shape': [480, 640],  # wrist camera resolution
        'wrist_camera_id_numbers': [2, 4],
    }
    server = ImageServer(config, Unit_Test=False)
    server.send_process()
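
A note on the wire format: when Unit_Test is enabled, every ZeroMQ message starts with a struct-packed header, a native-endian 8-byte double timestamp followed by a 4-byte unsigned frame ID (format 'dI'), with the JPEG bytes after it; otherwise the message is the JPEG alone. A small round-trip sketch of that header (the payload below is a stand-in, not a real JPEG):

import struct
import time

HEADER_FMT = 'dI'
header_size = struct.calcsize(HEADER_FMT)  # 12 bytes on typical platforms

# Sender side: prepend the header to the encoded frame.
jpg_bytes = b'\xff\xd8 stand-in payload'
message = struct.pack(HEADER_FMT, time.time(), 42) + jpg_bytes

# Receiver side: split the header off and recover the fields.
timestamp, frame_id = struct.unpack(HEADER_FMT, message[:header_size])
payload = message[header_size:]
assert frame_id == 42 and payload == jpg_bytes

The client below computes header_size the same way, so both ends stay consistent as long as they share the same native struct layout.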

Client on the same network segment

import cv2
import zmq
import numpy as np
import time
import struct
from collections import deque
from multiprocessing import shared_memory
import logging_mp
logger_mp = logging_mp.get_logger(__name__)


class ImageClient:
    def __init__(self, tv_img_shape=None, tv_img_shm_name=None, wrist_img_shape=None, wrist_img_shm_name=None,
                 image_show=False, server_address="192.168.123.164", port=5555, Unit_Test=False):
        """
        tv_img_shape: expected head camera resolution shape (H, W, C); must match the output of the image server.
        tv_img_shm_name: name of the shared memory block used to pass head images across processes to Vuer.
        wrist_img_shape: expected wrist camera resolution shape (H, W, C); same layout as tv_img_shape.
        wrist_img_shm_name: name of the shared memory block used to pass wrist images across processes.
        image_show: whether to display received images in real time.
        server_address: IP address of the machine running the image server script.
        port: port to connect to; must match the image server.
        Unit_Test: when True on both server and client, measures image transfer latency,
                   network jitter, frame loss rate and other information.
        """
        self.running = True
        self._image_show = image_show
        self._server_address = server_address
        self._port = port

        self.tv_img_shape = tv_img_shape
        self.wrist_img_shape = wrist_img_shape

        self.tv_enable_shm = False
        if self.tv_img_shape is not None and tv_img_shm_name is not None:
            self.tv_image_shm = shared_memory.SharedMemory(name=tv_img_shm_name)
            self.tv_img_array = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=self.tv_image_shm.buf)
            self.tv_enable_shm = True

        self.wrist_enable_shm = False
        if self.wrist_img_shape is not None and wrist_img_shm_name is not None:
            self.wrist_image_shm = shared_memory.SharedMemory(name=wrist_img_shm_name)
            self.wrist_img_array = np.ndarray(wrist_img_shape, dtype=np.uint8, buffer=self.wrist_image_shm.buf)
            self.wrist_enable_shm = True

        # Performance evaluation parameters
        self._enable_performance_eval = Unit_Test
        if self._enable_performance_eval:
            self._init_performance_metrics()

    def _init_performance_metrics(self):
        self._frame_count = 0        # total frames received
        self._last_frame_id = -1     # last received frame ID
        # Real-time FPS calculation over a sliding time window
        self._time_window = 1.0      # window size (seconds)
        self._frame_times = deque()  # receive timestamps within the window
        # Data transmission quality metrics
        self._latencies = deque()    # per-frame latencies within the window
        self._lost_frames = 0        # total lost frames
        self._total_frames = 0       # expected total frames based on frame IDs

    def _update_performance_metrics(self, timestamp, frame_id, receive_time):
        # Update latency window
        latency = receive_time - timestamp
        self._latencies.append(latency)
        while self._latencies and self._frame_times and self._latencies[0] < receive_time - self._time_window:
            self._latencies.popleft()
        # Update frame-time window
        self._frame_times.append(receive_time)
        while self._frame_times and self._frame_times[0] < receive_time - self._time_window:
            self._frame_times.popleft()
        # Update frame counts for lost-frame calculation
        expected_frame_id = self._last_frame_id + 1 if self._last_frame_id != -1 else frame_id
        if frame_id != expected_frame_id:
            lost = frame_id - expected_frame_id
            if lost < 0:
                logger_mp.info(f"[Image Client] Received out-of-order frame ID: {frame_id}")
            else:
                self._lost_frames += lost
                logger_mp.warning(f"[Image Client] Detected lost frames: {lost}, Expected frame ID: {expected_frame_id}, Received frame ID: {frame_id}")
        self._last_frame_id = frame_id
        self._total_frames = frame_id + 1
        self._frame_count += 1

    def _print_performance_metrics(self, receive_time):
        if self._frame_count % 30 == 0:
            # Calculate real-time FPS
            real_time_fps = len(self._frame_times) / self._time_window if self._time_window > 0 else 0
            # Calculate latency metrics
            if self._latencies:
                avg_latency = sum(self._latencies) / len(self._latencies)
                max_latency = max(self._latencies)
                min_latency = min(self._latencies)
                jitter = max_latency - min_latency
            else:
                avg_latency = max_latency = min_latency = jitter = 0
            # Calculate lost frame rate
            lost_frame_rate = (self._lost_frames / self._total_frames) * 100 if self._total_frames > 0 else 0
            logger_mp.info(f"[Image Client] Real-time FPS: {real_time_fps:.2f}, Avg Latency: {avg_latency*1000:.2f} ms, Max Latency: {max_latency*1000:.2f} ms, Min Latency: {min_latency*1000:.2f} ms, Jitter: {jitter*1000:.2f} ms, Lost Frame Rate: {lost_frame_rate:.2f}%")

    def _close(self):
        self._socket.close()
        self._context.term()
        if self._image_show:
            cv2.destroyAllWindows()
        logger_mp.info("Image client has been closed.")

    def receive_process(self):
        # Set up ZeroMQ context and SUB socket
        self._context = zmq.Context()
        self._socket = self._context.socket(zmq.SUB)
        self._socket.connect(f"tcp://{self._server_address}:{self._port}")
        self._socket.setsockopt_string(zmq.SUBSCRIBE, "")

        logger_mp.info("Image client has started, waiting to receive data...")
        try:
            while self.running:
                # Receive message
                message = self._socket.recv()
                receive_time = time.time()

                if self._enable_performance_eval:
                    header_size = struct.calcsize('dI')
                    try:
                        # Attempt to extract header and image data
                        header = message[:header_size]
                        jpg_bytes = message[header_size:]
                        timestamp, frame_id = struct.unpack('dI', header)
                    except struct.error as e:
                        logger_mp.warning(f"[Image Client] Error unpacking header: {e}, discarding message.")
                        continue
                else:
                    # No header; the entire message is image data
                    jpg_bytes = message

                # Decode the JPEG image
                np_img = np.frombuffer(jpg_bytes, dtype=np.uint8)
                current_image = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
                if current_image is None:
                    logger_mp.warning("[Image Client] Failed to decode image.")
                    continue

                if self.tv_enable_shm:
                    np.copyto(self.tv_img_array, np.array(current_image[:, :self.tv_img_shape[1]]))
                if self.wrist_enable_shm:
                    np.copyto(self.wrist_img_array, np.array(current_image[:, -self.wrist_img_shape[1]:]))

                if self._image_show:
                    height, width = current_image.shape[:2]
                    resized_image = cv2.resize(current_image, (width // 2, height // 2))
                    cv2.imshow('Image Client Stream', resized_image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.running = False

                if self._enable_performance_eval:
                    self._update_performance_metrics(timestamp, frame_id, receive_time)
                    self._print_performance_metrics(receive_time)
        except KeyboardInterrupt:
            logger_mp.info("Image client interrupted by user.")
        except Exception as e:
            logger_mp.warning(f"[Image Client] An error occurred while receiving data: {e}")
        finally:
            self._close()


if __name__ == "__main__":
    # example 1: write received frames into shared memory
    # tv_img_shape = (480, 1280, 3)
    # img_shm = shared_memory.SharedMemory(create=True, size=int(np.prod(tv_img_shape)) * np.uint8().itemsize)
    # img_array = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=img_shm.buf)
    # img_client = ImageClient(tv_img_shape=tv_img_shape, tv_img_shm_name=img_shm.name)
    # img_client.receive_process()

    # example 2: display the stream, optionally with performance evaluation enabled
    # client = ImageClient(image_show=True, server_address='127.0.0.1', Unit_Test=True)  # local test
    client = ImageClient(image_show=True, server_address='192.168.123.164', Unit_Test=False)  # deployment test
    client.receive_process()
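
When tv_img_shm_name / wrist_img_shm_name are supplied, the client copies each decoded frame into shared memory so another process (e.g. the Vuer-based viewer mentioned in the docstring) can read it without re-serializing it. A minimal consumer sketch; the block name 'tv_shm' and the shape here are illustrative and must match whatever process created the block and passed its name to ImageClient:

import numpy as np
from multiprocessing import shared_memory

tv_img_shape = (480, 1280, 3)  # must match the server's concatenated output (H, W, C)

# Attach to the existing block (created elsewhere, as in example 1 above).
shm = shared_memory.SharedMemory(name='tv_shm')
frame = np.ndarray(tv_img_shape, dtype=np.uint8, buffer=shm.buf)

latest = frame.copy()  # snapshot the most recent frame written by the client
shm.close()            # detach without destroying the block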
