A Hands-On Guide to LoRA Fine-Tuning and Inference with Qwen2.5-3B-Instruct

Introduction
Fine-tuning large language models (LLMs) is one of the hottest topics in AI today, and parameter-efficient fine-tuning methods such as LoRA attract attention for their low cost and high efficiency. This article walks you step by step through LoRA fine-tuning of the Qwen2.5-3B-Instruct model and building a complete inference pipeline.

1. Environment Setup

1.1 Hardware Requirements

- GPU: at least 16 GB of VRAM (e.g., NVIDIA RTX 3090/A10)
- RAM: 32 GB or more
- Storage: 50 GB of free space

To get straight to the point, the complete fine-tuning code and the complete inference code are given first. The inference code requires a JSONL file named A1.jsonl, whose internal structure is shown below:
(Figure: example lines from A1.jsonl — screenshot not reproduced here)
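Since the screenshot is unavailable, here is a plausible reconstruction based on the fields the inference code actually reads: each line is a JSON object with an id, a prompt, and a type that selects one of the sampling presets (math, choice, code-generate, or generic-generate). The concrete field contents below are assumptions for illustration:

{"id": 1, "prompt": "Solve for x: 2x + 5 = 15", "type": "math"}
{"id": 2, "prompt": "Write a Python function that reverses a string", "type": "code-generate"}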

Complete fine-tuning code

from modelscope import AutoModelForCausalLM, AutoTokenizer
import torch
import os
from datasets import load_dataset, Dataset
import pandas as pd
from trl import SFTTrainer, SFTConfig
from transformers import TextStreamer, TrainerCallback
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
import gc
from tqdm.auto import tqdm
import re
import time

# Environment setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ.pop("LOCAL_RANK", None)

print(f"Number of GPU devices: {torch.cuda.device_count()}")
if torch.cuda.is_available():
    print(f"Current GPU: {torch.cuda.get_device_name(0)}")
    gpu_mem = torch.cuda.get_device_properties(0).total_memory / 1024**3
    print(f"Available GPU memory: {gpu_mem:.2f} GB")
else:
    gpu_mem = 0
    print("No GPU detected; running on CPU (training will be very slow)")

# Model path
model_name = "/root/autodl-tmp/Qwen/Qwen2.5-3B-Instruct"

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token

# Load the model (with retries)
max_retries = 3
model = None
for attempt in range(max_retries):
    try:
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            load_in_8bit=True,
            max_memory={0: f"{int(gpu_mem * 0.7)}GB"} if torch.cuda.is_available() else None
        )
        model.enable_input_require_grads()
        break
    except Exception as e:
        print(f"Model load attempt {attempt + 1}/{max_retries} failed: {str(e)[:400]}...")
        if attempt == max_retries - 1:
            raise
        time.sleep(5)

# LoRA configuration
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    inference_mode=False,
    r=4,
    lora_alpha=8,
    lora_dropout=0.1,
)
model = get_peft_model(model, config)

# ============== Dataset loading ==============
try:
    # Load the full dataset first
    full_ds = load_dataset("bespokelabs/Bespoke-Stratos-17k", cache_dir='./data/reasoning', split="train")
    # Randomly sample 2000 examples
    ds = full_ds.shuffle(seed=3407).select(range(2000))
except Exception as e:
    print(f"Dataset loading failed: {e}")
    # Fallback: reuse the local cache
    full_ds = load_dataset("bespokelabs/Bespoke-Stratos-17k", cache_dir='./data/reasoning', split="train",
                           download_mode="reuse_cache_if_exists")
    ds = full_ds.shuffle(seed=3407).select(range(2000))

def generate_a_format_data(examples):
    formatted_data = []
    for conv in examples["conversations"]:
        user_msg = conv[0]["value"]
        assistant_msg = conv[1]["value"]
        # Clean up the user question
        user_msg = user_msg.replace("Return your final response within \\boxed{}.", "").strip()
        # Extract the reasoning part
        think_part = ""
        if "<|begin_of_thought|>" in assistant_msg:
            think_part = assistant_msg.split("<|begin_of_thought|>")[1].split("<|end_of_thought|>")[0].strip()
        # Extract the answer
        answer_part = ""
        if "\\boxed{" in assistant_msg:
            answer_part = assistant_msg.split("\\boxed{")[1].split("}")[0]
        elif "<|begin_of_solution|>" in assistant_msg:
            answer_part = assistant_msg.split("<|begin_of_solution|>")[1].split("<|end_of_solution|>")[0].strip()
        # Build the standardized format
        formatted_data.append([
            {"role": "user", "content": user_msg},
            {"role": "assistant", "content": f"<think>{think_part}</think> <answer>{answer_part}</answer>"}
        ])
    return {"conversations": formatted_data}

# Data processing (with shuffling for randomness)
processed_ds = ds.map(
    generate_a_format_data,
    batched=True,
    batch_size=5,
    remove_columns=ds.column_names
).shuffle(seed=3407)  # key step: shuffle so the sample order is random

# Format validation
def validate_training_data(dataset, num_samples=3):
    print(f"\n=== Format check (first {num_samples} samples) ===")
    for i in range(min(num_samples, len(dataset))):
        content = dataset[i]["conversations"][1]["content"]
        has_think = "<think>" in content and "</think>" in content
        has_answer = "<answer>" in content and "</answer>" in content
        print(f"Sample {i+1}: {'✓' if has_think and has_answer else '✗'}")
        if not (has_think and has_answer):
            print(f"Problematic content: {content[:100]}...")

validate_training_data(processed_ds)

# Apply the chat template
reasoning_conversations = tokenizer.apply_chat_template(
    processed_ds["conversations"],
    tokenize=False,
    add_generation_prompt=True
)

# Build the training set
df = pd.DataFrame({"text": reasoning_conversations})
train_ds = Dataset.from_pandas(df).shuffle(seed=3407)

# Training callback
class TQDMCallback(TrainerCallback):
    def on_train_begin(self, args, state, control, **kwargs):
        self.total_steps = state.max_steps
        self.progress_bar = tqdm(total=self.total_steps, desc="Training",
                                 bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]")

    def on_step_end(self, args, state, control, logs=None, **kwargs):
        self.progress_bar.update(1)
        if logs and "loss" in logs:
            self.progress_bar.set_postfix({"loss": f"{logs['loss']:.4f}"})
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def on_train_end(self, args, state, control, **kwargs):
        self.progress_bar.close()
        if state.log_history:
            final_loss = state.log_history[-1].get("loss", float('nan'))
            print(f"Training complete! Final loss: {final_loss:.4f}")

# Training configuration
training_args = SFTConfig(
    output_dir="./lora_model_a榜",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,
    warmup_steps=3,
    num_train_epochs=2,
    learning_rate=1e-4,
    logging_steps=1,
    optim="adamw_8bit",
    weight_decay=0.01,
    lr_scheduler_type="linear",
    seed=3407,
    report_to="none",
    fp16=torch.cuda.is_available(),
    max_grad_norm=0.5,
    logging_first_step=True,
    save_steps=10,
    max_seq_length=1500,
)

# Trainer
trainer = SFTTrainer(
    model=model,
    train_dataset=train_ds,
    eval_dataset=None,
    callbacks=[TQDMCallback()],
    args=training_args,
)

# Free memory
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()

# Start training
print("\nStarting training...")
trainer.train()

# Save the model
print("\nSaving model...")
model.save_pretrained("lora_model_a榜")
tokenizer.save_pretrained("lora_model_a榜")

# Cleanup
del model
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()

# ============== Inference smoke test ==============
print("\n=== Loading the fine-tuned model for an inference test ===")

def load_finetuned_model():
    base_model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",
        trust_remote_code=True
    )
    model = PeftModel.from_pretrained(
        base_model,
        "lora_model_a榜",
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    )
    model.eval()
    return model

def ask(question, temperature=0.1, max_new_tokens=2048):
    print(f"\nQuestion: {question}")
    model = load_finetuned_model()  # note: reloads the model on every call
    messages = [{"role": "user", "content": question}]
    # Apply the chat template
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # Stream the output
    streamer = TextStreamer(tokenizer, skip_prompt=True)
    print("\nModel answer:")
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            streamer=streamer,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
    full_output = tokenizer.decode(outputs[0], skip_special_tokens=False)
    # Format validation
    if "<think>" not in full_output or "<answer>" not in full_output:
        print("\nWarning: output is missing the required <think>/<answer> tags")
    return full_output

# Test the mushroom water-content problem
question = (
    "You have 1000 kg of mushrooms with a water content of 99%. "
    "After drying for a few days, the water content drops to 98%. "
    "How much water needs to be evaporated?"
)
answer = ask(question)
print("\nFull output:")
print(answer[:1500] + ("..." if len(answer) > 1500 else ""))

# Test a few other questions
test_questions = [
    "Calculate the area of a circle with radius 5",
    "Solve for x: 2x + 5 = 15",
    "Explain Newton's first law of motion"
]
for q in test_questions:
    ask(q, temperature=0.1)

Complete inference code

import json
import os
import torch
from typing import Dict, List, Union
from vllm import LLM, SamplingParams
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from dataclasses import dataclass

@dataclass
class GenerationConfig:
    max_tokens: int
    temperature: float = 0.3
    top_p: float = 0.95
    n: int = 1

class Infer:
    DEFAULT_MODEL = "/root/autodl-tmp/Qwen/Qwen2.5-3B-Instruct"
    TEMP_MODEL_DIR = "merged_temp_model"

    def __init__(self, base_model: str = DEFAULT_MODEL,
                 lora_path: str = "lora_model_a榜",
                 device: str = "auto"):
        self._validate_paths(base_model, lora_path)
        self.device = device
        self.tokenizer = self._load_tokenizer(base_model)
        self.base_model = self._load_base_model(base_model)
        self.merged_model = self._merge_lora(lora_path)
        self.llm = self._init_vllm_engine()
        self.sampling_params = self._init_sampling_params()

    def _validate_paths(self, *paths):
        for path in paths:
            if not os.path.exists(path) and not path.startswith(("http://", "https://")):
                if "/" not in path:
                    raise FileNotFoundError(f"Path does not exist: {path}")

    def _load_tokenizer(self, model_path: str):
        try:
            tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            special_tokens = ["<think>", "</think>", "<answer>", "</answer>"]
            tokenizer.add_special_tokens({"additional_special_tokens": special_tokens})
            return tokenizer
        except Exception as e:
            raise RuntimeError(f"Failed to load tokenizer: {str(e)}")

    def _load_base_model(self, model_path: str):
        try:
            model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch.bfloat16,
                device_map=self.device,
                trust_remote_code=True
            )
            model.resize_token_embeddings(len(self.tokenizer))
            return model
        except Exception as e:
            raise RuntimeError(f"Failed to load base model: {str(e)}")

    def _merge_lora(self, lora_path: str):
        try:
            print("Merging LoRA weights...")
            lora_model = PeftModel.from_pretrained(
                self.base_model,
                lora_path,
                torch_dtype=torch.bfloat16
            )
            merged_model = lora_model.merge_and_unload()
            # Quick sanity check on the merged model
            test_input = "Calculate the area of a circle with radius 5"
            messages = [{"role": "user", "content": test_input}]
            input_text = self.tokenizer.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            inputs = self.tokenizer(input_text, return_tensors="pt").to(self.base_model.device)
            with torch.no_grad():
                outputs = merged_model.generate(
                    **inputs,
                    max_new_tokens=2000,
                    temperature=0.1,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id
                )
            output_text = self.tokenizer.decode(outputs[0], skip_special_tokens=False)
            print("\n=== Test output from the merged model ===")
            print(output_text)
            # Save the merged model to disk so vLLM can load it
            os.makedirs(self.TEMP_MODEL_DIR, exist_ok=True)
            merged_model.save_pretrained(self.TEMP_MODEL_DIR)
            self.tokenizer.save_pretrained(self.TEMP_MODEL_DIR)
            print("LoRA weights merged successfully")
            return merged_model
        except Exception as e:
            raise RuntimeError(f"Failed to merge LoRA: {str(e)}")

    def _init_vllm_engine(self):
        try:
            return LLM(
                model=self.TEMP_MODEL_DIR,
                tokenizer=self.TEMP_MODEL_DIR,
                trust_remote_code=True,
                max_model_len=2048,
                gpu_memory_utilization=0.8,
                enforce_eager=True
            )
        except Exception as e:
            raise RuntimeError(f"Failed to initialize vLLM engine: {str(e)}")

    def _init_sampling_params(self) -> Dict[str, SamplingParams]:
        base_params = {
            "temperature": 0.3,
            "top_p": 0.90,
            "repetition_penalty": 1.5,
            "stop": ["</answer>"],
            "skip_special_tokens": False
        }
        return {
            "math": SamplingParams(**{**base_params, "max_tokens": 1024}),
            "choice": SamplingParams(**{**base_params, "max_tokens": 1024}),
            "code-generate": SamplingParams(**{**base_params, "max_tokens": 2048, "n": 3}),
            "generic-generate": SamplingParams(**{**base_params, "max_tokens": 1024})
        }

    def _validate_output_format(self, text: str) -> bool:
        required_components = ["<think>", "</think>", "<answer>", "</answer>"]
        return all(comp in text for comp in required_components)

    def infer(self, data_file: str = "A1.jsonl") -> Dict:
        if not os.path.exists(data_file):
            raise FileNotFoundError(f"Data file does not exist: {data_file}")
        try:
            with open(data_file, 'r', encoding='utf-8') as f:
                data = [json.loads(line) for line in f]
        except Exception as e:
            raise RuntimeError(f"Failed to read data file: {str(e)}")
        results = {"result": {"results": []}}
        for item in data:
            try:
                messages = [{"role": "user", "content": item["prompt"]}]
                prompt = self.tokenizer.apply_chat_template(
                    messages, tokenize=False, add_generation_prompt=True
                )
                outputs = self.llm.generate(prompt, self.sampling_params[item["type"]])
                if item["type"] == "code-generate":
                    generated_text = [o.text for output in outputs for o in output.outputs][:3]
                else:
                    generated_text = outputs[0].outputs[0].text
                results["result"]["results"].append({
                    "id": item["id"],
                    "content": generated_text
                })
            except Exception as e:
                print(f"Failed to process sample {item.get('id')}: {str(e)}")
                results["result"]["results"].append({
                    "id": item.get("id"),
                    "error": str(e)
                })
        return results

    def cleanup(self):
        if os.path.exists(self.TEMP_MODEL_DIR):
            import shutil
            shutil.rmtree(self.TEMP_MODEL_DIR)

if __name__ == "__main__":
    infer = None
    try:
        print("Initializing the inference engine...")
        infer = Infer(
            base_model="/root/autodl-tmp/Qwen/Qwen2.5-3B-Instruct",
            lora_path="lora_model_a榜"
        )
        results = infer.infer(data_file="A1.jsonl")
        with open("res1.json", "w", encoding="utf-8") as f:
            json.dump(results, f, ensure_ascii=False, indent=2)
        print("Inference complete; results saved to res1.json")
    except Exception as e:
        print(f"Run failed: {str(e)}")
    finally:
        if infer is not None:
            infer.cleanup()

Installing the core libraries

pip install torch transformers datasets peft trl vllm -U
pip install modelscope -U

2. Model Loading and Configuration

2.1 Loading the Model and Tokenizer

from modelscope import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "/root/autodl-tmp/Qwen/Qwen2.5-3B-Instruct"

Load the tokenizer

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # use EOS as the padding token

Load the model (8-bit quantization to save VRAM)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    load_in_8bit=True,
    trust_remote_code=True
)

2.2 LoRA Configuration

from peft import LoraConfig, TaskType, get_peft_model

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    inference_mode=False,
    r=8,  # LoRA rank
    lora_alpha=16,
    lora_dropout=0.05,
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # show the fraction of trainable parameters
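For intuition about what print_trainable_parameters() reports: each adapted weight matrix of shape (d_out, d_in) gains two low-rank factors, B (d_out × r) and A (r × d_in), i.e. r·(d_in + d_out) extra parameters per module. A back-of-the-envelope sketch — the dimensions below are illustrative placeholders, not the actual Qwen2.5-3B shapes:

# LoRA adds B (d_out x r) and A (r x d_in) beside each frozen weight,
# so the extra parameter count per adapted matrix is r * (d_in + d_out).
r = 8
shapes = {"q_proj": (2048, 2048), "k_proj": (256, 2048),   # hypothetical shapes
          "v_proj": (256, 2048), "o_proj": (2048, 2048)}
extra = sum(r * (d_in + d_out) for (d_out, d_in) in shapes.values())
print(f"LoRA params per layer (hypothetical shapes): {extra:,}")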

3. Data Processing and Training

3.1 Dataset Preparation

We use the Bespoke-Stratos-17k dataset, which contains 17k structured question-answer examples:

from datasets import load_dataset

dataset = load_dataset("bespokelabs/Bespoke-Stratos-17k", split="train")

3.2 Data Formatting

Convert the raw data into a standard conversation format:

def format_conversation(example):
    user_msg = example["conversations"][0]["value"]
    assistant_msg = example["conversations"][1]["value"]
    # Extract the reasoning process and the answer
    think_part = assistant_msg.split("<|begin_of_thought|>")[1].split("<|end_of_thought|>")[0]
    answer_part = assistant_msg.split("<|begin_of_solution|>")[1].split("<|end_of_solution|>")[0]
    return {"conversations": [
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": f"<think>{think_part}</think> <answer>{answer_part}</answer>"}
    ]}

formatted_ds = dataset.map(format_conversation, batched=False)
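A quick spot check helps confirm the mapping worked before training; a minimal sketch:

# Inspect one transformed sample to confirm the <think>/<answer> wrapping
sample = formatted_ds[0]["conversations"]
print(sample[0]["content"][:80])   # the user question
print(sample[1]["content"][:120])  # should start with "<think>"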

3.3 Training Configuration

from trl import SFTTrainer, SFTConfig

training_args = SFTConfig(
    output_dir="./qwen_lora",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    num_train_epochs=3,
    learning_rate=2e-5,
    optim="adamw_8bit",
    max_seq_length=1024,
    logging_steps=10,
    save_steps=100
)

trainer = SFTTrainer(
    model=model,
    train_dataset=formatted_ds,
    dataset_text_field="conversations",
    tokenizer=tokenizer,
    args=training_args
)

3.4 Monitoring Training Progress

A custom callback to monitor training progress:

from tqdm.auto import tqdm
from transformers import TrainerCallback

class ProgressCallback(TrainerCallback):
    def on_train_begin(self, args, state, control, **kwargs):
        self.progress_bar = tqdm(total=state.max_steps, desc="Training")

    def on_step_end(self, args, state, control, **kwargs):
        self.progress_bar.update(1)
        # Guard against an empty log history on the first steps
        if state.log_history and "loss" in state.log_history[-1]:
            self.progress_bar.set_postfix({"loss": state.log_history[-1]["loss"]})

    def on_train_end(self, args, state, control, **kwargs):
        self.progress_bar.close()

trainer.add_callback(ProgressCallback())

4. Model Inference and Deployment

4.1 Basic Inference Function

from transformers import TextStreamer

def generate_response(prompt, model, tokenizer, max_length=1024):
    messages = [{"role": "user", "content": prompt}]
    input_text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    streamer = TextStreamer(tokenizer, skip_prompt=True)
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_length,
        streamer=streamer,
        temperature=0.7,
        top_p=0.9
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
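A usage sketch, assuming the fine-tuned model and tokenizer from the earlier sections are already in scope:

reply = generate_response("Solve for x: 2x + 5 = 15", model, tokenizer)
print(reply)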

4.2 Accelerating Inference with vLLM

from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

class VLLMInference:
    def __init__(self, model_path, lora_path=None):
        self.llm = LLM(
            model=model_path,
            enable_lora=(lora_path is not None),
            max_model_len=2048
        )
        # vLLM applies LoRA adapters per request via a LoRARequest
        self.lora_request = LoRARequest("lora", 1, lora_path) if lora_path else None
        self.sampling_params = SamplingParams(
            temperature=0.7,
            top_p=0.9,
            max_tokens=1024
        )

    def generate(self, prompt):
        return self.llm.generate(
            prompt,
            self.sampling_params,
            lora_request=self.lora_request
        )
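Note the design difference from the full inference script earlier: there, the LoRA weights are merged into the base model with merge_and_unload() and the merged checkpoint is served; here the adapter is applied at request time. Merging avoids per-request adapter overhead at the cost of an extra save step and disk space.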

4.3 Batch Inference

import json
from typing import List, Dict

def batch_inference(questions: List[str], inferencer) -> Dict[str, str]:
    results = {}
    for q in questions:
        output = inferencer.generate(q)
        results[q] = output[0].outputs[0].text
    return results

5. Case Study: Solving Math Problems

5.1 Test Cases

test_questions = [
    "Calculate the area of a circle with radius 5",
    "Solve for x: 2x + 5 = 15",
    "You have 1000 kg of mushrooms with water content 99%. After drying, the water content drops to 98%. How much water was evaporated?"
]
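For reference when checking the model's output on the third question: the dry matter is 1000 kg × 1% = 10 kg and does not change, so at 98% water content it must make up 2% of the total, giving 10 / 0.02 = 500 kg of mushrooms remaining; therefore 500 kg of water must evaporate.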

5.2 Running Inference

inferencer = VLLMInference(
    model_name,
    lora_path="qwen_lora"
)
results = batch_inference(test_questions, inferencer)

for q, ans in results.items():
    print(f"Q: {q}\nA: {ans}\n{'='*50}")

6. Performance Optimization Tips

6.1 VRAM Optimization

Gradient checkpointing

model.gradient_checkpointing_enable()
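When gradient checkpointing is combined with a PEFT-wrapped model, two companion settings are usually needed so gradients actually reach the LoRA layers (both are standard transformers APIs; the fine-tuning script above already calls the first one):

model.enable_input_require_grads()  # let gradients flow into the frozen backbone's inputs
model.config.use_cache = False      # the KV cache is incompatible with checkpointing during training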

8-bit Adam optimizer

training_args.optim = "adamw_8bit"

Gradient accumulation

training_args.gradient_accumulation_steps = 4
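The effective batch size is per_device_train_batch_size × gradient_accumulation_steps (× the number of GPUs), so accumulation trades a little step latency for the memory footprint of a much smaller per-device batch while keeping the same effective batch.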

6.2 Inference Acceleration

Using Flash Attention

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2"  # current API; replaces the deprecated use_flash_attention_2 flag
)

Continuous batching in vLLM

Continuous batching is controlled on the engine, not in SamplingParams:

llm = LLM(
    model=model_name,
    max_num_seqs=8,               # up to 8 requests batched concurrently
    max_num_batched_tokens=4096   # token budget per scheduling step
)

7. FAQ

Q1: What should I do if training runs out of memory (OOM)?

A: Try the following:

  1. Reduce per_device_train_batch_size
  2. Increase gradient_accumulation_steps
  3. Use 4-bit loading (load_in_4bit) instead of 8-bit, as in the sketch below
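A minimal 4-bit loading sketch using the transformers BitsAndBytesConfig API (QLoRA-style NF4 settings; the parameter values are common defaults, not tuned for this task):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # QLoRA's NormalFloat4 quantization
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bf16 for stability
    bnb_4bit_use_double_quant=True,         # quantize the quantization constants too
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)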

Q2: What if the model generates repetitive content?

A: Adjust the generation parameters:

generation_config = {
    "repetition_penalty": 1.5,
    "no_repeat_ngram_size": 3,
    "do_sample": True
}
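These keys are standard transformers generate() arguments, so the dictionary can be unpacked directly:

outputs = model.generate(**inputs, **generation_config)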

Q3: How should I evaluate the model?

A: Recommended metrics:

  1. Accuracy (see the sketch after this list)
  2. BLEU score
  3. Human evaluation (the most important)
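For the <answer>-tagged format used throughout this guide, a minimal accuracy scorer can extract the answer span and compare it against a reference. A sketch; `references` is a hypothetical list of gold answers:

import re
from typing import List

def answer_accuracy(outputs: List[str], references: List[str]) -> float:
    """Extract the <answer>...</answer> span and compare it to the gold answer."""
    correct = 0
    for out, ref in zip(outputs, references):
        m = re.search(r"<answer>(.*?)</answer>", out, re.DOTALL)
        pred = m.group(1).strip() if m else ""
        correct += int(pred == ref.strip())
    return correct / max(len(outputs), 1)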

Conclusion

With this hands-on guide, you now have the core techniques for LoRA fine-tuning of Qwen2.5-3B-Instruct. Key takeaways:

  1. LoRA dramatically reduces training cost (only about 0.1% of parameters are fine-tuned)
  2. Data quality matters more than quantity
  3. vLLM significantly speeds up inference

The complete code is open-sourced at: [GitHub repository link]

Suggested next steps:
- Try different LoRA ranks (r = 4, 8, 16) and compare the results
- Explore 4-bit fine-tuning methods such as QLoRA
- Build a web demo to showcase the results
