
DeepSeek 14B Model Local Deployment and Pre-training Implementation Plan

1. Project Overview

This project implements local deployment of the DeepSeek 14B large language model, covering the full pipeline of text data cleaning and preprocessing, model quantization, and local pre-training. With this plan, users can run and fine-tune a large language model efficiently even on limited hardware.

2. Environment Setup and Dependencies

2.1 Hardware Requirements

  • GPU: at least 24 GB of VRAM (e.g., RTX 3090/4090 or A100); see the estimate sketch below this list
  • RAM: at least 64 GB of system memory
  • Storage: at least 100 GB of free space (for model weights and datasets)
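
As a rough sanity check on these numbers, a weight-only back-of-the-envelope estimate shows why 24 GB of VRAM is the floor for a 14B-parameter model. This minimal sketch deliberately ignores activations, the KV cache, and (for training) optimizer state, which all add significant overhead on top:

# Weight-only VRAM estimate for a 14B-parameter model.
# Activations, KV cache, and optimizer state are NOT included.
PARAMS = 14e9

for name, bytes_per_param in [("FP16", 2), ("8-bit", 1), ("4-bit", 0.5)]:
    gb = PARAMS * bytes_per_param / 1024**3
    print(f"{name}: ~{gb:.1f} GB of weights")
# FP16: ~26.1 GB, 8-bit: ~13.0 GB, 4-bit: ~6.5 GB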

2.2 Software Environment

# Create the conda environment
conda create -n deepseek python=3.10
conda activate deepseek

# Install PyTorch (CUDA 11.8 build)
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

# Install the remaining dependencies (quote version specs so the shell
# does not interpret ">" as a redirect)
pip install "transformers>=4.30.0"
pip install "datasets>=2.12.0"
pip install "accelerate>=0.20.0"
pip install "bitsandbytes>=0.40.0"
pip install sentencepiece
pip install protobuf
pip install einops
pip install scipy
pip install nltk
pip install tqdm
pip install wandb  # optional, for training monitoring
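
Before moving on, it is worth verifying that the GPU stack is wired up correctly. A minimal check, assuming the environment above:

import torch
import transformers

print(f"PyTorch {torch.__version__}, Transformers {transformers.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    print(f"GPU: {props.name}, {props.total_memory / 1024**3:.1f} GB VRAM")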

3. Text Data Cleaning and Preprocessing

3.1 Data Collection and Loading

import os
import json
import re
from datasets import load_dataset, Dataset
import nltk
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
import multiprocessing as mp

# Download NLTK resources
nltk.download('punkt')
nltk.download('stopwords')

class TextDataPreprocessor:
    def __init__(self, data_paths, output_dir="./processed_data"):
        self.data_paths = data_paths
        self.output_dir = output_dir
        self.stop_words = set(stopwords.words('english'))
        os.makedirs(output_dir, exist_ok=True)

    def load_data(self):
        """Load text data in multiple formats."""
        datasets = []
        for path in self.data_paths:
            if path.endswith('.txt'):
                # Plain text file
                with open(path, 'r', encoding='utf-8') as f:
                    text = f.read()
                datasets.append({"text": text})
            elif path.endswith('.json') or path.endswith('.jsonl'):
                # JSON/JSONL file
                with open(path, 'r', encoding='utf-8') as f:
                    if path.endswith('.jsonl'):
                        data = [json.loads(line) for line in f]
                    else:
                        data = json.load(f)
                datasets.extend(data)
            elif os.path.isdir(path):
                # All text files in a directory
                for filename in os.listdir(path):
                    if filename.endswith('.txt'):
                        filepath = os.path.join(path, filename)
                        with open(filepath, 'r', encoding='utf-8') as f:
                            text = f.read()
                        datasets.append({"text": text})
        return Dataset.from_list(datasets)

3.2 Text Cleaning Functions

    def clean_text(self, text):
        """Clean a single text string."""
        if not isinstance(text, str):
            return ""
        # Lowercase
        text = text.lower()
        # Strip HTML tags, URLs, and email addresses BEFORE removing special
        # characters; otherwise the '<', '>', '/', '@' they rely on are gone
        text = re.sub(r'<[^>]+>', '', text)
        text = re.sub(r'http\S+', '', text)
        text = re.sub(r'\S+@\S+', '', text)
        # Remove remaining special characters
        text = re.sub(r'[^a-zA-Z0-9\s.,!?;:()\"\'-]', '', text)
        # Remove digits (optional: keep or remove depending on the corpus)
        text = re.sub(r'\d+', '', text)
        # Collapse whitespace
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    def remove_stopwords(self, text):
        """Remove stop words."""
        words = text.split()
        filtered_words = [word for word in words if word not in self.stop_words]
        return ' '.join(filtered_words)

    def sentence_segmentation(self, text):
        """Split text into sentences."""
        sentences = sent_tokenize(text)
        return [s.strip() for s in sentences if len(s.strip()) > 10]  # drop very short sentences

3.3 Parallel Processing and Saving

    def process_chunk(self, chunk):
        """Process one chunk of records."""
        processed_chunk = []
        for item in chunk:
            if 'text' in item:
                cleaned_text = self.clean_text(item['text'])
                if cleaned_text:  # keep non-empty text only
                    sentences = self.sentence_segmentation(cleaned_text)
                    for sentence in sentences:
                        processed_chunk.append({"text": sentence})
        return processed_chunk

    def process_data_parallel(self, dataset, chunk_size=1000, num_workers=mp.cpu_count()):
        """Process the data in parallel across worker processes."""
        data = dataset.to_list()
        chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]
        with mp.Pool(processes=num_workers) as pool:
            results = pool.map(self.process_chunk, chunks)
        # Merge the per-chunk results
        processed_data = []
        for result in results:
            processed_data.extend(result)
        return Dataset.from_list(processed_data)

    def save_processed_data(self, dataset, filename="processed_dataset.jsonl"):
        """Save the processed dataset as JSONL."""
        output_path = os.path.join(self.output_dir, filename)
        dataset.to_json(output_path, orient="records", lines=True)
        print(f"Processed data saved to: {output_path}")
        return output_path

    def run_preprocessing(self):
        """Run the full preprocessing pipeline."""
        print("Loading data...")
        raw_dataset = self.load_data()
        print(f"Raw data loaded: {len(raw_dataset)} records")
        print("Processing data in parallel...")
        processed_dataset = self.process_data_parallel(raw_dataset)
        print(f"Processing finished: {len(processed_dataset)} valid records")
        print("Saving processed data...")
        output_path = self.save_processed_data(processed_dataset)
        return output_path

3.4 Running the Preprocessing

# Usage example
if __name__ == "__main__":
    # Data paths (files and/or directories)
    data_paths = [
        "./raw_data/text_corpus.txt",
        "./raw_data/json_data.json",
        "./raw_data/documents_dir"
    ]
    preprocessor = TextDataPreprocessor(data_paths)
    processed_data_path = preprocessor.run_preprocessing()

4. Model Quantization and Optimization

4.1 Model Loading and Quantization Configuration

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

class ModelQuantizer:
    def __init__(self, model_name="deepseek-ai/deepseek-llm-14b", cache_dir="./models"):
        self.model_name = model_name
        self.cache_dir = cache_dir
        os.makedirs(cache_dir, exist_ok=True)

    def setup_quantization_config(self, quantization_type="4bit"):
        """Build the quantization configuration."""
        if quantization_type == "4bit":
            return BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_use_double_quant=True,
                bnb_4bit_compute_dtype=torch.bfloat16
            )
        elif quantization_type == "8bit":
            return BitsAndBytesConfig(load_in_8bit=True)
        else:
            return None

    def load_tokenizer(self):
        """Load the tokenizer."""
        tokenizer = AutoTokenizer.from_pretrained(
            self.model_name,
            cache_dir=self.cache_dir,
            trust_remote_code=True
        )
        # Add a pad token if one is missing
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

4.2 Loading the Quantized Model

    def load_quantized_model(self, quantization_config=None, device_map="auto"):
        """Load the (optionally quantized) model."""
        print("Loading model...")
        model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            quantization_config=quantization_config,
            device_map=device_map,
            cache_dir=self.cache_dir,
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True
        )
        print("Model loaded!")
        return model

    def estimate_memory_usage(self, model):
        """Estimate the model's in-memory footprint."""
        param_size = 0
        for param in model.parameters():
            param_size += param.nelement() * param.element_size()
        buffer_size = 0
        for buffer in model.buffers():
            buffer_size += buffer.nelement() * buffer.element_size()
        size_all_mb = (param_size + buffer_size) / 1024**2
        print(f"Model size: {size_all_mb:.2f} MB")
        return size_all_mb

4.3 Layered Loading and Memory Optimization

    def load_model_with_offloading(self, max_memory=None):
        """Load the model with CPU offloading (for memory-constrained setups)."""
        if max_memory is None:
            max_memory = {0: "20GB", "cpu": "50GB"}
        # Initialize with empty weights
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                trust_remote_code=True,
                torch_dtype=torch.bfloat16
            )
        # Dispatch the checkpoint layer by layer across devices
        # (note: `checkpoint` must point at locally downloaded weights)
        model = load_checkpoint_and_dispatch(
            model,
            checkpoint=self.model_name,
            device_map="auto",
            max_memory=max_memory,
            no_split_module_classes=["GPTJBlock"]  # adjust to the model's decoder block class
        )
        return model

4.4 Comparing and Choosing Quantization Strategies

    def compare_quantization_strategies(self):
        """Compare memory use and speed of different quantization strategies."""
        import time  # needed for the timing below

        strategies = [
            ("FP16", None),  # no quantization; bf16 weights
            ("8bit", self.setup_quantization_config("8bit")),
            ("4bit", self.setup_quantization_config("4bit"))
        ]
        results = []
        tokenizer = self.load_tokenizer()
        test_text = "深度学习是人工智能的一个重要领域,它"

        for name, quant_config in strategies:
            print(f"\nTesting {name} quantization...")
            try:
                model = self.load_quantized_model(quant_config)
                memory_usage = self.estimate_memory_usage(model)
                # Measure inference speed
                inputs = tokenizer(test_text, return_tensors="pt").to(model.device)
                start_time = time.time()
                with torch.no_grad():
                    outputs = model.generate(
                        **inputs,
                        max_length=50,
                        num_return_sequences=1,
                        do_sample=True,  # required for temperature to take effect
                        temperature=0.7
                    )
                inference_time = time.time() - start_time
                generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
                results.append({
                    "strategy": name,
                    "memory_mb": memory_usage,
                    "inference_time": inference_time,
                    "generated_text": generated_text
                })
                # Free memory before loading the next variant
                del model
                torch.cuda.empty_cache()
            except Exception as e:
                print(f"{name} strategy failed: {str(e)}")
                results.append({"strategy": name, "error": str(e)})
        return results
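
A minimal usage sketch for the comparison above (each strategy reloads the full model, so this needs enough disk cache and VRAM to hold one variant at a time):

quantizer = ModelQuantizer()
results = quantizer.compare_quantization_strategies()
for r in results:
    if "error" in r:
        print(f"{r['strategy']}: failed ({r['error']})")
    else:
        print(f"{r['strategy']}: {r['memory_mb']:.0f} MB, {r['inference_time']:.2f}s")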

5. Local Pre-training Implementation

5.1 Training Configuration and Setup

from transformers import TrainingArguments, Trainer, DataCollatorForLanguageModeling
from datasets import load_dataset

class DeepSeekTrainer:
    def __init__(self, model, tokenizer, train_data_path, output_dir="./training_output"):
        self.model = model
        self.tokenizer = tokenizer
        self.train_data_path = train_data_path
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)

    def prepare_dataset(self, max_seq_length=2048):
        """Prepare the training dataset."""
        dataset = load_dataset('json', data_files=self.train_data_path, split='train')

        def tokenize_function(examples):
            # In batched mode `examples` is a dict of columns, so
            # examples["text"] is a list of strings; tokenize each one
            return self.tokenizer(
                examples["text"],
                truncation=True,
                max_length=max_seq_length,
                padding="max_length"
            )

        tokenized_dataset = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=dataset.column_names,
            batch_size=1000
        )
        return tokenized_dataset

5.2 Training Parameter Configuration

    def setup_training_arguments(self, learning_rate=2e-5, num_train_epochs=1, batch_size=1):
        """Build the training arguments."""
        return TrainingArguments(
            output_dir=self.output_dir,
            overwrite_output_dir=True,
            num_train_epochs=num_train_epochs,
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=8,  # effective batch = batch_size * gradient_accumulation_steps
            learning_rate=learning_rate,
            weight_decay=0.01,
            warmup_steps=100,
            logging_steps=10,
            save_steps=500,
            eval_steps=500,
            evaluation_strategy="steps",
            load_best_model_at_end=True,
            metric_for_best_model="eval_loss",
            greater_is_better=False,
            fp16=True,  # mixed-precision training
            dataloader_pin_memory=False,
            remove_unused_columns=False,
            report_to="none",  # disable reporting tools, or set to "wandb" to enable
            run_name="deepseek-14b-finetune",
        )

5.3 Custom Training Loop

    def custom_train_loop(self, train_dataset, eval_dataset=None, training_args=None):
        """Training loop built on the Trainer API."""
        if training_args is None:
            training_args = self.setup_training_arguments()

        # Data collator
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,  # causal language modeling, not masked LM
        )

        # Initialize the Trainer
        trainer = Trainer(
            model=self.model,
            args=training_args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            tokenizer=self.tokenizer,
        )

        # Train
        print("Starting training...")
        train_result = trainer.train()

        # Save the final model
        trainer.save_model()
        self.tokenizer.save_pretrained(self.output_dir)

        # Record training metrics
        metrics = train_result.metrics
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
        return metrics

5.4 Memory-Optimized Training Techniques

    def optimized_training(self, dataset, use_gradient_checkpointing=True, use_offloading=True):
        """Train with memory optimization techniques."""
        # Enable gradient checkpointing
        if use_gradient_checkpointing:
            self.model.gradient_checkpointing_enable()
            print("Gradient checkpointing enabled")

        # Set up CPU offloading
        if use_offloading:
            from accelerate import dispatch_model, infer_auto_device_map
            # Build the device map
            device_map = infer_auto_device_map(
                self.model,
                max_memory={0: "20GB", "cpu": "50GB"},
                no_split_module_classes=["GPTJBlock"]
            )
            # Dispatch the model across devices
            self.model = dispatch_model(self.model, device_map=device_map)
            print("CPU offloading enabled")

        # Prepare the dataset
        train_dataset = self.prepare_dataset()

        # Split into train and validation sets
        if len(train_dataset) > 1000:
            split_dataset = train_dataset.train_test_split(test_size=0.1)
            train_data = split_dataset["train"]
            eval_data = split_dataset["test"]
        else:
            train_data = train_dataset
            eval_data = None

        # Training arguments tuned for low memory
        training_args = self.setup_training_arguments(
            batch_size=1,        # small batch size due to memory limits
            learning_rate=1e-5,  # lower learning rate
            num_train_epochs=1
        )

        # Train
        metrics = self.custom_train_loop(train_data, eval_data, training_args)
        return metrics

6. Model Evaluation and Validation

6.1 Computing Evaluation Metrics

import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

class ModelEvaluator:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    def calculate_perplexity(self, dataset, batch_size=1):
        """Compute perplexity over a tokenized dataset."""
        from torch.utils.data import DataLoader
        from transformers import DataCollatorForLanguageModeling

        # The collator turns list-valued examples into tensor batches
        # and adds the `labels` needed for the LM loss
        collator = DataCollatorForLanguageModeling(tokenizer=self.tokenizer, mlm=False)
        loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collator)

        eval_loss = 0.0
        nb_eval_steps = 0
        self.model.eval()
        for batch in loader:
            batch = {k: v.to(self.model.device) for k, v in batch.items()}
            with torch.no_grad():
                outputs = self.model(**batch)
            eval_loss += outputs.loss.mean().item()
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        perplexity = np.exp(eval_loss)
        return perplexity, eval_loss

    def generate_sample_text(self, prompt, max_length=100, temperature=0.7):
        """Generate a sample continuation for a prompt."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_length=max_length,
                temperature=temperature,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id
            )
        generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return generated_text
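
For reference, the perplexity computed above is the exponential of the mean per-batch cross-entropy loss:

\mathrm{PPL} = \exp\left(\frac{1}{N}\sum_{i=1}^{N}\mathcal{L}_i\right)

where \mathcal{L}_i is the language-modeling loss on batch i. Lower perplexity means the model assigns higher probability to the evaluation text.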

6.2 End-to-End Evaluation

    def comprehensive_evaluation(self, test_dataset, sample_prompts=None):
        """Run a full evaluation of model quality."""
        if sample_prompts is None:
            sample_prompts = [
                "人工智能的未来",
                "深度学习的主要应用包括",
                "自然语言处理的重要性体现在"
            ]
        print("Starting evaluation...")

        # Perplexity
        perplexity, eval_loss = self.calculate_perplexity(test_dataset)
        print(f"Perplexity: {perplexity:.2f}, eval loss: {eval_loss:.4f}")

        # Sample generations (generate once per prompt and reuse the output)
        print("\nSample generations:")
        generated_samples = []
        for prompt in sample_prompts:
            generated = self.generate_sample_text(prompt)
            print(f"Prompt: '{prompt}'")
            print(f"Output: '{generated}'")
            print("-" * 50)
            generated_samples.append({"prompt": prompt, "generated": generated})

        # Evaluation results
        results = {
            "perplexity": perplexity,
            "eval_loss": eval_loss,
            "generated_samples": generated_samples
        }
        return results

7. Deployment and Inference Serving

7.1 Serving the Model

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn

class TextGenerationRequest(BaseModel):
    prompt: str
    max_length: int = 100
    temperature: float = 0.7
    num_return_sequences: int = 1

class ModelServer:
    def __init__(self, model, tokenizer, host="0.0.0.0", port=8000):
        self.model = model
        self.tokenizer = tokenizer
        self.host = host
        self.port = port
        self.app = FastAPI(title="DeepSeek 14B API")
        self.setup_routes()

    def setup_routes(self):
        @self.app.post("/generate")
        async def generate_text(request: TextGenerationRequest):
            try:
                # Encode the input
                inputs = self.tokenizer(request.prompt, return_tensors="pt").to(self.model.device)
                # Generate
                with torch.no_grad():
                    outputs = self.model.generate(
                        **inputs,
                        max_length=request.max_length,
                        temperature=request.temperature,
                        num_return_sequences=request.num_return_sequences,
                        do_sample=True,
                        pad_token_id=self.tokenizer.eos_token_id
                    )
                # Decode the outputs
                generated_texts = [
                    self.tokenizer.decode(output, skip_special_tokens=True)
                    for output in outputs
                ]
                return {
                    "generated_texts": generated_texts,
                    "prompt": request.prompt,
                    "parameters": {
                        "max_length": request.max_length,
                        "temperature": request.temperature
                    }
                }
            except Exception as e:
                raise HTTPException(status_code=500, detail=str(e))

        @self.app.get("/health")
        async def health_check():
            return {"status": "healthy", "model": "deepseek-14b"}

    def run(self):
        """Start the server."""
        uvicorn.run(self.app, host=self.host, port=self.port)
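
Once the server is running, it can be exercised with a small client script. This sketch uses the requests library against the default host/port above (the prompt is illustrative):

import requests

resp = requests.post(
    "http://localhost:8000/generate",
    json={"prompt": "深度学习是", "max_length": 80, "temperature": 0.7},
    timeout=120,
)
resp.raise_for_status()
for text in resp.json()["generated_texts"]:
    print(text)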

7.2 Batched Inference Optimization

    def batch_generate(self, prompts, batch_size=4, **generation_kwargs):
        """Generate text in batches to improve throughput."""
        all_outputs = []
        for i in range(0, len(prompts), batch_size):
            batch_prompts = prompts[i:i + batch_size]
            # Encode the batch (padding requires a pad token, set in load_tokenizer)
            inputs = self.tokenizer(
                batch_prompts,
                return_tensors="pt",
                padding=True,
                truncation=True
            ).to(self.model.device)
            # Generate
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    **generation_kwargs,
                    pad_token_id=self.tokenizer.eos_token_id
                )
            # Decode
            decoded_outputs = [
                self.tokenizer.decode(output, skip_special_tokens=True)
                for output in outputs
            ]
            all_outputs.extend(decoded_outputs)
        return all_outputs
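
A quick usage sketch (the prompts are illustrative; max_new_tokens is a standard generate kwarg passed through **generation_kwargs). Note that decoder-only models generally batch-generate better with left padding:

# Decoder-only models continue from the end of the sequence,
# so left padding keeps the prompt adjacent to the generated tokens
tokenizer.padding_side = "left"

server = ModelServer(model, tokenizer)
prompts = ["机器学习是", "神经网络的核心思想是", "大语言模型可以"]
outputs = server.batch_generate(prompts, batch_size=2, max_new_tokens=64, do_sample=True)
for prompt, text in zip(prompts, outputs):
    print(f"{prompt} -> {text}")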

8. Complete Deployment Walkthrough

8.1 End-to-End Deployment Script

import time
from datetime import datetime

def main():
    """Full deployment workflow."""
    print("DeepSeek 14B local deployment starting")
    start_time = time.time()

    # 1. Data preprocessing
    print("\n=== Step 1: Data preprocessing ===")
    data_paths = ["./raw_data"]
    preprocessor = TextDataPreprocessor(data_paths)
    processed_data_path = preprocessor.run_preprocessing()

    # 2. Model loading and quantization
    print("\n=== Step 2: Model loading and quantization ===")
    quantizer = ModelQuantizer()
    tokenizer = quantizer.load_tokenizer()
    # Pick a quantization strategy (adjust to available memory)
    quantization_config = quantizer.setup_quantization_config("4bit")
    model = quantizer.load_quantized_model(quantization_config)

    # 3. Training / fine-tuning
    print("\n=== Step 3: Model training ===")
    trainer = DeepSeekTrainer(model, tokenizer, processed_data_path)
    # Load the dataset
    dataset = trainer.prepare_dataset()
    # Start training (optional, uncomment as needed)
    # training_metrics = trainer.optimized_training(dataset)
    # print(f"Training finished, metrics: {training_metrics}")

    # 4. Evaluation
    print("\n=== Step 4: Model evaluation ===")
    evaluator = ModelEvaluator(model, tokenizer)
    # Take a slice for evaluation
    if len(dataset) > 100:
        eval_dataset = dataset.select(range(100))
    else:
        eval_dataset = dataset
    evaluation_results = evaluator.comprehensive_evaluation(eval_dataset)
    print(f"Evaluation finished, perplexity: {evaluation_results['perplexity']:.2f}")

    # 5. Start serving
    print("\n=== Step 5: Start the inference service ===")
    server = ModelServer(model, tokenizer)

    # Save deployment info
    deployment_info = {
        "deployment_time": datetime.now().isoformat(),
        "model_name": "deepseek-14b",
        "quantization": "4bit",
        "evaluation_results": evaluation_results,
        "processing_time_seconds": time.time() - start_time
    }
    with open("./deployment_info.json", "w") as f:
        json.dump(deployment_info, f, indent=2)

    print(f"\nDeployment finished! Total time: {time.time() - start_time:.2f}s")
    print(f"The API will be served at http://{server.host}:{server.port}")

    # Start the server (in production, run this in the background)
    # server.run()

if __name__ == "__main__":
    main()

8.2 Monitoring and Maintenance

class DeploymentMonitor:
    def __init__(self, model, tokenizer, log_dir="./logs"):
        self.model = model
        self.tokenizer = tokenizer
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)

    def monitor_resources(self):
        """Sample system resource usage."""
        import psutil

        # GPU monitoring (if available)
        gpu_info = {}
        if torch.cuda.is_available():
            gpu_memory = torch.cuda.memory_allocated() / 1024**3  # GB
            gpu_max_memory = torch.cuda.max_memory_allocated() / 1024**3
            gpu_info = {
                "gpu_memory_used_gb": gpu_memory,
                "gpu_max_memory_used_gb": gpu_max_memory
            }

        # CPU and RAM monitoring
        cpu_percent = psutil.cpu_percent()
        memory_info = psutil.virtual_memory()
        return {
            **gpu_info,
            "cpu_percent": cpu_percent,
            "memory_used_gb": memory_info.used / 1024**3,
            "memory_total_gb": memory_info.total / 1024**3,
            "timestamp": datetime.now().isoformat()
        }

    def log_inference_metrics(self, prompt, generated_text, response_time):
        """Log per-request inference metrics."""
        log_entry = {
            "prompt_length": len(prompt),
            "response_length": len(generated_text),
            "response_time_seconds": response_time,
            "tokens_per_second": len(generated_text.split()) / response_time,
            "timestamp": datetime.now().isoformat()
        }
        log_file = os.path.join(self.log_dir, "inference_metrics.jsonl")
        with open(log_file, "a") as f:
            f.write(json.dumps(log_entry) + "\n")

    def run_continuous_monitoring(self, interval=60):
        """Continuously sample and log resource usage."""
        monitor_file = os.path.join(self.log_dir, "resource_monitor.jsonl")
        while True:
            try:
                resource_info = self.monitor_resources()
                with open(monitor_file, "a") as f:
                    f.write(json.dumps(resource_info) + "\n")
                time.sleep(interval)
            except KeyboardInterrupt:
                print("Monitoring stopped")
                break
            except Exception as e:
                print(f"Monitoring error: {e}")
                time.sleep(interval)

9. Troubleshooting and Optimization Advice

9.1 Common Problems and Solutions

class Troubleshooter:
    @staticmethod
    def handle_memory_issues():
        """Print remedies for out-of-memory problems."""
        solutions = [
            "Reduce the batch size (batch_size)",
            "Use gradient accumulation instead of a larger batch",
            "Enable gradient checkpointing",
            "Use more aggressive quantization (e.g., 4-bit instead of 8-bit)",
            "Use CPU offloading",
            "Reduce the sequence length",
            "Use a smaller model variant"
        ]
        print("Out-of-memory solutions:")
        for i, solution in enumerate(solutions, 1):
            print(f"{i}. {solution}")

    @staticmethod
    def handle_slow_training():
        """Print remedies for slow training."""
        solutions = [
            "Increase the batch size (if memory allows)",
            "Use faster storage (e.g., NVMe SSD)",
            "Enable mixed-precision training (fp16/bf16)",
            "Use data-parallel training (multiple GPUs)",
            "Optimize data loading (prefetching, caching)",
            "Reduce logging and checkpointing frequency"
        ]
        print("Slow-training solutions:")
        for i, solution in enumerate(solutions, 1):
            print(f"{i}. {solution}")

    @staticmethod
    def handle_poor_performance():
        """Print remedies for poor model quality."""
        solutions = [
            "Increase the quantity and quality of training data",
            "Tune the learning rate and schedule",
            "Train for more epochs",
            "Try different optimizer settings",
            "Do more careful data cleaning and preprocessing",
            "Use early stopping to avoid overfitting",
            "Try different architectures or hyperparameters"
        ]
        print("Poor-quality solutions:")
        for i, solution in enumerate(solutions, 1):
            print(f"{i}. {solution}")

9.2 Performance Optimization Tips

    @staticmethod
    def optimization_tips():
        """Print performance optimization tips."""
        tips = [
            {
                "area": "Data preprocessing",
                "tips": [
                    "Use parallel processing to speed up cleaning",
                    "Preprocess and cache data to cut training-time IO",
                    "Store preprocessed data in a binary format (e.g., HDF5)"
                ]
            },
            {
                "area": "Model training",
                "tips": [
                    "Use a learning rate scheduler (e.g., cosine annealing)",
                    "Apply gradient clipping to avoid exploding gradients",
                    "Use weight decay for regularization",
                    "Try different optimizers (e.g., AdamW, SGD)"
                ]
            },
            {
                "area": "Inference optimization",
                "tips": [
                    "Use the key-value cache to speed up generation",
                    "Batch requests to raise throughput",
                    "Run inference on the quantized model",
                    "Optimize further with TensorRT or ONNX Runtime"
                ]
            }
        ]
        print("Performance optimization tips:")
        for area in tips:
            print(f"\n{area['area']}:")
            for tip in area['tips']:
                print(f"  • {tip}")
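
To illustrate the key-value cache tip: Hugging Face generate enables the cache by default, and the speedup can be measured by toggling the use_cache flag. A timing sketch, assuming model and tokenizer are already loaded on a CUDA device:

import time
import torch

inputs = tokenizer("深度学习是", return_tensors="pt").to(model.device)
for use_cache in (True, False):
    torch.cuda.synchronize()  # make the timing reflect actual GPU work
    start = time.time()
    with torch.no_grad():
        model.generate(**inputs, max_new_tokens=64, do_sample=False, use_cache=use_cache)
    torch.cuda.synchronize()
    print(f"use_cache={use_cache}: {time.time() - start:.2f}s")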

10. Conclusion and Extensions

This guide has covered the full pipeline for deploying the DeepSeek 14B model locally: data preprocessing, model quantization, training, and serving. With sensible resource management and the optimization strategies above, a large language model can be deployed and fine-tuned effectively even on limited hardware.

Extension Ideas

  1. Multi-GPU training: extend the code to support data-parallel and model-parallel training
  2. Model distillation: use knowledge distillation to transfer performance to a smaller model
  3. Domain adaptation: specialize the model for particular domains (medical, legal, financial, etc.)
  4. Multimodal extension: add support for image, audio, and other input modalities
  5. Cloud-native deployment: adapt the stack to Kubernetes and similar environments for elastic scaling

Implemented as described, this plan gives companies and research groups a capable, fully local large language model while keeping data private and secure.
