基于 YOLOv11 的无人机视角垃圾检测系统（深度学习目标检测 · 无人机视角垃圾检测数据集 · YOLO 标注格式）
# 基于 YOLOv11 的无人机垃圾检测系统

## 数据集描述

无人机视角垃圾检测数据集：26,700 余张无人机图像，超过 4 万条标注信息，YOLO 标注格式，共约 3.5 GB 数据量。可用于环卫快速检查、垃圾快速定位等应用。

## 模型代码

模型训练使用 YOLOv11n，训练 50 个 epoch，训练结果 mAP 如描述图所示。

## Qt 界面

运行界面采用 PyQt 编写。本项目已训练好模型，配置好环境后可直接使用，运行效果见描述图像。

## 无人机视角垃圾检测系统信息表

| 类别 | 内容 |
| --- | --- |
| 系统名称 | 无人机视角垃圾智能检测系统 |
| 核心算法 | YOLOv11n (Ultralytics) |
| 检测任务 | 无人机航拍图像中的垃圾目标检测与定位 |
| 数据集规模 | 图像总数 26,700 张；标注总数 40,000 个；数据总量约 3.5 GB |
| 数据格式 | 标准 YOLO 格式（.txt 标注文件） |
| 应用场景 | 城市环卫快速巡检、垃圾堆积点快速定位、环境监测 |
| 模型性能 | 训练轮次 (Epochs): 50；mAP: 详见项目描述图 |
| 用户界面 | 基于 PyQt5 开发的图形化界面 |
| 系统功能 | 图片/视频文件导入检测；实时摄像头/无人机图传检测；检测结果显示（类别、置信度）；检测结果统计与可视化 |

## 构建系统详细代码

以下是构建该系统的完整代码，分为三部分：数据配置文件、模型训练代码和 PyQt5 界面代码。

### 1. 数据配置文件 (data.yaml)

请根据您的数据集实际路径修改此文件。

```yaml
# data.yaml

# Dataset root directory (change to your actual path)
path: /path/to/your/drone_garbage_dataset

# Train / validation image folders (relative to `path`)
train: images/train
val: images/val

# Number of classes (adjust to your dataset; 2 classes shown as an example)
nc: 2

# Class names (must match the index order used in your label files)
names:
  0: bottle   # e.g. bottle
  1: bag      # e.g. plastic bag
```

### 2. 模型训练代码 (train.py)

此脚本用于训练 YOLOv11n 模型。

```python
from ultralytics import YOLO


def train_drone_garbage_model():
    """Train a YOLOv11n drone-garbage detection model.

    Loads the pretrained nano weights, fine-tunes them on the dataset
    described by ``data.yaml`` and prints where the best checkpoint
    was saved.
    """
    # 1. Load the lightweight pretrained weights.
    # NOTE: Ultralytics publishes the v11 nano weights as "yolo11n.pt"
    # (there is no "v" in the filename; "yolov11n.pt" would fail to download).
    model = YOLO("yolo11n.pt")

    # 2. Start training.
    results = model.train(
        data="data.yaml",              # dataset configuration file
        epochs=50,                     # number of training epochs
        imgsz=640,                     # input image size
        batch=16,                      # batch size; tune to GPU memory
        name="drone_garbage_v11n",     # run name
        project="runs/train",          # output directory
        exist_ok=True,                 # allow overwriting a previous run
        patience=10,                   # early-stopping patience (anti-overfit)
        device=0,                      # GPU 0; use "cpu" to train on CPU
        workers=4,                     # data-loader worker threads
    )
    print("训练完成")
    print(f"最佳模型保存在: {results.save_dir}/weights/best.pt")


if __name__ == "__main__":
    train_drone_garbage_model()
```
### 3. PyQt5 界面代码 (main_window.py)

此代码创建了图形用户界面，实现了图像/视频检测、实时流检测和结果展示功能。

```python
import sys
import time

import cv2
from PyQt5.QtWidgets import (
    QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
    QLabel, QPushButton, QFileDialog, QTableWidget, QTableWidgetItem,
    QMessageBox,
)
from PyQt5.QtGui import QPixmap, QImage, QFont
from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal
from ultralytics import YOLO


def _frame_to_qimage(frame_bgr):
    """Convert an OpenCV BGR frame to a QImage (RGB888)."""
    rgb_image = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    h, w, ch = rgb_image.shape
    bytes_per_line = ch * w
    return QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)


# --- Detection worker thread ---
class DetectionThread(QThread):
    """Runs YOLO inference on a video source in the background.

    Emits ``frame_processed`` with (annotated QImage, detection list,
    inference seconds) for every frame, and ``finished`` when the
    source ends or the thread is stopped.
    """

    frame_processed = pyqtSignal(QImage, list, float)
    finished = pyqtSignal()

    def __init__(self, model, source=0):
        super().__init__()
        self.model = model
        self.source = source   # 0 for webcam, or a video file path
        self.running = True

    def run(self):
        cap = cv2.VideoCapture(self.source)
        while self.running and cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            start_time = time.time()
            results = self.model(frame)            # run inference
            infer_time = time.time() - start_time

            # Parse results into plain dicts for the UI.
            detections = []
            annotated_frame = results[0].plot()    # frame with boxes drawn
            for box in results[0].boxes:
                cls_id = int(box.cls[0])
                conf = float(box.conf[0])
                xyxy = box.xyxy[0].tolist()
                detections.append({
                    "class": self.model.names[cls_id],
                    "confidence": conf,
                    "coordinates": xyxy,
                })

            # Convert BGR -> RGB for Qt and notify the UI thread.
            qt_image = _frame_to_qimage(annotated_frame)
            self.frame_processed.emit(qt_image, detections, infer_time)

        cap.release()
        self.finished.emit()

    def stop(self):
        """Ask the loop to exit and block until the thread has finished."""
        self.running = False
        self.wait()


# --- Main window ---
class MainWindow(QMainWindow):
    """Main application window: source selection, preview and results."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("无人机视角垃圾检测系统")
        self.setGeometry(100, 100, 1200, 800)
        # Load the trained model (make sure this path is correct).
        self.model = YOLO("runs/train/drone_garbage_v11n/weights/best.pt")
        self.init_ui()
        self.detection_thread = None

    def init_ui(self):
        """Build the widget layout: preview (left), controls (right), table."""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        main_layout = QHBoxLayout(central_widget)

        # --- Left: image display area ---
        left_layout = QVBoxLayout()
        self.image_label = QLabel("等待输入...")
        self.image_label.setAlignment(Qt.AlignCenter)
        self.image_label.setMinimumSize(640, 480)
        self.image_label.setStyleSheet(
            "QLabel { background-color : lightgray; }")
        left_layout.addWidget(self.image_label)

        # --- Right: control panel ---
        right_layout = QVBoxLayout()

        # File import controls
        file_group = QLabel("文件导入")
        file_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(file_group)

        self.btn_select_image = QPushButton("选择图片文件")
        self.btn_select_image.clicked.connect(self.select_image)
        right_layout.addWidget(self.btn_select_image)

        self.btn_select_video = QPushButton("选择视频文件")
        self.btn_select_video.clicked.connect(self.select_video)
        right_layout.addWidget(self.btn_select_video)

        self.btn_webcam = QPushButton("开启摄像头/图传")
        self.btn_webcam.clicked.connect(self.toggle_webcam)
        right_layout.addWidget(self.btn_webcam)

        # Detection summary labels
        result_group = QLabel("检测结果")
        result_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(result_group)

        self.time_label = QLabel("用时: 0.000s")
        self.count_label = QLabel("目标数目: 0")
        self.conf_label = QLabel("置信度: 0.00%")
        right_layout.addWidget(self.time_label)
        right_layout.addWidget(self.count_label)
        right_layout.addWidget(self.conf_label)

        # Action buttons
        action_group = QLabel("操作")
        action_group.setFont(QFont("Arial", 12, QFont.Bold))
        right_layout.addWidget(action_group)

        self.btn_exit = QPushButton("退出")
        self.btn_exit.clicked.connect(self.close)
        right_layout.addWidget(self.btn_exit)

        # --- Bottom: detection results table ---
        self.table = QTableWidget()
        self.table.setColumnCount(4)
        self.table.setHorizontalHeaderLabels(["序号", "类别", "置信度", "坐标位置"])

        main_layout.addLayout(left_layout, 70)
        main_layout.addLayout(right_layout, 30)
        main_layout.addWidget(self.table)

    # --- Source selection slots ---

    def select_image(self):
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择图片文件", "", "Image Files (*.png *.jpg *.jpeg)")
        if file_path:
            self.detect_single_image(file_path)

    def select_video(self):
        file_path, _ = QFileDialog.getOpenFileName(
            self, "选择视频文件", "", "Video Files (*.mp4 *.avi *.mov)")
        if file_path:
            self.start_detection(file_path)

    def toggle_webcam(self):
        """Start or stop live detection on the default camera."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
            self.btn_webcam.setText("开启摄像头/图传")
        else:
            self.start_detection(0)   # 0 = default camera
            self.btn_webcam.setText("关闭摄像头/图传")

    # --- Detection ---

    def detect_single_image(self, img_path):
        """Run one inference pass on a still image and show the results."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()

        frame = cv2.imread(img_path)
        if frame is None:
            QMessageBox.warning(self, "错误", "无法读取图片文件")
            return

        start_time = time.time()
        results = self.model(frame)
        infer_time = time.time() - start_time

        detections = []
        annotated_frame = results[0].plot()
        for box in results[0].boxes:
            cls_id = int(box.cls[0])
            conf = float(box.conf[0])
            xyxy = box.xyxy[0].tolist()
            detections.append({
                "class": self.model.names[cls_id],
                "confidence": conf,
                "coordinates": xyxy,
            })

        qt_image = _frame_to_qimage(annotated_frame)
        self._display_results(qt_image, detections, infer_time)

    def start_detection(self, source):
        """Launch the background thread for a video file or camera stream."""
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
        self.detection_thread = DetectionThread(self.model, source)
        self.detection_thread.frame_processed.connect(self.update_frame)
        self.detection_thread.finished.connect(self.on_detection_finished)
        self.detection_thread.start()

    def update_frame(self, qt_image, detections, infer_time):
        """Slot for DetectionThread.frame_processed."""
        self._display_results(qt_image, detections, infer_time)

    def _display_results(self, qt_image, detections, infer_time):
        """Refresh preview, summary labels and table (shared by image/stream)."""
        # Preview image
        pixmap = QPixmap.fromImage(qt_image)
        self.image_label.setPixmap(
            pixmap.scaled(self.image_label.size(), Qt.KeepAspectRatio))

        # Summary labels
        self.time_label.setText(f"用时: {infer_time:.3f}s")
        self.count_label.setText(f"目标数目: {len(detections)}")
        if detections:
            avg_conf = sum(d["confidence"] for d in detections) / len(detections)
            self.conf_label.setText(f"置信度: {avg_conf:.2%}")
        else:
            self.conf_label.setText("置信度: 0.00%")

        # Results table
        self.table.setRowCount(len(detections))
        for i, det in enumerate(detections):
            self.table.setItem(i, 0, QTableWidgetItem(str(i + 1)))
            self.table.setItem(i, 1, QTableWidgetItem(det["class"]))
            self.table.setItem(i, 2, QTableWidgetItem(f"{det['confidence']:.2%}"))
            coords = [int(x) for x in det["coordinates"]]
            self.table.setItem(i, 3, QTableWidgetItem(str(coords)))

    def on_detection_finished(self):
        self.btn_webcam.setText("开启摄像头/图传")
        QMessageBox.information(self, "提示", "视频播放完毕或摄像头已停止。")

    def closeEvent(self, event):
        # Stop the worker thread cleanly before the window closes.
        if self.detection_thread and self.detection_thread.isRunning():
            self.detection_thread.stop()
        event.accept()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
```

### 使用说明

1. **准备环境**：`pip install ultralytics PyQt5 opencv-python torch`
2. **训练模型**：将您的无人机垃圾数据集整理好并创建 `data.yaml` 文件，运行 `train.py` 脚本。训练完成后，模型权重将保存在 `runs/train/drone_garbage_v11n/weights/best.pt`。
3. **运行界面**：确保 `main_window.py` 中的模型路径（`self.model = YOLO(...)`）指向您训练好的 `best.pt` 文件，运行 `main_window.py` 即可启动图形界面。