Preface
My work so far has only required YOLOv5-series models, so today I'm learning the state-of-the-art YOLOv11 and recording the environment setup and training process.
1. Project download and environment setup
Source code: yolov11
As you can see, Python 3.8 or later is required; I install Python 3.10 here.
conda create -n yolov11 python=3.10
conda activate yolov11
pip install ultralytics -i https://pypi.tuna.tsinghua.edu.cn/simple
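After installation, a quick sanity check confirms that the package imports and that a GPU (if any) is visible; a minimal sketch:

# check_install.py: verify the ultralytics install and CUDA availability
import torch
import ultralytics

print(ultralytics.__version__)    # installed ultralytics version
print(torch.cuda.is_available())  # True if a CUDA-capable GPU can be used for training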
2. Annotating your own dataset
There are plenty of tools for annotating instance segmentation datasets; I suggest picking either labelme or AnyLabeling.
The annotated labels are saved as JSON files.
We need to convert them into the TXT format required by the YOLO series.
JSON-to-TXT conversion code:
# json2txt.py
import cv2
import os
import json
import glob
import numpy as np

class_names = ["cls1_name", "cls2_name", "cls3_name", "cls4_name", "cls5_name"]

def convert_json_label_to_yolov_seg_label():
    json_path = "F:/Desktop/hand/labels"  # local folder containing the JSON labels
    json_files = glob.glob(json_path + "/*.json")
    # print(json_files)
    # output folder for the TXT labels
    output_folder = "F:/Desktop/hand/labels_txt"
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for json_file in json_files:
        # print(json_file)
        with open(json_file, 'r') as f:
            json_info = json.load(f)
        img = cv2.imread(os.path.join(json_path, json_info["imagePath"]))
        height, width, _ = img.shape
        np_w_h = np.array([[width, height]], np.int32)
        txt_file = os.path.join(output_folder, os.path.basename(json_file).replace(".json", ".txt"))
        with open(txt_file, "w") as f:
            for point_json in json_info["shapes"]:
                txt_content = ""
                np_points = np.array(point_json["points"], np.int32)
                label = point_json["label"]
                index = class_names.index(label)
                # print(type(label))
                norm_points = np_points / np_w_h  # normalize polygon points to [0, 1]
                norm_points_list = norm_points.tolist()
                txt_content += str(index) + " " + " ".join(
                    [" ".join([str(cell[0]), str(cell[1])]) for cell in norm_points_list]) + "\n"
                f.write(txt_content)

convert_json_label_to_yolov_seg_label()

After conversion, each JSON file becomes a TXT label file.
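Each line of such a TXT file follows the YOLO segmentation label format: a class index followed by the normalized x y coordinates of the polygon's vertices. A made-up example line for class index 0 might look like:

0 0.512 0.334 0.498 0.361 0.471 0.352 0.463 0.318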
Splitting the dataset: the converted TXT labels and their images now need to be split into training, validation, and test sets. Here is the split code:
# txt_split.py
# Split the images and labels into train / val / test sets by ratio
import shutil
import random
import os

# original data paths
image_original_path = "hhh/images/"
label_original_path = "hhh/labels_txt/"

cur_path = os.getcwd()
# cur_path = 'D:/image_denoising_test/denoise/'

# training set paths
train_image_path = os.path.join(cur_path, "datasets/images/train/")
train_label_path = os.path.join(cur_path, "datasets/labels/train/")

# validation set paths
val_image_path = os.path.join(cur_path, "datasets/images/val/")
val_label_path = os.path.join(cur_path, "datasets/labels/val/")

# test set paths
test_image_path = os.path.join(cur_path, "datasets/images/test/")
test_label_path = os.path.join(cur_path, "datasets/labels/test/")

# list files recording the split
list_train = os.path.join(cur_path, "datasets/train.txt")
list_val = os.path.join(cur_path, "datasets/val.txt")
list_test = os.path.join(cur_path, "datasets/test.txt")

train_percent = 0.8
val_percent = 0.1
test_percent = 0.1


def del_file(path):
    # remove every file already present in the folder
    for i in os.listdir(path):
        os.remove(os.path.join(path, i))


def mkdir():
    # create the output folders, emptying them if they already exist
    for folder in [train_image_path, train_label_path,
                   val_image_path, val_label_path,
                   test_image_path, test_label_path]:
        if not os.path.exists(folder):
            os.makedirs(folder)
        else:
            del_file(folder)


def clearfile():
    # remove old list files
    for list_file in [list_train, list_val, list_test]:
        if os.path.exists(list_file):
            os.remove(list_file)


def main():
    mkdir()
    clearfile()
    file_train = open(list_train, 'w')
    file_val = open(list_val, 'w')
    file_test = open(list_test, 'w')

    total_txt = os.listdir(label_original_path)
    num_txt = len(total_txt)
    list_all_txt = range(num_txt)

    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)
    num_test = num_txt - num_train - num_val

    # sample num_train indices for the training set
    train = random.sample(list_all_txt, num_train)
    # the remaining indices are split between val and test
    val_test = [i for i in list_all_txt if i not in train]
    val = random.sample(val_test, num_val)
    print("train: {}, val: {}, test: {}".format(len(train), len(val), len(val_test) - len(val)))

    for i in list_all_txt:
        name = total_txt[i][:-4]
        srcImage = image_original_path + name + '.jpg'
        srcLabel = label_original_path + name + '.txt'
        if i in train:
            dst_train_Image = train_image_path + name + '.jpg'
            dst_train_Label = train_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_train_Image)
            shutil.copyfile(srcLabel, dst_train_Label)
            file_train.write(dst_train_Image + '\n')
        elif i in val:
            dst_val_Image = val_image_path + name + '.jpg'
            dst_val_Label = val_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_val_Image)
            shutil.copyfile(srcLabel, dst_val_Label)
            file_val.write(dst_val_Image + '\n')
        else:
            dst_test_Image = test_image_path + name + '.jpg'
            dst_test_Label = test_label_path + name + '.txt'
            shutil.copyfile(srcImage, dst_test_Image)
            shutil.copyfile(srcLabel, dst_test_Label)
            file_test.write(dst_test_Image + '\n')

    file_train.close()
    file_val.close()
    file_test.close()


if __name__ == "__main__":
    main()
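Assuming the default paths above and running the script from the project root, it produces the layout below, which is what the training config in the next section expects:

datasets/
├── images/
│   ├── train/
│   ├── val/
│   └── test/
├── labels/
│   ├── train/
│   ├── val/
│   └── test/
├── train.txt
├── val.txt
└── test.txt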
3. Writing the training code and training
I'm used to training from Python code; training from the command line also works, and interested readers can look it up in the official documentation.
# train.py
from ultralytics import YOLO

if __name__ == '__main__':
    model = YOLO(r'ultralytics/cfg/models/11/yolo11-seg.yaml')
    model.train(
        data=r'config.yaml',
        imgsz=640,
        epochs=800,
        single_cls=True,
        batch=16,
        workers=10,
        device='0',
    )
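Passing the yaml builds the model from scratch. If you would rather fine-tune from pretrained weights, you can load a checkpoint instead; a minimal sketch, assuming the official yolo11n-seg.pt segmentation checkpoint (Ultralytics downloads it automatically on first use):

model = YOLO('yolo11n-seg.pt')  # pretrained segmentation weights instead of the bare yaml
model.train(data='config.yaml', imgsz=640, epochs=800, single_cls=True, batch=16, device='0')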
Configuration file:
# config.yaml
path: ../datasets/images  # dataset root directory
train: train  # train images, relative to path
val: val      # val images, relative to path
test: test    # test images, relative to path

# Classes
names:
  0: class1_name
  1: class2_name
  2: class3_name
  3: class4_name
  4: class5_name

Change path to your own dataset location; if txt_split.py was run in the project root, the paths can stay as they are and you only need to edit the class names.
After that, training is just a matter of running python train.py.
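As mentioned above, the same run can also be launched from the Ultralytics command line; a rough CLI equivalent of train.py (assuming the same config.yaml) is:

yolo segment train model=ultralytics/cfg/models/11/yolo11-seg.yaml data=config.yaml imgsz=640 epochs=800 single_cls=True batch=16 device=0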
Test code:
# test.py
from ultralytics import YOLO

# Load the trained model; change this to your own path
model = YOLO('runs/train/exp22/weights/best.pt')  # path to your trained weights
source = '11.jpg'  # path to your own test image
# Run inference with additional arguments
model.predict(source, save=True, imgsz=640)
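predict() also returns the results, so the masks can be inspected in code. A minimal sketch, assuming a segmentation model and at least one detected object:

results = model.predict(source, save=True, imgsz=640)
for r in results:
    if r.masks is not None:
        print(r.boxes.cls.tolist())   # predicted class indices
        print(r.masks.xy[0].shape)    # first mask polygon as an (N, 2) array in pixel coordinates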
Export to an ONNX model and run it:
yolo export model=runs/segment/train11/weights/best.pt imgsz=640 format=onnx opset=12 simplify
python examples/YOLOv8-Segmentation-ONNXRuntime-Python/main.py --model runs/segment/train5n/weights/bestv8.onnx
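To quickly confirm that the exported ONNX file loads before running the example script, a small check with onnxruntime (a sketch; adjust the path to wherever your best.onnx was written):

import onnxruntime as ort

session = ort.InferenceSession('runs/segment/train11/weights/best.onnx', providers=['CPUExecutionProvider'])
print([i.name for i in session.get_inputs()])     # input tensor names, e.g. ['images']
print([o.shape for o in session.get_outputs()])   # detection and mask-prototype output shapes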
4. Common errors
RuntimeError: Trying to create tensor with negative dimension -37: [0, -37]
This error appeared when running YOLOv8-Segmentation-ONNXRuntime-Python; the fix is to modify the dataset configuration file. The configuration files live in ultralytics/cfg/datasets/; if that location keeps raising a "can't find file" error, just write an absolute path instead.
References
Semantic segmentation: training a YOLOv11 segmentation model on your own dataset (from code download to example testing)
Summary
The project isn't finished yet and most of my effort is going into the project itself, so this write-up is a bit rushed; I'll gradually polish the article and fill in the missing parts later.