Converting a Labelme-annotated dataset to the VOC2007 dataset format.


VOC2007 folder structure

1) The JPEGImages folder

Contains all training and test images, mixed together in a single folder.

2) The Annotations folder

Holds the label files in XML format; each XML file corresponds to one image in the JPEGImages folder.

3) The ImageSets folder

The Main subfolder holds the image lists used for object detection: test.txt, train.txt, val.txt and trainval.txt. These four files are generated later by the script below; a sketch of the full layout follows this list.
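A typical layout of the converted dataset looks like this (a minimal sketch; the folder names follow the VOC2007 convention used by the script below):

VOC2007/
    Annotations/          # one .xml label file per image
    ImageSets/
        Main/             # train.txt, val.txt, trainval.txt, test.txt
    JPEGImages/           # all .jpg images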

The XML annotation format

<?xml version="1.0" encoding="utf-8"?>

<annotation>

         <source>

                   <image>optic rs image</image>

                   <annotation>Lmars RSDS2016</annotation>

                   <flickrid>0</flickrid>

                   <database>Lmars Detection Dataset of RS</database>

         </source>

         <object>

<!-- the four bounding-box coordinates: x,y of the top-left corner and x,y of the bottom-right corner -->

                   <bndbox>

                            <xmin>690</xmin>

                            <ymin>618</ymin>

                            <ymax>678</ymax>

                            <xmax>748</xmax>

                   </bndbox>

<!-- whether the object is difficult to recognize: 0 = easy, 1 = difficult -->

                   <difficult>0</difficult>

                   <pose>Left</pose>

<!-- object class name -->

                   <name>aircraft</name>

<!-- whether the object is truncated by the image border: 0 = complete, 1 = truncated -->

                   <truncated>1</truncated>

         </object>

         <filename>aircraft_773.jpg</filename>

         <!-- whether the image has segmentation annotations: 0 = no, 1 = yes -->

         <segmented>0</segmented>

<!-- image owner -->

         <owner>

                   <name>Lmars, Wuhan University</name>

                   <flickrid>I do not know</flickrid>

         </owner>

         <folder>RSDS2016</folder>

         <size>

                   <width>1044</width>

                   <depth>3</depth>

                   <height>915</height>

         </size>

</annotation>
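For reference, here is a minimal sketch of reading one of these annotation files back with Python's standard xml.etree.ElementTree module (the file name aircraft_773.xml simply refers to the example above, and the Annotations/ path assumes the VOC2007 layout shown earlier):

import xml.etree.ElementTree as ET

tree = ET.parse("Annotations/aircraft_773.xml")   # any file from the Annotations folder
root = tree.getroot()
size = root.find("size")
print("image size:", size.find("width").text, "x", size.find("height").text)
for obj in root.findall("object"):
    name = obj.find("name").text                  # object class
    box = obj.find("bndbox")
    xmin, ymin = int(box.find("xmin").text), int(box.find("ymin").text)
    xmax, ymax = int(box.find("xmax").text), int(box.find("ymax").text)
    print(name, xmin, ymin, xmax, ymax)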

Complete code:

import os




import numpy as np

import codecs

import json

from glob import glob

import cv2

import shutil

from sklearn.model_selection import train_test_split

# 1. Input/output paths

labelme_path = "LabelmeData/"  # directory containing the original Labelme annotations (.json) and images

saved_path = "VOC2007/"  # output directory for the VOC2007 dataset

isUseTest = True  # whether to split out a test set

# 2. Create the required folders

if not os.path.exists(saved_path + "Annotations"):

    os.makedirs(saved_path + "Annotations")

if not os.path.exists(saved_path + "JPEGImages/"):

    os.makedirs(saved_path + "JPEGImages/")

if not os.path.exists(saved_path + "ImageSets/Main/"):

    os.makedirs(saved_path + "ImageSets/Main/")

# 3. Collect the .json files to process

files = glob(labelme_path + "*.json")

files = [i.replace("\\","/").split("/")[-1].split(".json")[0] for i in files]

print(files)

# 4. Read the Labelme annotations and write the VOC XML files

for json_file_ in files:

    json_filename = labelme_path + json_file_ + ".json"

    with open(json_filename, "r", encoding="utf-8") as f:
        json_file = json.load(f)

    # read the matching image to get its dimensions (assumes a .jpg with the same base name exists)
    height, width, channels = cv2.imread(labelme_path + json_file_ + ".jpg").shape

    with codecs.open(saved_path + "Annotations/" + json_file_ + ".xml", "w", "utf-8") as xml:

        xml.write('<annotation>\n')

        xml.write('\t<folder>' + 'WH_data' + '</folder>\n')

        xml.write('\t<filename>' + json_file_ + ".jpg" + '</filename>\n')

        xml.write('\t<source>\n')

        xml.write('\t\t<database>WH Data</database>\n')

        xml.write('\t\t<annotation>WH</annotation>\n')

        xml.write('\t\t<image>flickr</image>\n')

        xml.write('\t\t<flickrid>NULL</flickrid>\n')

        xml.write('\t</source>\n')

        xml.write('\t<owner>\n')

        xml.write('\t\t<flickrid>NULL</flickrid>\n')

        xml.write('\t\t<name>WH</name>\n')

        xml.write('\t</owner>\n')

        xml.write('\t<size>\n')

        xml.write('\t\t<width>' + str(width) + '</width>\n')

        xml.write('\t\t<height>' + str(height) + '</height>\n')

        xml.write('\t\t<depth>' + str(channels) + '</depth>\n')

        xml.write('\t</size>\n')

        xml.write('\t<segmented>0</segmented>\n')

        for multi in json_file["shapes"]:

            points = np.array(multi["points"])

            labelName = multi["label"]

            xmin = min(points[:, 0])

            xmax = max(points[:, 0])

            ymin = min(points[:, 1])

            ymax = max(points[:, 1])

            label = multi["label"]

            # skip degenerate boxes (zero width or height)
            if xmax <= xmin:

                pass

            elif ymax <= ymin:

                pass

            else:

                xml.write('\t<object>\n')

                xml.write('\t\t<name>' + labelName+ '</name>\n')

                xml.write('\t\t<pose>Unspecified</pose>\n')

                xml.write('\t\t<truncated>1</truncated>\n')

                xml.write('\t\t<difficult>0</difficult>\n')

                xml.write('\t\t<bndbox>\n')

                xml.write('\t\t\t<xmin>' + str(int(xmin)) + '</xmin>\n')

                xml.write('\t\t\t<ymin>' + str(int(ymin)) + '</ymin>\n')

                xml.write('\t\t\t<xmax>' + str(int(xmax)) + '</xmax>\n')

                xml.write('\t\t\t<ymax>' + str(int(ymax)) + '</ymax>\n')

                xml.write('\t\t</bndbox>\n')

                xml.write('\t</object>\n')

                print(json_filename, xmin, ymin, xmax, ymax, label)

        xml.write('</annotation>')

# 5. Copy the images into VOC2007/JPEGImages/

image_files = glob(labelme_path + "*.jpg")

print("copy image files to VOC007/JPEGImages/")

for image in image_files:

    shutil.copy(image, saved_path + "JPEGImages/")

# 6. Split the dataset and write the ImageSets/Main txt files

txtsavepath = saved_path + "ImageSets/Main/"

ftrainval = open(txtsavepath + '/trainval.txt', 'w')

ftest = open(txtsavepath + '/test.txt', 'w')

ftrain = open(txtsavepath + '/train.txt', 'w')

fval = open(txtsavepath + '/val.txt', 'w')

total_files = glob("./VOC2007/Annotations/*.xml")

total_files = [i.replace("\\","/").split("/")[-1].split(".xml")[0] for i in total_files]

trainval_files=[]

test_files=[]

if isUseTest:

    trainval_files, test_files = train_test_split(total_files, test_size=0.15, random_state=55)

else:

    trainval_files=total_files

for file in trainval_files:

    ftrainval.write(file + "\n")

# split trainval into train and val

train_files, val_files = train_test_split(trainval_files, test_size=0.15, random_state=55)

# train

for file in train_files:

    ftrain.write(file + "\n")

# val

for file in val_files:

    fval.write(file + "\n")

for file in test_files:

    print(file)

    ftest.write(file + "\n")

ftrainval.close()

ftrain.close()

fval.close()

ftest.close()

Note: the training and validation sets are split with sklearn.model_selection.train_test_split.
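After the script finishes, a quick way to sanity-check the result is to count how many image names ended up in each list (a minimal sketch; it only assumes the VOC2007/ImageSets/Main/*.txt files written above):

import os

main_dir = "VOC2007/ImageSets/Main/"
for name in ["trainval.txt", "train.txt", "val.txt", "test.txt"]:
    with open(os.path.join(main_dir, name)) as f:
        ids = [line.strip() for line in f if line.strip()]
    print(name, len(ids), "entries")

With isUseTest = True, roughly 15% of the images go to test.txt, and the remaining trainval part is split again 85/15 into train.txt and val.txt.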