1. First, create an Anaconda virtual environment named labelme. Adjust the Python version to match your own; mine is 3.6.

① In the Anaconda Prompt (the command window for the virtual environments), create a new virtual environment with the following command:

conda create -n labelme python=3.6

② Once it has been created, activate the virtual environment:

conda activate labelme

③ labelme needs a number of dependency packages to run properly, so first install pyqt and pillow:

conda install pyqt
conda install pillow

Answer yes to each prompt.

④ Install labelme:

pip install labelme

At this point the setup work for labelme is done; next comes actually using it.
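As an optional sanity check, you can print the installed version from the still-activated environment (the labelme package exposes a __version__ attribute):

python -c "import labelme; print(labelme.__version__)"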

2. Using labelme. Open the Anaconda Prompt from the desktop (this assumes you have already installed Anaconda) and activate the labelme environment (if you are already inside the environment, there is no need to enter it again).

Enter the environment:

conda activate labelme

Launch labelme:

labelme

3. Once the labelme interface is open, start building your own dataset (it can be for object detection or for instance segmentation, whichever suits your needs).

The labelme interface is shown in the figure below.

Open opens a single image; Open Dir opens the folder holding the images you want to annotate.
Then click Create Polygons on the left and start placing points to trace around the target, as shown below.

Once the target is enclosed and the polygon is closed, name the label after your class. In the figure below, for example, the red polygon is named Belt and the green one Shadow.

Then click Save, switch to the next image, and continue annotating until every image is labeled.

Go to the save path; it contains the original images along with their JSON files, as shown below.
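Each JSON file stores the annotations for one image. A minimal sketch of its structure is shown below; the field values here are made up for illustration, and imageData (the base64-encoded image itself) is truncated:

{
  "version": "4.5.6",
  "flags": {},
  "shapes": [
    {
      "label": "Belt",
      "points": [[102.0, 118.0], [340.0, 125.0], [335.0, 260.0]],
      "group_id": null,
      "shape_type": "polygon",
      "flags": {}
    }
  ],
  "imagePath": "0001.jpg",
  "imageData": "...",
  "imageHeight": 480,
  "imageWidth": 640
}

The conversion scripts below read exactly these shape fields (label, points, group_id, shape_type).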

4. Preparation before converting the dataset

Your instance_segmentation directory should contain four items: data_annotated, which holds your original images and their JSON files; labelme2coco.py, the script that converts to the COCO format; labelme2voc.py, the script that converts to the VOC format; and labels.txt, whose first two lines stay unchanged and which then lists one of your annotated classes per line, as shown below.
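For example, with the Belt and Shadow classes used above, labels.txt would look like this (the two fixed lines, __ignore__ and _background_, are asserted by the conversion scripts below):

__ignore__
_background_
Belt
Shadow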

If you do not already have the labelme conversion scripts, save the following code into .py files with the corresponding names.

labelme2coco.py

#!/usr/bin/env python

import argparse
import collections
import datetime
import glob
import json
import os
import os.path as osp
import sys
import uuid

import imgviz
import numpy as np

import labelme

try:
    import pycocotools.mask
except ImportError:
    print("Please install pycocotools:\n\n    pip install pycocotools\n")
    sys.exit(1)


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("input_dir", help="input annotated directory")
    parser.add_argument("output_dir", help="output dataset directory")
    parser.add_argument("--labels", help="labels file", required=True)
    parser.add_argument("--noviz", help="no visualization", action="store_true")
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print("Output directory already exists:", args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, "JPEGImages"))
    if not args.noviz:
        os.makedirs(osp.join(args.output_dir, "Visualization"))
    print("Creating dataset:", args.output_dir)

    now = datetime.datetime.now()

    data = dict(
        info=dict(
            description=None,
            url=None,
            version=None,
            year=now.year,
            contributor=None,
            date_created=now.strftime("%Y-%m-%d %H:%M:%S.%f"),
        ),
        licenses=[dict(url=None, id=0, name=None,)],
        images=[
            # license, url, file_name, height, width, date_captured, id
        ],
        type="instances",
        annotations=[
            # segmentation, area, iscrowd, image_id, bbox, category_id, id
        ],
        categories=[
            # supercategory, id, name
        ],
    )

    class_name_to_id = {}
    for i, line in enumerate(open(args.labels).readlines()):
        class_id = i - 1  # starts with -1
        class_name = line.strip()
        if class_id == -1:
            assert class_name == "__ignore__"
            continue
        class_name_to_id[class_name] = class_id
        data["categories"].append(
            dict(supercategory=None, id=class_id, name=class_name,)
        )

    out_ann_file = osp.join(args.output_dir, "annotations.json")
    label_files = glob.glob(osp.join(args.input_dir, "*.json"))
    for image_id, filename in enumerate(label_files):
        print("Generating dataset from:", filename)

        label_file = labelme.LabelFile(filename=filename)

        base = osp.splitext(osp.basename(filename))[0]
        out_img_file = osp.join(args.output_dir, "JPEGImages", base + ".jpg")

        img = labelme.utils.img_data_to_arr(label_file.imageData)
        imgviz.io.imsave(out_img_file, img)
        data["images"].append(
            dict(
                license=0,
                url=None,
                file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
                height=img.shape[0],
                width=img.shape[1],
                date_captured=None,
                id=image_id,
            )
        )

        masks = {}  # for area
        segmentations = collections.defaultdict(list)  # for segmentation
        for shape in label_file.shapes:
            points = shape["points"]
            label = shape["label"]
            group_id = shape.get("group_id")
            shape_type = shape.get("shape_type", "polygon")
            mask = labelme.utils.shape_to_mask(img.shape[:2], points, shape_type)

            if group_id is None:
                group_id = uuid.uuid1()

            instance = (label, group_id)

            if instance in masks:
                masks[instance] = masks[instance] | mask
            else:
                masks[instance] = mask

            if shape_type == "rectangle":
                (x1, y1), (x2, y2) = points
                x1, x2 = sorted([x1, x2])
                y1, y2 = sorted([y1, y2])
                points = [x1, y1, x2, y1, x2, y2, x1, y2]
            if shape_type == "circle":
                (x1, y1), (x2, y2) = points
                r = np.linalg.norm([x2 - x1, y2 - y1])
                # r(1-cos(a/2))<x, a=2*pi/N => N>pi/arccos(1-x/r)
                # x: tolerance of the gap between the arc and the line segment
                n_points_circle = max(int(np.pi / np.arccos(1 - 1 / r)), 12)
                i = np.arange(n_points_circle)
                x = x1 + r * np.sin(2 * np.pi / n_points_circle * i)
                y = y1 + r * np.cos(2 * np.pi / n_points_circle * i)
                points = np.stack((x, y), axis=1).flatten().tolist()
            else:
                points = np.asarray(points).flatten().tolist()

            segmentations[instance].append(points)
        segmentations = dict(segmentations)

        for instance, mask in masks.items():
            cls_name, group_id = instance
            if cls_name not in class_name_to_id:
                continue
            cls_id = class_name_to_id[cls_name]

            mask = np.asfortranarray(mask.astype(np.uint8))
            mask = pycocotools.mask.encode(mask)
            area = float(pycocotools.mask.area(mask))
            bbox = pycocotools.mask.toBbox(mask).flatten().tolist()

            data["annotations"].append(
                dict(
                    id=len(data["annotations"]),
                    image_id=image_id,
                    category_id=cls_id,
                    segmentation=segmentations[instance],
                    area=area,
                    bbox=bbox,
                    iscrowd=0,
                )
            )

        if not args.noviz:
            viz = img
            if masks:
                labels, captions, masks = zip(
                    *[
                        (class_name_to_id[cnm], cnm, msk)
                        for (cnm, gid), msk in masks.items()
                        if cnm in class_name_to_id
                    ]
                )
                viz = imgviz.instances2rgb(
                    image=img,
                    labels=labels,
                    masks=masks,
                    captions=captions,
                    font_size=15,
                    line_width=2,
                )
            out_viz_file = osp.join(args.output_dir, "Visualization", base + ".jpg")
            imgviz.io.imsave(out_viz_file, viz)

    with open(out_ann_file, "w") as f:
        json.dump(data, f)


if __name__ == "__main__":
    main()
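Note that labelme2coco.py depends on pycocotools (it exits with a reminder if the import fails), so install it first:

pip install pycocotools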

labelme2voc.py

#!/usr/bin/env python

from __future__ import print_function

import argparse
import glob
import os
import os.path as osp
import sys

import imgviz
import numpy as np

import labelme


def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("input_dir", help="input annotated directory")
    parser.add_argument("output_dir", help="output dataset directory")
    parser.add_argument("--labels", help="labels file", required=True)
    parser.add_argument("--noviz", help="no visualization", action="store_true")
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print("Output directory already exists:", args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, "JPEGImages"))
    os.makedirs(osp.join(args.output_dir, "SegmentationClass"))
    os.makedirs(osp.join(args.output_dir, "SegmentationClassPNG"))
    if not args.noviz:
        os.makedirs(osp.join(args.output_dir, "SegmentationClassVisualization"))
    os.makedirs(osp.join(args.output_dir, "SegmentationObject"))
    os.makedirs(osp.join(args.output_dir, "SegmentationObjectPNG"))
    if not args.noviz:
        os.makedirs(osp.join(args.output_dir, "SegmentationObjectVisualization"))
    print("Creating dataset:", args.output_dir)

    class_names = []
    class_name_to_id = {}
    for i, line in enumerate(open(args.labels).readlines()):
        class_id = i - 1  # starts with -1
        class_name = line.strip()
        class_name_to_id[class_name] = class_id
        if class_id == -1:
            assert class_name == "__ignore__"
            continue
        elif class_id == 0:
            assert class_name == "_background_"
        class_names.append(class_name)
    class_names = tuple(class_names)
    print("class_names:", class_names)
    out_class_names_file = osp.join(args.output_dir, "class_names.txt")
    with open(out_class_names_file, "w") as f:
        f.writelines("\n".join(class_names))
    print("Saved class_names:", out_class_names_file)

    for filename in glob.glob(osp.join(args.input_dir, "*.json")):
        print("Generating dataset from:", filename)

        label_file = labelme.LabelFile(filename=filename)

        base = osp.splitext(osp.basename(filename))[0]
        out_img_file = osp.join(args.output_dir, "JPEGImages", base + ".jpg")
        out_cls_file = osp.join(args.output_dir, "SegmentationClass", base + ".npy")
        out_clsp_file = osp.join(args.output_dir, "SegmentationClassPNG", base + ".png")
        if not args.noviz:
            out_clsv_file = osp.join(
                args.output_dir,
                "SegmentationClassVisualization",
                base + ".jpg",
            )
        out_ins_file = osp.join(args.output_dir, "SegmentationObject", base + ".npy")
        out_insp_file = osp.join(args.output_dir, "SegmentationObjectPNG", base + ".png")
        if not args.noviz:
            out_insv_file = osp.join(
                args.output_dir,
                "SegmentationObjectVisualization",
                base + ".jpg",
            )

        img = labelme.utils.img_data_to_arr(label_file.imageData)
        imgviz.io.imsave(out_img_file, img)

        cls, ins = labelme.utils.shapes_to_label(
            img_shape=img.shape,
            shapes=label_file.shapes,
            label_name_to_value=class_name_to_id,
        )
        ins[cls == -1] = 0  # ignore it.

        # class label
        labelme.utils.lblsave(out_clsp_file, cls)
        np.save(out_cls_file, cls)
        if not args.noviz:
            clsv = imgviz.label2rgb(
                cls,
                imgviz.rgb2gray(img),
                label_names=class_names,
                font_size=15,
                loc="rb",
            )
            imgviz.io.imsave(out_clsv_file, clsv)

        # instance label
        labelme.utils.lblsave(out_insp_file, ins)
        np.save(out_ins_file, ins)
        if not args.noviz:
            instance_ids = np.unique(ins)
            instance_names = [str(i) for i in range(max(instance_ids) + 1)]
            insv = imgviz.label2rgb(
                ins,
                imgviz.rgb2gray(img),
                label_names=instance_names,
                font_size=15,
                loc="rb",
            )
            imgviz.io.imsave(out_insv_file, insv)


if __name__ == "__main__":
    main()

5. Converting to a VOC dataset

In the root of the folder containing the four items data_annotated, labelme2coco.py, labelme2voc.py, and labels.txt, run the following command:

python labelme2voc.py data_annotated data_dataset_voc --labels labels.txt

The output looks like the following; the conversion succeeded.
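Judging from the directories labelme2voc.py creates, data_dataset_voc should end up containing roughly the following (the two Visualization folders are skipped if you pass --noviz):

data_dataset_voc/
├── JPEGImages/                        # copies of the original images
├── SegmentationClass/                 # per-image class label arrays (.npy)
├── SegmentationClassPNG/              # per-image class label PNGs
├── SegmentationClassVisualization/    # class label overlays (.jpg)
├── SegmentationObject/                # per-image instance label arrays (.npy)
├── SegmentationObjectPNG/             # per-image instance label PNGs
├── SegmentationObjectVisualization/   # instance label overlays (.jpg)
└── class_names.txt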

6. Converting to a COCO dataset

In the same root folder containing data_annotated, labelme2coco.py, labelme2voc.py, and labels.txt, run the following command:

python labelme2coco.py data_annotated data_dataset_coco --labels labels.txt 

The output looks like the following; the conversion succeeded.
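Likewise, based on what labelme2coco.py writes out, data_dataset_coco should contain roughly the following (Visualization is skipped with --noviz):

data_dataset_coco/
├── JPEGImages/        # copies of the original images
├── Visualization/     # instance mask overlays (.jpg)
└── annotations.json   # all annotations in COCO format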

7. Conversion complete

Go to where you store your datasets: the data_dataset_coco and data_dataset_voc folders have been generated, and they are the corresponding COCO and VOC datasets.
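As an optional final check (a minimal sketch that assumes pycocotools is installed, which labelme2coco.py already requires), you can load annotations.json with the COCO API and count the annotated instances per class:

from pycocotools.coco import COCO

# Load the COCO annotation file produced by labelme2coco.py.
coco = COCO("data_dataset_coco/annotations.json")

# Print each category name and how many annotated instances it has.
for cat in coco.loadCats(coco.getCatIds()):
    num_anns = len(coco.getAnnIds(catIds=[cat["id"]]))
    print(cat["name"], num_anns)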

Below is an example instance segmentation visualization after conversion.