Commit 0086bdff authored by chenzk

v1.0.2

parent d3a4b4da
# Ultralytics YOLO 🚀, AGPL-3.0 license
import shutil
from pathlib import Path
TMP = Path(__file__).resolve().parent / "tmp" # temp directory for test files
def pytest_addoption(parser):
"""
Add custom command-line options to pytest.
Args:
parser (pytest.config.Parser): The pytest parser object.
"""
parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
def pytest_collection_modifyitems(config, items):
"""
Modify the list of test items to remove tests marked as slow if the --slow option is not provided.
Args:
config (pytest.config.Config): The pytest config object.
items (list): List of test items to be executed.
"""
if not config.getoption("--slow"):
# Remove the item entirely from the list of test items if it's marked as 'slow'
items[:] = [item for item in items if "slow" not in item.keywords]
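# Illustrative example (not part of this suite): a test opts in to slow-only
# collection with the marker below, and is deselected unless pytest runs with --slow:
#
#   @pytest.mark.slow
#   def test_expensive_training():
#       ...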
def pytest_sessionstart(session):
"""
Initialize session configurations for pytest.
This function is automatically called by pytest after the 'Session' object has been created but before performing
test collection. It sets the initial seeds and prepares the temporary directory for the test session.
Args:
session (pytest.Session): The pytest session object.
"""
from ultralytics.utils.torch_utils import init_seeds
init_seeds()
shutil.rmtree(TMP, ignore_errors=True) # delete any existing tests/tmp directory
TMP.mkdir(parents=True, exist_ok=True) # create a new empty directory
def pytest_terminal_summary(terminalreporter, exitstatus, config):
"""
Cleanup operations after pytest session.
This function is automatically called by pytest at the end of the entire test session. It removes certain files
and directories used during testing.
Args:
terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object.
exitstatus (int): The exit status of the test run.
config (pytest.config.Config): The pytest config object.
"""
from ultralytics.utils import WEIGHTS_DIR
# Remove files
models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models:
Path(file).unlink(missing_ok=True)
# Remove directories
models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
for directory in [TMP.parents[1] / ".pytest_cache", TMP] + models:
shutil.rmtree(directory, ignore_errors=True)
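# Note: TMP.parents[1] is the repository root (TMP is <repo>/tests/tmp), so the
# repo-level .pytest_cache directory is removed along with tests/tmp.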
# Ultralytics YOLO 🚀, AGPL-3.0 license
import subprocess
import pytest
from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
CUDA_IS_AVAILABLE = checks.cuda_is_available()
CUDA_DEVICE_COUNT = checks.cuda_device_count()
TASK_ARGS = [
("detect", "yolov8n", "coco8.yaml"),
("segment", "yolov8n-seg", "coco8-seg.yaml"),
("classify", "yolov8n-cls", "imagenet10"),
("pose", "yolov8n-pose", "coco8-pose.yaml"),
("obb", "yolov8n-obb", "dota8.yaml"),
] # (task, model, data)
EXPORT_ARGS = [
("yolov8n", "torchscript"),
("yolov8n-seg", "torchscript"),
("yolov8n-cls", "torchscript"),
("yolov8n-pose", "torchscript"),
("yolov8n-obb", "torchscript"),
] # (model, format)
def run(cmd):
"""Execute a shell command using subprocess."""
subprocess.run(cmd.split(), check=True)
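# Note: cmd.split() is naive whitespace splitting, so quoted arguments or paths
# containing spaces would break; the commands in this file avoid both.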
def test_special_modes():
"""Test various special command modes of YOLO."""
run("yolo help")
run("yolo checks")
run("yolo version")
run("yolo settings reset")
run("yolo cfg")
@pytest.mark.parametrize("task,model,data", TASK_ARGS)
def test_train(task, model, data):
"""Test YOLO training for a given task, model, and data."""
run(f"yolo train {task} model={model}.yaml data={data} imgsz=32 epochs=1 cache=disk")
@pytest.mark.parametrize("task,model,data", TASK_ARGS)
def test_val(task, model, data):
"""Test YOLO validation for a given task, model, and data."""
run(f"yolo val {task} model={WEIGHTS_DIR / model}.pt data={data} imgsz=32 save_txt save_json")
@pytest.mark.parametrize("task,model,data", TASK_ARGS)
def test_predict(task, model, data):
"""Test YOLO prediction on sample assets for a given task and model."""
run(f"yolo predict model={WEIGHTS_DIR / model}.pt source={ASSETS} imgsz=32 save save_crop save_txt")
@pytest.mark.parametrize("model,format", EXPORT_ARGS)
def test_export(model, format):
"""Test exporting a YOLO model to different formats."""
run(f"yolo export model={WEIGHTS_DIR / model}.pt format={format} imgsz=32")
def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
"""Test the RTDETR functionality with the Ultralytics framework."""
# Note: RT-DETR nominally requires imgsz=640; a smaller size is used here for test speed
run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk") # intentionally malformed args (comma, stray spaces) to exercise CLI parsing
run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
"""Test FastSAM segmentation functionality within Ultralytics."""
source = ASSETS / "bus.jpg"
run(f"yolo segment val {task} model={model} data={data} imgsz=32")
run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
from ultralytics import FastSAM
from ultralytics.models.fastsam import FastSAMPrompt
from ultralytics.models.sam import Predictor
# Create a FastSAM model
sam_model = FastSAM(model) # or FastSAM-x.pt
# Run inference on an image
everything_results = sam_model(source, device="cpu", retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
# Remove small regions
new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
# Everything prompt
prompt_process = FastSAMPrompt(source, everything_results, device="cpu")
ann = prompt_process.everything_prompt()
# Box prompt: bbox in [x1, y1, x2, y2] format (default [0, 0, 0, 0])
ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])
# Text prompt
ann = prompt_process.text_prompt(text="a photo of a dog")
# Point prompt: points given as [[x1, y1], ...] (default [[0, 0]]);
# pointlabel gives one label per point, 0 = background, 1 = foreground (default [0])
ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
prompt_process.plot(annotations=ann, output="./")
def test_mobilesam():
"""Test MobileSAM segmentation functionality using Ultralytics."""
from ultralytics import SAM
# Load the model
model = SAM(WEIGHTS_DIR / "mobile_sam.pt")
# Source
source = ASSETS / "zidane.jpg"
# Predict a segment based on a point prompt
model.predict(source, points=[900, 370], labels=[1])
# Predict a segment based on a box prompt
model.predict(source, bboxes=[439, 437, 524, 709])
# Predict all
# model(source)
# Slow Tests -----------------------------------------------------------------------------------------------------------
@pytest.mark.slow
@pytest.mark.parametrize("task,model,data", TASK_ARGS)
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
def test_train_gpu(task, model, data):
"""Test YOLO training on GPU(s) for various tasks and models."""
run(f"yolo train {task} model={model}.yaml data={data} imgsz=32 epochs=1 device=0") # single GPU
run(f"yolo train {task} model={model}.pt data={data} imgsz=32 epochs=1 device=0,1") # multi GPU
# Ultralytics YOLO 🚀, AGPL-3.0 license
import pytest
import torch
from ultralytics import YOLO
from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
CUDA_IS_AVAILABLE = checks.cuda_is_available()
CUDA_DEVICE_COUNT = checks.cuda_device_count()
MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path
DATA = "coco8.yaml"
BUS = ASSETS / "bus.jpg"
def test_checks():
"""Validate CUDA settings against torch CUDA functions."""
assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_train():
"""Test model training on a minimal dataset."""
device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, device=device) # requires imgsz>=64
@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices():
"""Validate model prediction on multiple devices."""
model = YOLO("yolov8n.pt")
model = model.cpu()
assert str(model.device) == "cpu"
_ = model(BUS) # CPU inference
assert str(model.device) == "cpu"
model = model.to("cuda:0")
assert str(model.device) == "cuda:0"
_ = model(BUS) # CUDA inference
assert str(model.device) == "cuda:0"
model = model.cpu()
assert str(model.device) == "cpu"
_ = model(BUS) # CPU inference
assert str(model.device) == "cpu"
model = model.cuda()
assert str(model.device) == "cuda:0"
_ = model(BUS) # CUDA inference
assert str(model.device) == "cuda:0"
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_autobatch():
"""Check batch size for YOLO model using autobatch."""
from ultralytics.utils.autobatch import check_train_batch_size
check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
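# check_train_batch_size estimates the largest batch size that fits in CUDA memory
# (Ultralytics autobatch targets roughly 60% memory utilization by default; treat
# the exact fraction as an implementation detail).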
@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_utils_benchmarks():
"""Profile YOLO models for performance benchmarks."""
from ultralytics.utils.benchmarks import ProfileModels
# Pre-export a dynamic engine model to use dynamic inference
YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_sam():
"""Test SAM model prediction with various prompts."""
from ultralytics import SAM
from ultralytics.models.sam import Predictor as SAMPredictor
# Load a model
model = SAM(WEIGHTS_DIR / "sam_b.pt")
# Display model information (optional)
model.info()
# Run inference
model(BUS, device=0)
# Run inference with bboxes prompt
model(BUS, bboxes=[439, 437, 524, 709], device=0)
# Run inference with points prompt
model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
# Create SAMPredictor
overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
predictor = SAMPredictor(overrides=overrides)
# Set image
predictor.set_image(ASSETS / "zidane.jpg") # set with image file
# predictor(bboxes=[439, 437, 524, 709])
# predictor(points=[900, 370], labels=[1])
# Reset image
predictor.reset_image()
# Ultralytics YOLO 🚀, AGPL-3.0 license
from ultralytics import YOLO
from ultralytics.cfg import get_cfg
from ultralytics.engine.exporter import Exporter
from ultralytics.models.yolo import classify, detect, segment
from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
CFG_DET = "yolov8n.yaml"
CFG_SEG = "yolov8n-seg.yaml"
CFG_CLS = "yolov8n-cls.yaml" # or 'squeezenet1_0'
CFG = get_cfg(DEFAULT_CFG)
MODEL = WEIGHTS_DIR / "yolov8n"
def test_func(*args): # noqa
"""Test function callback."""
print("callback test passed")
def test_export():
"""Test model exporting functionality."""
exporter = Exporter()
exporter.add_callback("on_export_start", test_func)
assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
f = exporter(model=YOLO(CFG_DET).model)
YOLO(f)(ASSETS) # exported model inference
def test_detect():
"""Test object detection functionality."""
overrides = {"data": "coco8.yaml", "model": CFG_DET, "imgsz": 32, "epochs": 1, "save": False}
CFG.data = "coco8.yaml"
CFG.imgsz = 32
# Trainer
trainer = detect.DetectionTrainer(overrides=overrides)
trainer.add_callback("on_train_start", test_func)
assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
trainer.train()
# Validator
val = detect.DetectionValidator(args=CFG)
val.add_callback("on_val_start", test_func)
assert test_func in val.callbacks["on_val_start"], "callback test failed"
val(model=trainer.best) # validate best.pt
# Predictor
pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
result = pred(source=ASSETS, model=f"{MODEL}.pt")
assert len(result), "predictor test failed"
overrides["resume"] = trainer.last
trainer = detect.DetectionTrainer(overrides=overrides)
try:
trainer.train()
except Exception as e:
print(f"Expected exception caught: {e}")
return
Exception("Resume test failed!")
def test_segment():
"""Test image segmentation functionality."""
overrides = {"data": "coco8-seg.yaml", "model": CFG_SEG, "imgsz": 32, "epochs": 1, "save": False}
CFG.data = "coco8-seg.yaml"
CFG.imgsz = 32
# YOLO(CFG_SEG).train(**overrides) # works
# Trainer
trainer = segment.SegmentationTrainer(overrides=overrides)
trainer.add_callback("on_train_start", test_func)
assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
trainer.train()
# Validator
val = segment.SegmentationValidator(args=CFG)
val.add_callback("on_val_start", test_func)
assert test_func in val.callbacks["on_val_start"], "callback test failed"
val(model=trainer.best) # validate best.pt
# Predictor
pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
result = pred(source=ASSETS, model=f"{MODEL}-seg.pt")
assert len(result), "predictor test failed"
# Test resume
overrides["resume"] = trainer.last
trainer = segment.SegmentationTrainer(overrides=overrides)
try:
trainer.train()
except Exception as e:
print(f"Expected exception caught: {e}")
return
Exception("Resume test failed!")
def test_classify():
"""Test image classification functionality."""
overrides = {"data": "imagenet10", "model": CFG_CLS, "imgsz": 32, "epochs": 1, "save": False}
CFG.data = "imagenet10"
CFG.imgsz = 32
# YOLO(CFG_SEG).train(**overrides) # works
# Trainer
trainer = classify.ClassificationTrainer(overrides=overrides)
trainer.add_callback("on_train_start", test_func)
assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
trainer.train()
# Validator
val = classify.ClassificationValidator(args=CFG)
val.add_callback("on_val_start", test_func)
assert test_func in val.callbacks["on_val_start"], "callback test failed"
val(model=trainer.best)
# Predictor
pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
result = pred(source=ASSETS, model=trainer.best)
assert len(result), "predictor test failed"
# Ultralytics YOLO 🚀, AGPL-3.0 license
import PIL.Image # import the submodule explicitly so PIL.Image is available for isinstance checks
import pytest
from ultralytics import Explorer
from ultralytics.utils import ASSETS
@pytest.mark.slow
def test_similarity():
"""Test similarity calculations and SQL queries for correctness and response length."""
exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(idx=1)
assert len(similar) == 25
similar = exp.get_similar(img=ASSETS / "zidane.jpg")
assert len(similar) == 25
similar = exp.get_similar(idx=[1, 2], limit=10)
assert len(similar) == 10
sim_idx = exp.similarity_index()
assert len(sim_idx) > 0
sql = exp.sql_query("WHERE labels LIKE '%person%'")
assert len(sql) > 0
@pytest.mark.slow
def test_det():
"""Test detection functionalities and ensure the embedding table has bounding boxes."""
exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["bboxes"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
assert len(similar) > 0
# Loose test: checks only that no errors occur, not correctness
similar = exp.plot_similar(idx=[1, 2], limit=10)
assert isinstance(similar, PIL.Image.Image)
@pytest.mark.slow
def test_seg():
"""Test segmentation functionalities and verify the embedding table includes masks."""
exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["masks"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
assert len(similar) > 0
similar = exp.plot_similar(idx=[1, 2], limit=10)
assert isinstance(similar, PIL.Image.Image)
@pytest.mark.slow
def test_pose():
"""Test pose estimation functionalities and check the embedding table for keypoints."""
exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["keypoints"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
assert len(similar) > 0
similar = exp.plot_similar(idx=[1, 2], limit=10)
assert isinstance(similar, PIL.Image.Image)
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
from pathlib import Path
import pytest
from ultralytics import YOLO, download
from ultralytics.utils import ASSETS, DATASETS_DIR, ROOT, SETTINGS, WEIGHTS_DIR
from ultralytics.utils.checks import check_requirements
MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path
CFG = "yolov8n.yaml"
SOURCE = ASSETS / "bus.jpg"
TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files
@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
def test_model_ray_tune():
"""Tune YOLO model with Ray optimization library."""
YOLO("yolov8n-cls.yaml").tune(
use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
)
@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
def test_mlflow():
"""Test training with MLflow tracking enabled."""
SETTINGS["mlflow"] = True
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
@pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
def test_mlflow_keep_run_active():
"""Test training with MLflow tracking enabled, verifying MLFLOW_KEEP_RUN_ACTIVE behavior."""
import os
import mlflow
SETTINGS["mlflow"] = True
run_name = "Test Run"
os.environ["MLFLOW_RUN"] = run_name
# Test with MLFLOW_KEEP_RUN_ACTIVE=True
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.active_run().info.status
assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
run_id = mlflow.active_run().info.run_id
# Test with MLFLOW_KEEP_RUN_ACTIVE=False
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
# Test with MLFLOW_KEEP_RUN_ACTIVE not set
os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
@pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
def test_triton():
"""Test NVIDIA Triton Server functionalities."""
check_requirements("tritonclient[all]")
import subprocess
import time
from tritonclient.http import InferenceServerClient # noqa
# Create variables
model_name = "yolo"
triton_repo = TMP / "triton_repo" # Triton repo path
triton_model = triton_repo / model_name # Triton model path
# Export model to ONNX
f = YOLO(MODEL).export(format="onnx", dynamic=True)
# Prepare Triton repo
(triton_model / "1").mkdir(parents=True, exist_ok=True)
Path(f).rename(triton_model / "1" / "model.onnx")
(triton_model / "config.pbtxt").touch()
# Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
tag = "nvcr.io/nvidia/tritonserver:23.09-py3" # 6.4 GB
# Pull the image
subprocess.call(f"docker pull {tag}", shell=True)
# Run the Triton server and capture the container ID
container_id = (
subprocess.check_output(
f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
shell=True,
)
.decode("utf-8")
.strip()
)
# Wait for the Triton server to start
triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
# Wait until model is ready
for _ in range(10):
with contextlib.suppress(Exception):
assert triton_client.is_model_ready(model_name)
break
time.sleep(1)
# Check Triton inference
YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE) # exported model inference
# Kill and remove the container at the end of the test
subprocess.call(f"docker kill {container_id}", shell=True)
@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
def test_pycocotools():
"""Validate model predictions using pycocotools."""
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.models.yolo.pose import PoseValidator
from ultralytics.models.yolo.segment import SegmentationValidator
# Download annotations after each dataset has been downloaded by its validator run
url = "https://github.com/ultralytics/assets/releases/download/v8.1.0/"
args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
validator = DetectionValidator(args=args)
validator()
validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
_ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
validator = SegmentationValidator(args=args)
validator()
validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
_ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
validator = PoseValidator(args=args)
validator()
validator.is_coco = True
download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
_ = validator.eval_json(validator.stats)
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
from copy import copy
from pathlib import Path
import cv2
import numpy as np
import pytest
import torch
import yaml
from PIL import Image
from torchvision.transforms import ToTensor
from ultralytics import RTDETR, YOLO
from ultralytics.cfg import TASK2DATA
from ultralytics.data.build import load_inference_source
from ultralytics.utils import (
ASSETS,
DEFAULT_CFG,
DEFAULT_CFG_PATH,
LINUX,
MACOS,
ONLINE,
ROOT,
WEIGHTS_DIR,
WINDOWS,
Retry,
checks,
is_dir_writeable,
)
from ultralytics.utils.downloads import download
from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path
CFG = "yolov8n.yaml"
SOURCE = ASSETS / "bus.jpg"
TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files
IS_TMP_WRITEABLE = is_dir_writeable(TMP)
def test_model_forward():
"""Test the forward pass of the YOLO model."""
model = YOLO(CFG)
model(source=None, imgsz=32, augment=True) # also test no source and augment
def test_model_methods():
"""Test various methods and properties of the YOLO model."""
model = YOLO(MODEL)
# Model methods
model.info(verbose=True, detailed=True)
model = model.reset_weights()
model = model.load(MODEL)
model.to("cpu")
model.fuse()
model.clear_callback("on_train_start")
model.reset_callbacks()
# Model properties
_ = model.names
_ = model.device
_ = model.transforms
_ = model.task_map
def test_model_profile():
"""Test profiling of the YOLO model with 'profile=True' argument."""
from ultralytics.nn.tasks import DetectionModel
model = DetectionModel() # build model
im = torch.randn(1, 3, 64, 64) # requires min imgsz=64
_ = model.predict(im, profile=True)
@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
def test_predict_txt():
"""Test YOLO predictions with sources (file, dir, glob, recursive glob) specified in a text file."""
txt_file = TMP / "sources.txt"
with open(txt_file, "w") as f:
for x in [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]:
f.write(f"{x}\n")
_ = YOLO(MODEL)(source=txt_file, imgsz=32)
def test_predict_img():
"""Test YOLO prediction on various types of image sources."""
model = YOLO(MODEL)
seg_model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
cls_model = YOLO(WEIGHTS_DIR / "yolov8n-cls.pt")
pose_model = YOLO(WEIGHTS_DIR / "yolov8n-pose.pt")
obb_model = YOLO(WEIGHTS_DIR / "yolov8n-obb.pt")
im = cv2.imread(str(SOURCE))
assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1 # PIL
assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1 # ndarray
assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2 # batch
assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2 # stream
assert len(model(torch.zeros(320, 640, 3).numpy(), imgsz=32)) == 1 # tensor to numpy
batch = [
str(SOURCE), # filename
Path(SOURCE), # Path
"https://ultralytics.com/images/zidane.jpg" if ONLINE else SOURCE, # URI
cv2.imread(str(SOURCE)), # OpenCV
Image.open(SOURCE), # PIL
np.zeros((320, 640, 3)),
] # numpy
assert len(model(batch, imgsz=32)) == len(batch) # multiple sources in a batch
# Test tensor inference
im = cv2.imread(str(SOURCE)) # OpenCV
t = cv2.resize(im, (32, 32))
t = ToTensor()(t)
t = torch.stack([t, t, t, t])
results = model(t, imgsz=32)
assert len(results) == t.shape[0]
results = seg_model(t, imgsz=32)
assert len(results) == t.shape[0]
results = cls_model(t, imgsz=32)
assert len(results) == t.shape[0]
results = pose_model(t, imgsz=32)
assert len(results) == t.shape[0]
results = obb_model(t, imgsz=32)
assert len(results) == t.shape[0]
def test_predict_grey_and_4ch():
"""Test YOLO prediction on SOURCE converted to greyscale and 4-channel images."""
im = Image.open(SOURCE)
directory = TMP / "im4"
directory.mkdir(parents=True, exist_ok=True)
source_greyscale = directory / "greyscale.jpg"
source_rgba = directory / "4ch.png"
source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
source_spaces = directory / "image with spaces.jpg"
im.convert("L").save(source_greyscale) # greyscale
im.convert("RGBA").save(source_rgba) # 4-ch PNG with alpha
im.save(source_non_utf) # non-UTF characters in filename
im.save(source_spaces) # spaces in filename
# Inference
model = YOLO(MODEL)
for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
for source in Image.open(f), cv2.imread(str(f)), f:
results = model(source, save=True, verbose=True, imgsz=32)
assert len(results) == 1 # verify that an image was run
f.unlink() # cleanup
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
@Retry(times=3, delay=10)
def test_youtube():
"""
Test YouTube inference.
Marked --slow to reduce the risk of YouTube API rate limiting.
"""
model = YOLO(MODEL)
model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
def test_track_stream():
"""
Test streaming tracking on a short 10-frame video with a non-default ByteTrack tracker.
Note imgsz=160 is required for tracking, giving higher confidence and better matches.
"""
video_url = "https://ultralytics.com/assets/decelera_portrait_min.mov"
model = YOLO(MODEL)
model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True) # test frame saving also
# Test Global Motion Compensation (GMC) methods
for gmc in "orb", "sift", "ecc":
with open(ROOT / "cfg/trackers/botsort.yaml", encoding="utf-8") as f:
data = yaml.safe_load(f)
tracker = TMP / f"botsort-{gmc}.yaml"
data["gmc_method"] = gmc
with open(tracker, "w", encoding="utf-8") as f:
yaml.safe_dump(data, f)
model.track(video_url, imgsz=160, tracker=tracker)
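# Each generated tracker file is the stock botsort.yaml with a single key
# overridden, e.g.:
#
#   gmc_method: sift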
def test_val():
"""Test the validation mode of the YOLO model."""
YOLO(MODEL).val(data="coco8.yaml", imgsz=32, save_hybrid=True)
def test_train_scratch():
"""Test training the YOLO model from scratch."""
model = YOLO(CFG)
model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
model(SOURCE)
def test_train_pretrained():
"""Test training the YOLO model from a pre-trained state."""
model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
model(SOURCE)
def test_export_torchscript():
"""Test exporting the YOLO model to TorchScript format."""
f = YOLO(MODEL).export(format="torchscript", optimize=False)
YOLO(f)(SOURCE) # exported model inference
def test_export_onnx():
"""Test exporting the YOLO model to ONNX format."""
f = YOLO(MODEL).export(format="onnx", dynamic=True)
YOLO(f)(SOURCE) # exported model inference
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
def test_export_openvino():
"""Test exporting the YOLO model to OpenVINO format."""
f = YOLO(MODEL).export(format="openvino")
YOLO(f)(SOURCE) # exported model inference
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
def test_export_coreml():
"""Test exporting the YOLO model to CoreML format."""
if not WINDOWS: # RuntimeError: BlobWriter not loaded with coremltools 7.0 on Windows
if MACOS:
f = YOLO(MODEL).export(format="coreml")
YOLO(f)(SOURCE) # model prediction only supported on macOS for nms=False models
else:
YOLO(MODEL).export(format="coreml", nms=True)
def test_export_tflite(enabled=False):
"""
Test exporting the YOLO model to TFLite format.
Note TF suffers from install conflicts on Windows and macOS.
"""
if enabled and LINUX:
model = YOLO(MODEL)
f = model.export(format="tflite")
YOLO(f)(SOURCE)
def test_export_pb(enabled=False):
"""
Test exporting the YOLO model to *.pb format.
Note TF suffers from install conflicts on Windows and macOS.
"""
if enabled and LINUX:
model = YOLO(MODEL)
f = model.export(format="pb")
YOLO(f)(SOURCE)
def test_export_paddle(enabled=False):
"""
Test exporting the YOLO model to Paddle format.
Note Paddle protobuf requirements conflicting with onnx protobuf requirements.
"""
if enabled:
YOLO(MODEL).export(format="paddle")
@pytest.mark.slow
def test_export_ncnn():
"""Test exporting the YOLO model to NCNN format."""
f = YOLO(MODEL).export(format="ncnn")
YOLO(f)(SOURCE) # exported model inference
def test_all_model_yamls():
"""Test YOLO model creation for all available YAML configurations."""
for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
if "rtdetr" in m.name:
if TORCH_1_9: # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
_ = RTDETR(m.name)(SOURCE, imgsz=640) # must be 640
else:
YOLO(m.name)
def test_workflow():
"""Test the complete workflow including training, validation, prediction, and exporting."""
model = YOLO(MODEL)
model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
model.val(imgsz=32)
model.predict(SOURCE, imgsz=32)
model.export(format="onnx") # export a model to ONNX format
def test_predict_callback_and_setup():
"""Test callback functionality during YOLO prediction."""
def on_predict_batch_end(predictor):
"""Callback function that handles operations at the end of a prediction batch."""
path, im0s, _ = predictor.batch
im0s = im0s if isinstance(im0s, list) else [im0s]
bs = [predictor.dataset.bs for _ in range(len(path))]
predictor.results = zip(predictor.results, im0s, bs) # results is List[batch_size]
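# Each yielded result is now a (Results, original image, batch size) tuple,
# consumed by the for-loop later in this test.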
model = YOLO(MODEL)
model.add_callback("on_predict_batch_end", on_predict_batch_end)
dataset = load_inference_source(source=SOURCE)
bs = dataset.bs # noqa access predictor properties
results = model.predict(dataset, stream=True, imgsz=160) # source already setup
for r, im0, bs in results:
print("test_callback", im0.shape)
print("test_callback", bs)
boxes = r.boxes # Boxes object for bbox outputs
print(boxes)
def test_results():
"""Test various result formats for the YOLO model."""
for m in "yolov8n-pose.pt", "yolov8n-seg.pt", "yolov8n.pt", "yolov8n-cls.pt":
results = YOLO(WEIGHTS_DIR / m)([SOURCE, SOURCE], imgsz=160)
for r in results:
r = r.cpu().numpy()
r = r.to(device="cpu", dtype=torch.float32)
r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
r.save_crop(save_dir=TMP / "runs/tests/crops/")
r.tojson(normalize=True)
r.plot(pil=True)
r.plot(conf=True, boxes=True)
print(r, len(r), r.path)
def test_labels_and_crops():
"""Test output from prediction args for saving detection labels and crops."""
imgs = [SOURCE, ASSETS / "zidane.jpg"]
results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
save_path = Path(results[0].save_dir)
for r in results:
im_name = Path(r.path).stem
cls_idxs = r.boxes.cls.int().tolist()
# Check label path
labels = save_path / f"labels/{im_name}.txt"
assert labels.exists()
# Check detections match label count
assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
# Check crops path and files
crop_dirs = [p for p in (save_path / "crops").iterdir()]
crop_files = [f for p in crop_dirs for f in p.glob("*")]
# Crop directories match detections
assert all([r.names.get(c) in [d.name for d in crop_dirs] for c in cls_idxs])
# Same number of crops as detections
assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_utils():
"""Test utility functions in ultralytics/data/utils.py."""
from ultralytics.data.utils import HUBDatasetStats, autosplit
from ultralytics.utils.downloads import zip_directory
# from ultralytics.utils.files import WorkingDirectory
# with WorkingDirectory(ROOT.parent / 'tests'):
for task in "detect", "segment", "pose", "classify":
file = Path(TASK2DATA[task]).with_suffix(".zip") # i.e. coco8.zip
download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
stats = HUBDatasetStats(TMP / file, task=task)
stats.get_json(save=True)
stats.process_images()
autosplit(TMP / "coco8")
zip_directory(TMP / "coco8/images/val") # zip
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_converter():
"""Test dataset converters."""
from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
file = "instances_val2017.json"
download(f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{file}", dir=TMP)
convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
coco80_to_coco91_class()
def test_data_annotator():
"""Test automatic data annotation."""
from ultralytics.data.annotator import auto_annotate
auto_annotate(
ASSETS,
det_model=WEIGHTS_DIR / "yolov8n.pt",
sam_model=WEIGHTS_DIR / "mobile_sam.pt",
output_dir=TMP / "auto_annotate_labels",
)
def test_events():
"""Test event sending functionality."""
from ultralytics.hub.utils import Events
events = Events()
events.enabled = True
cfg = copy(DEFAULT_CFG) # does not require deepcopy
cfg.mode = "test"
events(cfg)
def test_cfg_init():
"""Test configuration initialization utilities."""
from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
with contextlib.suppress(SyntaxError):
check_dict_alignment({"a": 1}, {"b": 2})
copy_default_cfg()
(Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)
[smart_value(x) for x in ["none", "true", "false"]]
def test_utils_init():
"""Test initialization utilities."""
from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
get_ubuntu_version()
is_github_action_running()
get_git_origin_url()
get_git_branch()
def test_utils_checks():
"""Test various utility checks."""
checks.check_yolov5u_filename("yolov5n.pt")
checks.git_describe(ROOT)
checks.check_requirements() # check requirements.txt
checks.check_imgsz([600, 600], max_dim=1)
checks.check_imshow()
checks.check_version("ultralytics", "8.0.0")
checks.print_args()
# checks.check_imshow(warn=True)
def test_utils_benchmarks():
"""Test model benchmarking."""
from ultralytics.utils.benchmarks import ProfileModels
ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
def test_utils_torchutils():
"""Test Torch utility functions."""
from ultralytics.nn.modules.conv import Conv
from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
x = torch.randn(1, 64, 20, 20)
m = Conv(64, 64, k=1, s=2)
profile(x, [m], n=3)
get_flops_with_torch_profiler(m)
time_sync()
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_utils_downloads():
"""Test file download utilities."""
from ultralytics.utils.downloads import get_google_drive_file_info
get_google_drive_file_info("https://drive.google.com/file/d/1cqT-cJgANNrhIHCrEufUYhQ4RqiWG_lJ/view?usp=drive_link")
def test_utils_ops():
"""Test various operations utilities."""
from ultralytics.utils.ops import (
ltwh2xywh,
ltwh2xyxy,
make_divisible,
xywh2ltwh,
xywh2xyxy,
xywhn2xyxy,
xywhr2xyxyxyxy,
xyxy2ltwh,
xyxy2xywh,
xyxy2xywhn,
xyxyxyxy2xywhr,
)
make_divisible(17, torch.tensor([8]))
boxes = torch.rand(10, 4) # xywh
assert torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
assert torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
assert torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
assert torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
boxes = torch.rand(10, 5) # xywhr for OBB
boxes[:, 4] = torch.randn(10) * 30
torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3) # not asserted: large random angles make the round trip non-canonical
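# Worked example of one round trip: the xywh box (100, 100, 40, 20), a 40x20 box
# centered at (100, 100), maps through xywh2xyxy to (80, 90, 120, 110) and back
# through xyxy2xywh to the original values.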
def test_utils_files():
"""Test file handling utilities."""
from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
file_age(SOURCE)
file_date(SOURCE)
get_latest_run(ROOT / "runs")
path = TMP / "path/with spaces"
path.mkdir(parents=True, exist_ok=True)
with spaces_in_path(path) as new_path:
print(new_path)
@pytest.mark.slow
def test_utils_patches_torch_save():
"""Test torch_save backoff when _torch_save throws RuntimeError."""
from unittest.mock import patch, MagicMock
from ultralytics.utils.patches import torch_save
mock = MagicMock(side_effect=RuntimeError)
with patch("ultralytics.utils.patches._torch_save", new=mock):
with pytest.raises(RuntimeError):
torch_save(torch.zeros(1), TMP / "test.pt")
assert mock.call_count == 4, "torch_save was not attempted the expected number of times"
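# The expected count of 4 corresponds to one initial attempt plus three retries in
# torch_save's exponential-backoff patch (an assumption about its retry count).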
def test_nn_modules_conv():
"""Test Convolutional Neural Network modules."""
from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
c1, c2 = 8, 16 # input and output channels
x = torch.zeros(4, c1, 10, 10) # BCHW
# Run all modules not otherwise covered in tests
DWConvTranspose2d(c1, c2)(x)
ConvTranspose(c1, c2)(x)
Focus(c1, c2)(x)
CBAM(c1)(x)
# Fuse ops
m = Conv2(c1, c2)
m.fuse_convs()
m(x)
def test_nn_modules_block():
"""Test Neural Network block modules."""
from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
c1, c2 = 8, 16 # input and output channels
x = torch.zeros(4, c1, 10, 10) # BCHW
# Run all modules not otherwise covered in tests
C1(c1, c2)(x)
C3x(c1, c2)(x)
C3TR(c1, c2)(x)
C3Ghost(c1, c2)(x)
BottleneckCSP(c1, c2)(x)
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_hub():
"""Test Ultralytics HUB functionalities."""
from ultralytics.hub import export_fmts_hub, logout
from ultralytics.hub.utils import smart_request
export_fmts_hub()
logout()
smart_request("GET", "https://github.com", progress=True)
@pytest.fixture
def image():
"""Loads an image from a predefined source using OpenCV."""
return cv2.imread(str(SOURCE))
@pytest.mark.parametrize(
"auto_augment, erasing, force_color_jitter",
[
(None, 0.0, False),
("randaugment", 0.5, True),
("augmix", 0.2, False),
("autoaugment", 0.0, True),
],
)
def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
"""Tests classification transforms during training with various augmentation settings."""
import torchvision.transforms as T
from ultralytics.data.augment import classify_augmentations
transform = classify_augmentations(
size=224,
mean=(0.5, 0.5, 0.5),
std=(0.5, 0.5, 0.5),
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
hflip=0.5,
vflip=0.5,
auto_augment=auto_augment,
hsv_h=0.015,
hsv_s=0.4,
hsv_v=0.4,
force_color_jitter=force_color_jitter,
erasing=erasing,
interpolation=T.InterpolationMode.BILINEAR,
)
transformed_image = transform(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)))
assert transformed_image.shape == (3, 224, 224)
assert torch.is_tensor(transformed_image)
assert transformed_image.dtype == torch.float32
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_model_tune():
"""Tune YOLO model for performance."""
YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
def test_model_embeddings():
"""Test YOLO model embeddings."""
model_detect = YOLO(MODEL)
model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
for batch in [SOURCE], [SOURCE, SOURCE]: # test batch size 1 and 2
assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
def test_yolo_world():
model = YOLO("yolov8s-world.pt") # no YOLOv8n-world model yet
model.set_classes(["tree", "window"])
model(ASSETS / "bus.jpg", conf=0.01)
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Argoverse-HD dataset (ring-front-center camera) https://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Documentation: https://docs.ultralytics.com/datasets/detect/argoverse/
# Example usage: yolo train data=Argoverse.yaml
# parent
# ├── ultralytics
# └── datasets
# └── Argoverse ← downloads here (31.5 GB)
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/Argoverse # dataset root dir
train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
# Classes
names:
0: person
1: bicycle
2: car
3: motorcycle
4: bus
5: truck
6: traffic_light
7: stop_sign
# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
import json
from tqdm import tqdm
from ultralytics.utils.downloads import download
from pathlib import Path
def argoverse2yolo(set):
labels = {}
a = json.load(open(set, "rb"))
for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
img_id = annot['image_id']
img_name = a['images'][img_id]['name']
img_label_name = f'{img_name[:-3]}txt'
cls = annot['category_id'] # instance class id
x_center, y_center, width, height = annot['bbox']
x_center = (x_center + width / 2) / 1920.0 # offset and scale
y_center = (y_center + height / 2) / 1200.0 # offset and scale
width /= 1920.0 # scale
height /= 1200.0 # scale
img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
if not img_dir.exists():
img_dir.mkdir(parents=True, exist_ok=True)
k = str(img_dir / img_label_name)
if k not in labels:
labels[k] = []
labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
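      # Each line uses the YOLO detection label format, coordinates normalized to 0-1:
      #   class x_center y_center width height
      #   e.g. "2 0.5161 0.3333 0.0417 0.0500" (illustrative values)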
for k in labels:
with open(k, "w") as f:
f.writelines(labels[k])
# Download 'https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip' (deprecated S3 link)
dir = Path(yaml['path']) # dataset root dir
urls = ['https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link']
print("\n\nWARNING: Argoverse dataset MUST be downloaded manually, autodownload will NOT work.")
print(f"WARNING: Manually download Argoverse dataset '{urls[0]}' to '{dir}' and re-run your command.\n\n")
# download(urls, dir=dir)
# Convert
annotations_dir = 'Argoverse-HD/annotations/'
(dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
for d in "train.json", "val.json":
argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
# Ultralytics YOLO 🚀, AGPL-3.0 license
# DOTA 1.5 dataset https://captain-whu.github.io/DOTA/index.html for object detection in aerial images by Wuhan University
# Documentation: https://docs.ultralytics.com/datasets/obb/dota-v2/
# Example usage: yolo train model=yolov8n-obb.pt data=DOTAv1.5.yaml
# parent
# ├── ultralytics
# └── datasets
# └── dota1.5 ← downloads here (2GB)
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/DOTAv1.5 # dataset root dir
train: images/train # train images (relative to 'path') 1411 images
val: images/val # val images (relative to 'path') 458 images
test: images/test # test images (optional) 937 images
# Classes for DOTA 1.5
names:
0: plane
1: ship
2: storage tank
3: baseball diamond
4: tennis court
5: basketball court
6: ground track field
7: harbor
8: bridge
9: large vehicle
10: small vehicle
11: helicopter
12: roundabout
13: soccer ball field
14: swimming pool
15: container crane
# Download script/URL (optional)
download: https://github.com/ultralytics/yolov5/releases/download/v1.0/DOTAv1.5.zip