yolov8-detector

YOLOv8 Detector with ONNX/TensorRT Acceleration

arm64 · ONNX · TensorRT · Computer Vision · YOLO
Overview
Primary hardware
NVIDIA Jetson (including Orin), Rockchip RK3588 (arm64)
What it does

Loads Ultralytics YOLOv8 ONNX weights, runs GPU-accelerated inference through ONNX Runtime (CUDA with CPU fallback), and publishes detections over a REST endpoint or a ROS 2 topic.
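
The container expects plain ONNX weights at MODEL_PATH. A minimal export sketch using the Ultralytics CLI on a workstation (yolov8n.pt is a placeholder for your own checkpoint):

# Export a YOLOv8 checkpoint to ONNX at the 640x640 input size the REST server assumes
pip install ultralytics
yolo export model=yolov8n.pt format=onnx imgsz=640
# Copy the resulting yolov8n.onnx onto the device, e.g. as /models/yolov8.onnx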

Why it saves time

YOLOv8 is the standard detection model robotics developers ask for. This image ships builds tuned for Jetson and RK3588, with the dependency pain already handled.

Get access

Use StreamDeploy to manage OTA updates, versioned configs, and rollbacks across fleets.

Dockerfile
# Jetson-friendly by default; works on generic arm64 with CPU fallback
ARG BASE_IMAGE=nvcr.io/nvidia/l4t-pytorch:r35.4.1-py3
FROM ${BASE_IMAGE}

ENV DEBIAN_FRONTEND=noninteractive LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3-pip python3-dev build-essential curl ca-certificates \
    && rm -rf /var/lib/apt/lists/*

# onnxruntime-gpu bundles the CPU execution provider; installing the plain
# onnxruntime wheel alongside it conflicts on the same import name.
RUN pip3 install --no-cache-dir \
    fastapi==0.111.0 "uvicorn[standard]==0.30.0" pillow==10.3.0 numpy==1.26.4 \
    onnx==1.16.0 onnxruntime-gpu==1.18.0

# Optional ROS 2 mode. Note: ROS 2 Humble packages target Ubuntu 22.04 (jammy),
# so pick a jammy-based BASE_IMAGE when building for this mode.
ENV ROS_DISTRO=humble
RUN apt-get update && apt-get install -y --no-install-recommends \
    gnupg lsb-release && \
    mkdir -p /etc/apt/keyrings && \
    curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc \
    | gpg --dearmor -o /etc/apt/keyrings/ros-archive-keyring.gpg && \
    echo "deb [arch=arm64,amd64 signed-by=/etc/apt/keyrings/ros-archive-keyring.gpg] \
    http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" \
    > /etc/apt/sources.list.d/ros2.list && \
    apt-get update && apt-get install -y --no-install-recommends \
      ros-${ROS_DISTRO}-ros-base ros-${ROS_DISTRO}-std-msgs \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY entrypoint.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh

ENV MODEL_PATH=/models/yolov8.onnx \
    MODE=rest \
    SERVICE_HOST=0.0.0.0 SERVICE_PORT=8080 \
    ROS_TOPIC=/detections

EXPOSE 8080
# Only REST mode exposes an HTTP endpoint; other modes always report healthy.
HEALTHCHECK --interval=30s --timeout=5s --start-period=20s \
  CMD bash -c 'if [ "$MODE" = "rest" ]; then curl -fsS "http://localhost:${SERVICE_PORT}/health"; else exit 0; fi'

ENTRYPOINT ["/app/entrypoint.sh"]
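
A build-and-run sketch for REST mode on a Jetson host (--runtime nvidia assumes the NVIDIA container runtime is installed; ./models is an assumed local directory holding your exported weights):

docker build -t yolov8-detector .
docker run --rm --runtime nvidia -p 8080:8080 \
  -v "$(pwd)/models:/models:ro" \
  -e MODE=rest \
  yolov8-detector

For non-Jetson arm64 boards such as the RK3588, pass --build-arg BASE_IMAGE=<your arm64 base image> at build time; inference then falls back to the CPU execution provider.
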
entrypoint.sh
#!/usr/bin/env bash
set -euo pipefail

if [[ "${MODE}" == "ros2" ]]; then
  source /opt/ros/${ROS_DISTRO}/setup.bash
  python3 - << 'PY'
import json
import os

import onnxruntime as ort
import rclpy
from rclpy.node import Node
from std_msgs.msg import String

# Load the model up front so a bad MODEL_PATH fails fast; the demo publisher
# below emits dummy detections until a camera/input source is wired in.
model = os.getenv("MODEL_PATH", "/models/yolov8.onnx")
session = ort.InferenceSession(model, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

class Pub(Node):
    def __init__(self):
        super().__init__('yolo_pub')
        self.pub = self.create_publisher(String, os.getenv("ROS_TOPIC","/detections"), 10)
        self.timer = self.create_timer(1.0, self.tick)
    def tick(self):
        # Dummy payload; wire in your camera/input source here
        out = {"boxes": [[0, 0, 10, 10]], "classes": [0], "scores": [0.99]}
        msg = String()
        msg.data = json.dumps(out)  # JSON so subscribers can parse the payload
        self.pub.publish(msg)

rclpy.init()
rclpy.spin(Pub())
PY
else
  cat > /app/server.py << 'PY'
import os, io
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import JSONResponse
from PIL import Image
import numpy as np, onnxruntime as ort

app = FastAPI()
model_path = os.getenv("MODEL_PATH","/models/yolov8.onnx")
session = ort.InferenceSession(model_path, providers=["CUDAExecutionProvider","CPUExecutionProvider"])

@app.get("/health")
def health():
    return {"ok": True, "model": os.path.basename(model_path), "providers": session.get_providers()}

@app.post("/infer")
async def infer(file: UploadFile = File(...)):
    # Naive preprocessing: direct 640x640 resize (no letterboxing), 0-1 scaling
    img = Image.open(io.BytesIO(await file.read())).convert("RGB").resize((640, 640))
    x = np.asarray(img).astype(np.float32) / 255.0
    x = np.transpose(x, (2, 0, 1))[None, ...]  # HWC -> NCHW
    # Returns the raw model output tensors; decoding/NMS is left to the client
    outputs = session.run(None, {session.get_inputs()[0].name: x})
    return JSONResponse({"outputs": [o.tolist() for o in outputs]})
PY
  exec python3 -m uvicorn server:app --host "${SERVICE_HOST}" --port "${SERVICE_PORT}"
fi
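
Quick smoke tests for both modes (test.jpg is a placeholder image; the ros2 CLI must be available wherever you run the echo):

# REST mode: POST an image and get the raw output tensors back
curl -fsS -F "file=@test.jpg" http://localhost:8080/infer
# Check which execution providers actually loaded
curl -fsS http://localhost:8080/health
# ROS 2 mode: watch the demo publisher from any Humble environment on the same network
ros2 topic echo /detections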