Skip to content
Open
Show file tree
Hide file tree
Changes from 17 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

![Discord](https://img.shields.io/discord/662098530411741184.svg?logo=discord&colorB=7289DA)

Donkeycar is minimalist and modular self driving library for Python. It is developed for hobbyists and students with a focus on allowing fast experimentation and easy community contributions. It is being actively used at the high school and university level for learning and research. It offers a [rich graphical interface](https://docs.donkeycar.com/utility/ui/) and includes a [simulator](https://docs.donkeycar.com/guide/deep_learning/simulator/) so you can experiment with self-driving even before you build a robot.
Donkeycar is a minimalist and modular self driving library for Python. It is developed for hobbyists and students with a focus on allowing fast experimentation and easy community contributions. It is being actively used at the high school and university level for learning and research. It offers a [rich graphical interface](https://docs.donkeycar.com/utility/ui/) and includes a [simulator](https://docs.donkeycar.com/guide/deep_learning/simulator/) so you can experiment with self-driving even before you build a robot.

#### Quick Links
* [Donkeycar Updates & Examples](http://donkeycar.com)
Expand Down
2 changes: 1 addition & 1 deletion donkeycar/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from pyfiglet import Figlet
import logging

__version__ = '5.2.dev2'
__version__ = '5.2.dev6'

logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO').upper())

Expand Down
2 changes: 1 addition & 1 deletion donkeycar/parts/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def __init__(self, image_w=160, image_h=120, image_d=3,
self.camera = Picamera2()
config = self.camera.create_preview_configuration(
config_dict, transform=transform)
self.camera.align_configuration(config)
# self.camera.align_configuration(config) # this created issues with the libcamera library on the Pi5, which automatically changes the resolution to 128x120 in an attempt to align with native sensor resolution
Copy link

Copilot AI Feb 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The comment has a spelling error: 'libcamera2' should likely be 'libcamera' (without the '2'). The Picamera2 library uses libcamera (not libcamera2) as its underlying camera system.

Suggested change
# self.camera.align_configuration(config) # this created issues with the libcamera2 library on the Pi5, which automatically changes the resolution to 128x120 in an attempt to align with native sensor resolution
# self.camera.align_configuration(config) # this created issues with the libcamera library on the Pi5, which automatically changes the resolution to 128x120 in an attempt to align with native sensor resolution

Copilot uses AI. Check for mistakes.
self.camera.configure(config)
# try min / max frame rate as 0.1 / 1 ms (it will be slower though)
self.camera.set_controls({"FrameDurationLimits": (100, 1000)})
Expand Down
59 changes: 48 additions & 11 deletions donkeycar/parts/interpreter.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,38 @@
import numpy as np
from typing import Union, Sequence, List

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.compiler.tensorrt import trt_convert as trt
try:
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.compiler.tensorrt import trt_convert as trt
except ImportError:
tf = None
keras = None
tag_constants = None
signature_constants = None
trt = None

logger = logging.getLogger(__name__)


def get_tflite_interpreter():
    """Return a TFLite ``Interpreter`` class from the first available runtime.

    Tries the lightweight runtimes first (``tflite_runtime``, then
    ``ai_edge_litert``) and falls back to the full TensorFlow package,
    whose module-level import may have failed (in which case ``tf`` is
    ``None``).

    Returns:
        The ``Interpreter`` class (not an instance); construct it with
        ``Interpreter(model_path=...)``.

    Raises:
        ImportError: if none of the three runtimes is installed.
    """
    try:
        from tflite_runtime.interpreter import Interpreter
        return Interpreter
    except ImportError:
        pass
    try:
        from ai_edge_litert.interpreter import Interpreter
        return Interpreter
    except ImportError:
        pass
    # tf is None when the module-level TensorFlow import failed.
    if tf is not None:
        return tf.lite.Interpreter
    # Mention every runtime we actually tried, not just two of them.
    raise ImportError("No TFLite runtime found. Install tflite-runtime, "
                      "ai-edge-litert or tensorflow.")


Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

TensorRT support check crashes without TensorFlow

Medium Severity

When TensorFlow imports fail, trt is set to None, but has_trt_support() still calls trt.TrtGraphConverterV2(). The function only catches RuntimeError, so it raises AttributeError instead of returning False.

Additional Locations (1)

Fix in Cursor Fix in Web

def has_trt_support():
try:
converter = trt.TrtGraphConverterV2()
Expand Down Expand Up @@ -91,14 +115,14 @@ def set_model(self, pilot: 'KerasPilot') -> None:
""" Some interpreters will need the model"""
pass

def set_optimizer(self, optimizer: tf.keras.optimizers.Optimizer) -> None:
def set_optimizer(self, optimizer) -> None:
pass

def compile(self, **kwargs):
raise NotImplementedError('Requires implementation')

@abstractmethod
def get_input_shape(self, input_name) -> tf.TensorShape:
def get_input_shape(self, input_name):
pass

def predict(self, img_arr: np.ndarray, *other_arr: np.ndarray) \
Expand Down Expand Up @@ -127,7 +151,7 @@ class KerasInterpreter(Interpreter):

def __init__(self):
super().__init__()
self.model: tf.keras.Model = None
self.model = None

def set_model(self, pilot: 'KerasPilot') -> None:
self.model = pilot.create_model()
Expand All @@ -146,10 +170,10 @@ def set_model(self, pilot: 'KerasPilot') -> None:
self.shapes = (dict(zip(self.input_keys, input_shape)),
dict(zip(self.output_keys, output_shape)))

def set_optimizer(self, optimizer: tf.keras.optimizers.Optimizer) -> None:
def set_optimizer(self, optimizer) -> None:
self.model.optimizer = optimizer

def get_input_shape(self, input_name) -> tf.TensorShape:
def get_input_shape(self, input_name):
assert self.model, 'Model not set'
return self.shapes[0][input_name]

Expand All @@ -174,6 +198,18 @@ def predict_from_dict(self, input_dict):
def load(self, model_path: str) -> None:
    """Load a saved Keras model and cache its input/output metadata.

    Mirrors the bookkeeping done in ``set_model()`` so the interpreter
    is fully usable for inference after loading from disk.

    :param model_path: path to the saved Keras model file
    """
    logger.info(f'Loading model {model_path}')
    self.model = keras.models.load_model(model_path, compile=False)
    # Single-input / single-output models report a bare shape tuple;
    # normalise to a list so the zip() below works uniformly for
    # multi-input/-output models as well.
    input_shape = self.model.input_shape
    if not isinstance(input_shape, list):
        input_shape = [input_shape]
    output_shape = self.model.output_shape
    if not isinstance(output_shape, list):
        output_shape = [output_shape]

    self.input_keys = self.model.input_names
    self.output_keys = self.model.output_names
    self.shapes = (dict(zip(self.input_keys, input_shape)),
                   dict(zip(self.output_keys, output_shape)))

def load_weights(self, model_path: str, by_name: bool = True) -> \
None:
Expand Down Expand Up @@ -263,7 +299,8 @@ def load(self, model_path):
'TFlitePilot should load only .tflite files'
logger.info(f'Loading model {model_path}')
# Load TFLite model and extract input and output keys
self.interpreter = tf.lite.Interpreter(model_path=model_path)
Interpreter = get_tflite_interpreter()
self.interpreter = Interpreter(model_path=model_path)
self.signatures = self.interpreter.get_signature_list()
self.runner = self.interpreter.get_signature_runner()
self.input_keys = self.signatures['serving_default']['inputs']
Expand Down Expand Up @@ -312,7 +349,7 @@ def set_model(self, pilot: 'KerasPilot') -> None:
# state as the trt model hasn't been loaded yet
self.pilot = pilot

def get_input_shape(self, input_name) -> tf.TensorShape:
def get_input_shape(self, input_name):
assert self.graph_func, "Requires loadin the tensorrt model first"
return self.graph_func.structured_input_signature[1][input_name].shape

Expand Down
39 changes: 21 additions & 18 deletions donkeycar/parts/keras.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,23 +15,27 @@
from typing import Dict, Tuple, Optional, Union, List, Sequence, Callable, Any
from logging import getLogger

from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2

import donkeycar as dk
from donkeycar.utils import normalize_image, linear_bin
from donkeycar.pipeline.types import TubRecord
from donkeycar.parts.interpreter import Interpreter, KerasInterpreter

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import (Dense, Input,Convolution2D,
MaxPooling2D, Activation, Dropout, Flatten, LSTM, BatchNormalization,
Conv3D, MaxPooling3D, Conv2DTranspose)

from tensorflow.keras.layers import TimeDistributed as TD
from tensorflow.keras.backend import concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
try:
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2
from tensorflow.keras.layers import (Dense, Input, Convolution2D,
MaxPooling2D, Activation, Dropout, Flatten, LSTM, BatchNormalization,
Conv3D, MaxPooling3D, Conv2DTranspose)
from tensorflow.keras.layers import TimeDistributed as TD
from tensorflow.keras.backend import concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
except ImportError:
tf = None
keras = None
DatasetV1 = None
DatasetV2 = None

ONE_BYTE_SCALE = 1.0 / 255.0

Expand Down Expand Up @@ -86,7 +90,7 @@ def set_optimizer(self, optimizer_type: str,
raise Exception(f"Unknown optimizer type: {optimizer_type}")
self.interpreter.set_optimizer(optimizer)

def get_input_shape(self, input_name) -> tf.TensorShape:
def get_input_shape(self, input_name):
return self.interpreter.get_input_shape(input_name)

def seq_size(self) -> int:
Expand All @@ -110,9 +114,8 @@ def run(self, img_arr: np.ndarray, *other_arr: List[float]) \
# self.output_shape() first dictionary keys, because that's how we
# set up the model
values = (norm_img_arr, ) + np_other_array
# note output_shapes() returns a 2-tuple of dicts for input shapes
# and output shapes(), so we need the first tuple here
input_dict = dict(zip(self.output_shapes()[0].keys(), values))
# use interpreter's input_keys directly (works with TFLite and Keras)
input_dict = dict(zip(self.interpreter.input_keys, values))
return self.inference_from_dict(input_dict)

def inference_from_dict(self, input_dict: Dict[str, np.ndarray]) \
Expand Down Expand Up @@ -146,7 +149,7 @@ def train(self,
verbose: int = 1,
min_delta: float = .0005,
patience: int = 5,
show_plot: bool = False) -> tf.keras.callbacks.History:
show_plot: bool = False):
"""
trains the model
"""
Expand Down Expand Up @@ -242,7 +245,7 @@ def output_types(self) -> Tuple[Dict[str, np.typename], ...]:
types = tuple({k: tf.float64 for k in d} for d in shapes)
return types

def output_shapes(self) -> Dict[str, tf.TensorShape]:
def output_shapes(self):
return {}

def __str__(self) -> str:
Expand Down
Loading