ultralytics/trackers/bot_sort.py/BOTSORT/get_kalmanfilter

class BOTSORT:
    def get_kalmanfilter(self):
        """Returns an instance of KalmanFilterXYWH for predicting and updating object states in the tracking process."""
        return KalmanFilterXYWH()

ultralytics/trackers/bot_sort.py/BOTSORT/init_track

class BOTSORT:
    def init_track(self, dets, scores, cls, img=None):
        """Initialize object tracks using detection bounding boxes, scores, class labels, and optional ReID features."""
        if len(dets) == 0:
            return []
        if self.args.with_reid and self.encoder is not None:
            features_keep = self.encoder.inference(img, dets)
            return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]  # detections
        else:
            return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)]  # detections

ultralytics/trackers/bot_sort.py/BOTSORT/get_dists

class BOTSORT:
    def get_dists(self, tracks, detections):
        """Calculates distances between tracks and detections using IoU and optionally ReID embeddings."""
        dists = matching.iou_distance(tracks, detections)
        dists_mask = dists > self.proximity_thresh

        if self.args.fuse_score:
            dists = matching.fuse_score(dists, detections)

        if self.args.with_reid and self.encoder is not None:
            emb_dists = matching.embedding_distance(tracks, detections) / 2.0
            emb_dists[emb_dists > self.appearance_thresh] = 1.0
            emb_dists[dists_mask] = 1.0
            dists = np.minimum(dists, emb_dists)
        return dists

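The fusion logic above gates the appearance (ReID) cost twice before combining it with the IoU cost: once by appearance similarity and once by spatial proximity. A minimal standalone sketch of that gating, using toy numpy matrices in place of the real matching.* outputs:

import numpy as np

# Hypothetical toy values standing in for iou_distance / embedding_distance outputs
iou_dists = np.array([[0.2, 0.9], [0.8, 0.3]])  # 2 tracks x 2 detections
emb_dists = np.array([[0.1, 0.4], [0.5, 0.05]]) / 2.0

proximity_thresh, appearance_thresh = 0.5, 0.25
emb_dists[emb_dists > appearance_thresh] = 1.0  # reject weak appearance matches
emb_dists[iou_dists > proximity_thresh] = 1.0   # reject spatially distant pairs
fused = np.minimum(iou_dists, emb_dists)        # keep the cheaper of the two costs
print(fused)  # [[0.05 0.9 ] [0.8  0.025]] -- appearance only lowers the cost for nearby, similar pairs
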
ultralytics/trackers/bot_sort.py/BOTSORT/multi_predict

class BOTSORT:
    def multi_predict(self, tracks):
        """Predicts the mean and covariance of multiple object tracks using a shared Kalman filter."""
        BOTrack.multi_predict(tracks)

ultralytics/trackers/bot_sort.py/BOTSORT/reset

class BOTSORT:
    def reset(self):
        """Resets the BOTSORT tracker to its initial state, clearing all tracked objects and internal states."""
        super().reset()
        self.gmc.reset_params()

ultralytics/trackers/basetrack.py/BaseTrack/__init__

class BaseTrack:
    def __init__(self):
        """
        Initializes a new track with a unique ID and foundational tracking attributes.

        Examples:
            Initialize a new track
            >>> track = BaseTrack()
            >>> print(track.track_id)
            0
        """
        self.track_id = 0
        self.is_activated = False
        self.state = TrackState.New
        self.history = OrderedDict()
        self.features = []
        self.curr_feature = None
        self.score = 0
        self.start_frame = 0
        self.frame_id = 0
        self.time_since_update = 0
        self.location = (np.inf, np.inf)

ultralytics/trackers/basetrack.py/BaseTrack/end_frame

class BaseTrack:
    @property
    def end_frame(self):
        """Returns the ID of the most recent frame where the object was tracked."""
        return self.frame_id

ultralytics/trackers/basetrack.py/BaseTrack/next_id

class BaseTrack:
    @staticmethod
    def next_id():
        """Increment and return the next unique global track ID for object tracking."""
        BaseTrack._count += 1
        return BaseTrack._count

ultralytics/trackers/basetrack.py/BaseTrack/activate

class BaseTrack:
    def activate(self, *args):
        """Activates the track with provided arguments, initializing necessary attributes for tracking."""
        raise NotImplementedError

ultralytics/trackers/basetrack.py/BaseTrack/predict

class BaseTrack:
    def predict(self):
        """Predicts the next state of the track based on the current state and tracking model."""
        raise NotImplementedError

ultralytics/trackers/basetrack.py/BaseTrack/update

class BaseTrack:
    def update(self, *args, **kwargs):
        """Updates the track with new observations and data, modifying its state and attributes accordingly."""
        raise NotImplementedError

ultralytics/trackers/basetrack.py/BaseTrack/mark_lost

class BaseTrack:
    def mark_lost(self):
        """Marks the track as lost by updating its state to TrackState.Lost."""
        self.state = TrackState.Lost

ultralytics/trackers/basetrack.py/BaseTrack/mark_removed

class BaseTrack:
    def mark_removed(self):
        """Marks the track as removed by setting its state to TrackState.Removed."""
        self.state = TrackState.Removed

ultralytics/trackers/basetrack.py/BaseTrack/reset_id

class BaseTrack:
    @staticmethod
    def reset_id():
        """Reset the global track ID counter to its initial value."""
        BaseTrack._count = 0

ultralytics/trackers/utils/gmc.py/GMC/__init__

class GMC:
    def __init__(self, method: str = "sparseOptFlow", downscale: int = 2) -> None:
        """
        Initialize a Generalized Motion Compensation (GMC) object with tracking method and downscale factor.

        Args:
            method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
            downscale (int): Downscale factor for processing frames.

        Examples:
            Initialize a GMC object with the 'sparseOptFlow' method and a downscale factor of 2
            >>> gmc = GMC(method="sparseOptFlow", downscale=2)
        """
        super().__init__()

        self.method = method
        self.downscale = max(1, downscale)

        if self.method == "orb":
            self.detector = cv2.FastFeatureDetector_create(20)
            self.extractor = cv2.ORB_create()
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        elif self.method == "sift":
            self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.matcher = cv2.BFMatcher(cv2.NORM_L2)
        elif self.method == "ecc":
            number_of_iterations = 5000
            termination_eps = 1e-6
            self.warp_mode = cv2.MOTION_EUCLIDEAN
            self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
        elif self.method == "sparseOptFlow":
            self.feature_params = dict(
                maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
            )
        elif self.method in {"none", "None", None}:
            self.method = None
        else:
            raise ValueError(f"Error: Unknown GMC method: {method}")

        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None
        self.initializedFirstFrame = False

ultralytics/trackers/utils/gmc.py/GMC/apply

class GMC:
    def apply(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Apply the selected motion compensation method to a raw frame to estimate the inter-frame transform.

        Args:
            raw_frame (np.ndarray): The raw frame to be processed, with shape (H, W, C).
            detections (List | None): List of detections to be masked out during keypoint extraction.

        Returns:
            (np.ndarray): 2x3 affine warp matrix mapping the previous frame to the current one.

        Examples:
            >>> gmc = GMC(method="sparseOptFlow")
            >>> raw_frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
            >>> warp_matrix = gmc.apply(raw_frame)
            >>> print(warp_matrix.shape)
            (2, 3)
        """
        if self.method in {"orb", "sift"}:
            return self.applyFeatures(raw_frame, detections)
        elif self.method == "ecc":
            return self.applyEcc(raw_frame)
        elif self.method == "sparseOptFlow":
            return self.applySparseOptFlow(raw_frame)
        else:
            return np.eye(2, 3)

ultralytics/trackers/utils/gmc.py/GMC/applyEcc

class GMC:
    def applyEcc(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply the ECC (Enhanced Correlation Coefficient) algorithm to a raw frame for motion compensation.

        Args:
            raw_frame (np.ndarray): The raw frame to be processed, with shape (H, W, C).

        Returns:
            (np.ndarray): The 2x3 warp matrix estimated by ECC (identity on the first frame or on failure).

        Examples:
            >>> gmc = GMC(method="ecc")
            >>> frame = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8)
            >>> processed_frame = gmc.applyEcc(frame)
            >>> print(processed_frame)
            [[1. 0. 0.]
             [0. 1. 0.]]
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3, dtype=np.float32)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Run the ECC algorithm. The results are stored in warp_matrix.
        # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
        try:
            (_, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
        except Exception as e:
            LOGGER.warning(f"WARNING: find transform failed. Set warp as identity {e}")

        return H

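The 2x3 matrix returned by applyEcc (and the other apply* methods) is an affine warp from the previous frame to the current one. A hedged sketch of how such a matrix might be applied to track coordinates, using plain numpy; the helper name warp_points is hypothetical, not part of the library:

import numpy as np

def warp_points(points: np.ndarray, H: np.ndarray) -> np.ndarray:
    """Apply a 2x3 affine warp H to an (N, 2) array of (x, y) points."""
    homogeneous = np.hstack([points, np.ones((len(points), 1))])  # (N, 3)
    return homogeneous @ H.T  # (N, 2)

H = np.array([[1.0, 0.0, 5.0],   # identity rotation/scale ...
              [0.0, 1.0, -3.0]]) # ... plus a (5, -3) camera translation
pts = np.array([[100.0, 200.0], [50.0, 75.0]])
print(warp_points(pts, H))  # [[105. 197.] [ 55.  72.]]
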
ultralytics/trackers/utils/gmc.py/GMC/applyFeatures

class GMC:
    def applyFeatures(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Apply feature-based methods like ORB or SIFT to a raw frame.

        Args:
            raw_frame (np.ndarray): The raw frame to be processed, with shape (H, W, C).
            detections (List | None): List of detections to be masked out during keypoint extraction.

        Returns:
            (np.ndarray): 2x3 warp matrix estimated from matched keypoints.

        Examples:
            >>> gmc = GMC(method="orb")
            >>> raw_frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
            >>> processed_frame = gmc.applyFeatures(raw_frame)
            >>> print(processed_frame.shape)
            (2, 3)
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Find the keypoints, excluding image borders and detection boxes
        mask = np.zeros_like(frame)
        mask[int(0.02 * height) : int(0.98 * height), int(0.02 * width) : int(0.98 * width)] = 255
        if detections is not None:
            for det in detections:
                tlbr = (det[:4] / self.downscale).astype(np.int_)
                mask[tlbr[1] : tlbr[3], tlbr[0] : tlbr[2]] = 0

        keypoints = self.detector.detect(frame, mask)

        # Compute the descriptors
        keypoints, descriptors = self.extractor.compute(frame, keypoints)

        # Handle first frame
        if not self.initializedFirstFrame:
            # Initialize data
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)

            # Initialization done
            self.initializedFirstFrame = True

            return H

        # Match descriptors
        knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)

        # Filter matches based on smallest spatial distance
        matches = []
        spatialDistances = []
        maxSpatialDistance = 0.25 * np.array([width, height])

        # Handle empty matches case
        if len(knnMatches) == 0:
            # Store to next iteration
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)
            return H

        for m, n in knnMatches:
            if m.distance < 0.9 * n.distance:
                prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
                currKeyPointLocation = keypoints[m.trainIdx].pt

                spatialDistance = (
                    prevKeyPointLocation[0] - currKeyPointLocation[0],
                    prevKeyPointLocation[1] - currKeyPointLocation[1],
                )

                if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and (
                    np.abs(spatialDistance[1]) < maxSpatialDistance[1]
                ):
                    spatialDistances.append(spatialDistance)
                    matches.append(m)

        meanSpatialDistances = np.mean(spatialDistances, 0)
        stdSpatialDistances = np.std(spatialDistances, 0)
        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances

        goodMatches = []
        prevPoints = []
        currPoints = []
        for i in range(len(matches)):
            if inliers[i, 0] and inliers[i, 1]:
                goodMatches.append(matches[i])
                prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                currPoints.append(keypoints[matches[i].trainIdx].pt)

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Draw the keypoint matches on the output image
        # if False:
        #     import matplotlib.pyplot as plt
        #     matches_img = np.hstack((self.prevFrame, frame))
        #     matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
        #     W = self.prevFrame.shape[1]
        #     for m in goodMatches:
        #         prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
        #         curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
        #         curr_pt[0] += W
        #         color = np.random.randint(0, 255, 3)
        #         color = (int(color[0]), int(color[1]), int(color[2]))
        #
        #         matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
        #         matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
        #         matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)
        #
        #     plt.figure()
        #     plt.imshow(matches_img)
        #     plt.show()

        # Find rigid matrix
        if prevPoints.shape[0] > 4:
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning("WARNING: not enough matching points")

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)
        self.prevDescriptors = copy.copy(descriptors)

        return H

ultralytics/trackers/utils/gmc.py/GMC/applySparseOptFlow

class GMC:
    def applySparseOptFlow(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply Sparse Optical Flow method to a raw frame.

        Args:
            raw_frame (np.ndarray): The raw frame to be processed, with shape (H, W, C).

        Returns:
            (np.ndarray): 2x3 warp matrix estimated from tracked keypoints.

        Examples:
            >>> gmc = GMC()
            >>> frame = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=np.uint8)
            >>> result = gmc.applySparseOptFlow(frame)
            >>> print(result)
            [[1. 0. 0.]
             [0. 1. 0.]]
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))

        # Find the keypoints
        keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)

        # Handle first frame
        if not self.initializedFirstFrame or self.prevKeyPoints is None:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.initializedFirstFrame = True
            return H

        # Find correspondences
        matchedKeypoints, status, _ = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)

        # Leave good correspondences only
        prevPoints = []
        currPoints = []

        for i in range(len(status)):
            if status[i]:
                prevPoints.append(self.prevKeyPoints[i])
                currPoints.append(matchedKeypoints[i])

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix
        if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == currPoints.shape[0]):
            H, _ = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning("WARNING: not enough matching points")

        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)

        return H

ultralytics/trackers/utils/gmc.py/GMC/reset_params

class GMC:
    def reset_params(self) -> None:
        """Reset the internal parameters including previous frame, keypoints, and descriptors."""
        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None
        self.initializedFirstFrame = False

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/__init__

class KalmanFilterXYAH:
    def __init__(self):
        """
        Initialize Kalman filter model matrices with motion and observation uncertainty weights.

        The Kalman filter is initialized with an 8-dimensional state space (x, y, a, h, vx, vy, va, vh), where (x, y)
        represents the bounding box center position, 'a' is the aspect ratio, 'h' is the height, and their respective
        velocities are (vx, vy, va, vh). The filter uses a constant velocity model for object motion and a linear
        observation model for bounding box location.

        Examples:
            Initialize a Kalman filter for tracking:
            >>> kf = KalmanFilterXYAH()
        """
        ndim, dt = 4, 1.0

        # Create Kalman filter model matrices
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current state estimate. These weights control
        # the amount of uncertainty in the model.
        self._std_weight_position = 1.0 / 20
        self._std_weight_velocity = 1.0 / 160

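The constant-velocity model built in __init__ is easiest to see by printing the matrices. A minimal numpy sketch mirroring the construction above for ndim=4, dt=1:

import numpy as np

ndim, dt = 4, 1.0
motion_mat = np.eye(2 * ndim)
for i in range(ndim):
    motion_mat[i, ndim + i] = dt  # position += velocity * dt
update_mat = np.eye(ndim, 2 * ndim)  # observe only the position components

state = np.array([10.0, 20.0, 1.5, 80.0, 1.0, -2.0, 0.0, 0.5])  # (x, y, a, h, vx, vy, va, vh)
print(motion_mat @ state)  # [11. 18. 1.5 80.5 1. -2. 0. 0.5] -- positions advanced by one step
print(update_mat @ state)  # [10. 20. 1.5 80.] -- the measurement-space projection
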
ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/initiate

class KalmanFilterXYAH:
    def initiate(self, measurement: np.ndarray) -> tuple:
        """
        Create a track from an unassociated measurement.

        Args:
            measurement (ndarray): Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a,
                and height h.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector (8-dimensional) and covariance matrix (8x8 dimensional)
                of the new track. Unobserved velocities are initialized to 0 mean.

        Examples:
            >>> kf = KalmanFilterXYAH()
            >>> measurement = np.array([100, 50, 1.5, 200])
            >>> mean, covariance = kf.initiate(measurement)
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3],
        ]
        covariance = np.diag(np.square(std))
        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/predict

class KalmanFilterXYAH:
    def predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Run Kalman filter prediction step.

        Args:
            mean (ndarray): The 8-dimensional mean vector of the object state at the previous time step.
            covariance (ndarray): The 8x8-dimensional covariance matrix of the object state at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state.
                Unobserved velocities are initialized to 0 mean.

        Examples:
            >>> kf = KalmanFilterXYAH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> predicted_mean, predicted_covariance = kf.predict(mean, covariance)
        """
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3],
        ]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/project

class KalmanFilterXYAH:
    def project(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Project state distribution to measurement space.

        Args:
            mean (ndarray): The state's mean vector (8 dimensional array).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).

        Returns:
            (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.

        Examples:
            >>> kf = KalmanFilterXYAH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> projected_mean, projected_covariance = kf.project(mean, covariance)
        """
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3],
        ]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/multi_predict

class KalmanFilterXYAH:
    def multi_predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Run Kalman filter prediction step for multiple object states (Vectorized version).

        Args:
            mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
            covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean matrix and covariance matrix of the predicted states.
                The mean matrix has shape (N, 8) and the covariance matrix has shape (N, 8, 8). Unobserved velocities
                are initialized to 0 mean.

        Examples:
            >>> mean = np.random.rand(10, 8)  # 10 object states
            >>> covariance = np.random.rand(10, 8, 8)  # Covariance matrices for 10 object states
            >>> predicted_mean, predicted_covariance = kalman_filter.multi_predict(mean, covariance)
        """
        std_pos = [
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 3],
            1e-2 * np.ones_like(mean[:, 3]),
            self._std_weight_position * mean[:, 3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 3],
            1e-5 * np.ones_like(mean[:, 3]),
            self._std_weight_velocity * mean[:, 3],
        ]
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)

        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/update

class KalmanFilterXYAH:
    def update(self, mean: np.ndarray, covariance: np.ndarray, measurement: np.ndarray) -> tuple:
        """
        Run Kalman filter correction step.

        Args:
            mean (ndarray): The predicted state's mean vector (8 dimensional).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).
            measurement (ndarray): The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center
                position, a the aspect ratio, and h the height of the bounding box.

        Returns:
            (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.

        Examples:
            >>> kf = KalmanFilterXYAH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> measurement = np.array([1, 1, 1, 1])
            >>> new_mean, new_covariance = kf.update(mean, covariance, measurement)
        """
        projected_mean, projected_cov = self.project(mean, covariance)

        chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False
        ).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

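A short end-to-end sketch of the initiate → predict → update cycle, assuming the KalmanFilterXYAH snippets above are assembled into one class with numpy and scipy available; the measurement values are illustrative only:

import numpy as np

kf = KalmanFilterXYAH()
mean, cov = kf.initiate(np.array([100.0, 50.0, 1.5, 200.0]))  # first detection (x, y, a, h)
mean, cov = kf.predict(mean, cov)                             # propagate one frame ahead
mean, cov = kf.update(mean, cov, np.array([102.0, 51.0, 1.5, 201.0]))  # correct with the new box
print(mean[:4])  # corrected center/aspect/height, pulled toward the measurement
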
ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYAH/gating_distance

class KalmanFilterXYAH:
    def gating_distance(
        self,
        mean: np.ndarray,
        covariance: np.ndarray,
        measurements: np.ndarray,
        only_position: bool = False,
        metric: str = "maha",
    ) -> np.ndarray:
        """
        Compute gating distance between state distribution and measurements.

        A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square
        distribution has 4 degrees of freedom, otherwise 2.

        Args:
            mean (ndarray): Mean vector over the state distribution (8 dimensional).
            covariance (ndarray): Covariance of the state distribution (8x8 dimensional).
            measurements (ndarray): An (N, 4) matrix of N measurements, each in format (x, y, a, h) where (x, y) is
                the bounding box center position, a the aspect ratio, and h the height.
            only_position (bool): If True, distance computation is done with respect to box center position only.
            metric (str): The metric to use for calculating the distance. Options are 'gaussian' for the squared
                Euclidean distance and 'maha' for the squared Mahalanobis distance.

        Returns:
            (np.ndarray): Returns an array of length N, where the i-th element contains the squared distance between
                (mean, covariance) and `measurements[i]`.

        Examples:
            Compute gating distance using Mahalanobis metric:
            >>> kf = KalmanFilterXYAH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> measurements = np.array([[1, 1, 1, 1], [2, 2, 1, 1]])
            >>> distances = kf.gating_distance(mean, covariance, measurements, only_position=False, metric="maha")
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        d = measurements - mean
        if metric == "gaussian":
            return np.sum(d * d, axis=1)
        elif metric == "maha":
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
            return np.sum(z * z, axis=0)  # square maha
        else:
            raise ValueError("Invalid distance metric")

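gating_distance is typically compared against the 95% quantile of a chi-square distribution (the `chi2inv95` table mentioned in the docstring). A hedged sketch of that thresholding, assuming scipy is installed and the class above is in scope:

import numpy as np
from scipy.stats import chi2

chi2inv95_4dof = chi2.ppf(0.95, df=4)  # ~9.4877 for full (x, y, a, h) gating
kf = KalmanFilterXYAH()
mean, cov = kf.initiate(np.array([0.0, 0.0, 1.0, 1.0]))
measurements = np.array([[0.1, 0.1, 1.0, 1.0], [50.0, 50.0, 1.0, 1.0]])
d = kf.gating_distance(mean, cov, measurements, metric="maha")
print(d < chi2inv95_4dof)  # [ True False] -- only the nearby box passes the gate
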
ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYWH/initiate

class KalmanFilterXYWH:
    def initiate(self, measurement: np.ndarray) -> tuple:
        """
        Create track from unassociated measurement.

        Args:
            measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and
                height.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
                of the new track. Unobserved velocities are initialized to 0 mean.

        Examples:
            >>> kf = KalmanFilterXYWH()
            >>> measurement = np.array([100, 50, 20, 40])
            >>> mean, covariance = kf.initiate(measurement)
            >>> print(mean)
            [100.  50.  20.  40.   0.   0.   0.   0.]
            >>> print(covariance)
            [[ 4.      0.      0.      0.      0.      0.      0.      0.    ]
             [ 0.     16.      0.      0.      0.      0.      0.      0.    ]
             [ 0.      0.      4.      0.      0.      0.      0.      0.    ]
             [ 0.      0.      0.     16.      0.      0.      0.      0.    ]
             [ 0.      0.      0.      0.      1.5625  0.      0.      0.    ]
             [ 0.      0.      0.      0.      0.      6.25    0.      0.    ]
             [ 0.      0.      0.      0.      0.      0.      1.5625  0.    ]
             [ 0.      0.      0.      0.      0.      0.      0.      6.25  ]]
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3],
        ]
        covariance = np.diag(np.square(std))
        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYWH/predict

class KalmanFilterXYWH:
    def predict(self, mean, covariance) -> tuple:
        """
        Run Kalman filter prediction step.

        Args:
            mean (ndarray): The 8-dimensional mean vector of the object state at the previous time step.
            covariance (ndarray): The 8x8-dimensional covariance matrix of the object state at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state.
                Unobserved velocities are initialized to 0 mean.

        Examples:
            >>> kf = KalmanFilterXYWH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> predicted_mean, predicted_covariance = kf.predict(mean, covariance)
        """
        std_pos = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3],
        ]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYWH/project

class KalmanFilterXYWH:
    def project(self, mean, covariance) -> tuple:
        """
        Project state distribution to measurement space.

        Args:
            mean (ndarray): The state's mean vector (8 dimensional array).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).

        Returns:
            (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.

        Examples:
            >>> kf = KalmanFilterXYWH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> projected_mean, projected_cov = kf.project(mean, covariance)
        """
        std = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
        ]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYWH/multi_predict

class KalmanFilterXYWH:
    def multi_predict(self, mean, covariance) -> tuple:
        """
        Run Kalman filter prediction step (Vectorized version).

        Args:
            mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
            covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state.
                Unobserved velocities are initialized to 0 mean.

        Examples:
            >>> mean = np.random.rand(5, 8)  # 5 objects with 8-dimensional state vectors
            >>> covariance = np.random.rand(5, 8, 8)  # 5 objects with 8x8 covariance matrices
            >>> kf = KalmanFilterXYWH()
            >>> predicted_mean, predicted_covariance = kf.multi_predict(mean, covariance)
        """
        std_pos = [
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3],
        ]
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)

        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

ultralytics/trackers/utils/kalman_filter.py/KalmanFilterXYWH/update

class KalmanFilterXYWH:
    def update(self, mean, covariance, measurement) -> tuple:
        """
        Run Kalman filter correction step.

        Args:
            mean (ndarray): The predicted state's mean vector (8 dimensional).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).
            measurement (ndarray): The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center
                position, w the width, and h the height of the bounding box.

        Returns:
            (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.

        Examples:
            >>> kf = KalmanFilterXYWH()
            >>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
            >>> covariance = np.eye(8)
            >>> measurement = np.array([0.5, 0.5, 1.2, 1.2])
            >>> new_mean, new_covariance = kf.update(mean, covariance, measurement)
        """
        return super().update(mean, covariance, measurement)

ultralytics/trackers/utils/matching.py/linear_assignment

def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple:
    """
    Perform linear assignment using either the scipy or lap.lapjv method.

    Args:
        cost_matrix (np.ndarray): The matrix containing cost values for assignments, with shape (N, M).
        thresh (float): Threshold for considering an assignment valid.
        use_lap (bool): Use lap.lapjv for the assignment. If False, scipy.optimize.linear_sum_assignment is used.

    Returns:
        (tuple): A tuple containing:
            - matched_indices (np.ndarray): Array of matched indices of shape (K, 2), where K is the number of
              matches.
            - unmatched_a (np.ndarray): Array of unmatched indices from the first set, with shape (L,).
            - unmatched_b (np.ndarray): Array of unmatched indices from the second set, with shape (M,).

    Examples:
        >>> cost_matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> thresh = 5.0
        >>> matched_indices, unmatched_a, unmatched_b = linear_assignment(cost_matrix, thresh, use_lap=True)
    """
    if cost_matrix.size == 0:
        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))

    if use_lap:
        # Use lap.lapjv
        # https://github.com/gatagat/lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
        matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
        unmatched_a = np.where(x < 0)[0]
        unmatched_b = np.where(y < 0)[0]
    else:
        # Use scipy.optimize.linear_sum_assignment
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
        x, y = scipy.optimize.linear_sum_assignment(cost_matrix)  # row x, col y
        matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh])
        if len(matches) == 0:
            unmatched_a = list(np.arange(cost_matrix.shape[0]))
            unmatched_b = list(np.arange(cost_matrix.shape[1]))
        else:
            unmatched_a = list(set(np.arange(cost_matrix.shape[0])) - set(matches[:, 0]))
            unmatched_b = list(set(np.arange(cost_matrix.shape[1])) - set(matches[:, 1]))

    return matches, unmatched_a, unmatched_b

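A concrete run of the scipy fallback path (use_lap=False), showing how the threshold rejects expensive assignments; this assumes the function above is in scope with scipy installed:

import numpy as np

cost = np.array([[1.0, 9.0],
                 [9.0, 2.0],
                 [9.0, 9.0]])  # 3 tracks, 2 detections
matches, unmatched_a, unmatched_b = linear_assignment(cost, thresh=5.0, use_lap=False)
print(matches)      # [[0 0] [1 1]] -- cheap pairs kept
print(unmatched_a)  # [2] -- third track has no affordable detection
print(unmatched_b)  # [] -- both detections matched
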
ultralytics/trackers/utils/matching.py/iou_distance

def iou_distance(atracks: list, btracks: list) -> np.ndarray:
    """
    Compute cost based on Intersection over Union (IoU) between tracks.

    Args:
        atracks (list[STrack] | list[np.ndarray]): List of tracks 'a' or bounding boxes.
        btracks (list[STrack] | list[np.ndarray]): List of tracks 'b' or bounding boxes.

    Returns:
        (np.ndarray): Cost matrix computed based on IoU.

    Examples:
        Compute IoU distance between two sets of tracks
        >>> atracks = [np.array([0, 0, 10, 10]), np.array([20, 20, 30, 30])]
        >>> btracks = [np.array([5, 5, 15, 15]), np.array([25, 25, 35, 35])]
        >>> cost_matrix = iou_distance(atracks, btracks)
    """
    if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
        atlbrs = atracks
        btlbrs = btracks
    else:
        atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks]
        btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks]

    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
    if len(atlbrs) and len(btlbrs):
        if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5:
            ious = batch_probiou(
                np.ascontiguousarray(atlbrs, dtype=np.float32),
                np.ascontiguousarray(btlbrs, dtype=np.float32),
            ).numpy()
        else:
            ious = bbox_ioa(
                np.ascontiguousarray(atlbrs, dtype=np.float32),
                np.ascontiguousarray(btlbrs, dtype=np.float32),
                iou=True,
            )
    return 1 - ious

ultralytics/trackers/utils/matching.py/embedding_distance

def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -> np.ndarray:
    """
    Compute distance between tracks and detections based on embeddings.

    Args:
        tracks (list[STrack]): List of tracks, where each track contains embedding features.
        detections (list[BaseTrack]): List of detections, where each detection contains embedding features.
        metric (str): Metric for distance computation. Supported metrics include 'cosine', 'euclidean', etc.

    Returns:
        (np.ndarray): Cost matrix computed based on embeddings with shape (N, M), where N is the number of tracks
            and M is the number of detections.

    Examples:
        Compute the embedding distance between tracks and detections using cosine metric
        >>> tracks = [STrack(...), STrack(...)]  # List of track objects with embedding features
        >>> detections = [BaseTrack(...), BaseTrack(...)]  # List of detection objects with embedding features
        >>> cost_matrix = embedding_distance(tracks, detections, metric="cosine")
    """
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
    # for i, track in enumerate(tracks):
    #     cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1, -1), det_features, metric))
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Normalized features
    return cost_matrix

ultralytics/trackers/utils/matching.py/fuse_score

def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray:
    """
    Fuses cost matrix with detection scores to produce a single similarity matrix.

    Args:
        cost_matrix (np.ndarray): The matrix containing cost values for assignments, with shape (N, M).
        detections (list[BaseTrack]): List of detections, each containing a score attribute.

    Returns:
        (np.ndarray): Fused similarity matrix with shape (N, M).

    Examples:
        Fuse a cost matrix with detection scores
        >>> cost_matrix = np.random.rand(5, 10)  # 5 tracks and 10 detections
        >>> detections = [BaseTrack(score=np.random.rand()) for _ in range(10)]
        >>> fused_matrix = fuse_score(cost_matrix, detections)
    """
    if cost_matrix.size == 0:
        return cost_matrix
    iou_sim = 1 - cost_matrix
    det_scores = np.array([det.score for det in detections])
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    fuse_sim = iou_sim * det_scores
    return 1 - fuse_sim

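fuse_score converts IoU cost back to similarity, scales it by detection confidence, and converts back to cost. A tiny numeric check, with detections mocked via SimpleNamespace and assuming the function above is in scope:

import numpy as np
from types import SimpleNamespace

cost = np.array([[0.2, 0.5]])  # 1 track, 2 detections (IoU cost)
dets = [SimpleNamespace(score=0.9), SimpleNamespace(score=0.5)]
fused = fuse_score(cost, dets)
print(fused)  # [[0.28 0.75]] -- low-confidence detections become more expensive to match
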
ultralytics/cfg/__init__.py/cfg2dict

def cfg2dict(cfg):
    """
    Converts a configuration object to a dictionary.

    Args:
        cfg (str | Path | Dict | SimpleNamespace): Configuration object to be converted. Can be a file path, a string,
            a dictionary, or a SimpleNamespace object.

    Returns:
        (Dict): Configuration object in dictionary format.

    Examples:
        Convert a YAML file path to a dictionary:
        >>> config_dict = cfg2dict("config.yaml")

        Convert a SimpleNamespace to a dictionary:
        >>> from types import SimpleNamespace
        >>> config_sn = SimpleNamespace(param1="value1", param2="value2")
        >>> config_dict = cfg2dict(config_sn)

        Pass through an already existing dictionary:
        >>> config_dict = cfg2dict({"param1": "value1", "param2": "value2"})

    Notes:
        - If cfg is a path or string, it's loaded as YAML and converted to a dictionary.
        - If cfg is a SimpleNamespace object, it's converted to a dictionary using vars().
        - If cfg is already a dictionary, it's returned unchanged.
    """
    if isinstance(cfg, (str, Path)):
        cfg = yaml_load(cfg)  # load dict
    elif isinstance(cfg, SimpleNamespace):
        cfg = vars(cfg)  # convert to dict
    return cfg

ultralytics/cfg/__init__.py/get_cfg

def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None):
    """
    Load and merge configuration data from a file or dictionary, with optional overrides.

    Args:
        cfg (str | Path | Dict | SimpleNamespace): Configuration data source. Can be a file path, dictionary, or
            SimpleNamespace object.
        overrides (Dict | None): Dictionary containing key-value pairs to override the base configuration.

    Returns:
        (SimpleNamespace): Namespace containing the merged configuration arguments.

    Examples:
        >>> from ultralytics.cfg import get_cfg
        >>> config = get_cfg()  # Load default configuration
        >>> config = get_cfg("path/to/config.yaml", overrides={"epochs": 50, "batch_size": 16})

    Notes:
        - If both `cfg` and `overrides` are provided, the values in `overrides` will take precedence.
        - Special handling ensures alignment and correctness of the configuration, such as converting numeric
          `project` and `name` to strings and validating configuration keys and values.
        - The function performs type and value checks on the configuration data.
    """
    cfg = cfg2dict(cfg)

    # Merge overrides
    if overrides:
        overrides = cfg2dict(overrides)
        if "save_dir" not in cfg:
            overrides.pop("save_dir", None)  # special override keys to ignore
        check_dict_alignment(cfg, overrides)
        cfg = {**cfg, **overrides}  # merge cfg and overrides dicts (prefer overrides)

    # Special handling for numeric project/name
    for k in "project", "name":
        if k in cfg and isinstance(cfg[k], (int, float)):
            cfg[k] = str(cfg[k])
    if cfg.get("name") == "model":  # assign model to 'name' arg
        cfg["name"] = cfg.get("model", "").split(".")[0]
        LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.")

    # Type and Value checks
    check_cfg(cfg)

    # Return instance
    return IterableSimpleNamespace(**cfg)

ultralytics/cfg/__init__.py/check_cfg

def check_cfg(cfg, hard=True):
    """
    Checks configuration argument types and values for the Ultralytics library.

    This function validates the types and values of configuration arguments, ensuring correctness and converting them
    if necessary. It checks for specific key types defined in global variables such as CFG_FLOAT_KEYS,
    CFG_FRACTION_KEYS, CFG_INT_KEYS, and CFG_BOOL_KEYS.

    Args:
        cfg (Dict): Configuration dictionary to validate.
        hard (bool): If True, raises exceptions for invalid types and values; if False, attempts to convert them.

    Examples:
        >>> config = {
        ...     "epochs": 50,  # valid integer
        ...     "lr0": 0.01,  # valid float
        ...     "save": "true",  # invalid bool, converted when hard=False
        ... }
        >>> check_cfg(config, hard=False)
        >>> print(config)
        {'epochs': 50, 'lr0': 0.01, 'save': True}  # 'save' converted via bool()

    Notes:
        - The function modifies the input dictionary in-place.
        - None values are ignored as they may be from optional arguments.
        - Fraction keys are checked to be within the range [0.0, 1.0] and raise ValueError when out of range, even
          when hard=False.
    """
    for k, v in cfg.items():
        if v is not None:  # None values may be from optional args
            if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)):
                if hard:
                    raise TypeError(
                        f"'{k}={v}' is of invalid type {type(v).__name__}. "
                        f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')"
                    )
                cfg[k] = float(v)
            elif k in CFG_FRACTION_KEYS:
                if not isinstance(v, (int, float)):
                    if hard:
                        raise TypeError(
                            f"'{k}={v}' is of invalid type {type(v).__name__}. "
                            f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')"
                        )
                    cfg[k] = v = float(v)
                if not (0.0 <= v <= 1.0):
                    raise ValueError(f"'{k}={v}' is an invalid value. Valid '{k}' values are between 0.0 and 1.0.")
            elif k in CFG_INT_KEYS and not isinstance(v, int):
                if hard:
                    raise TypeError(
                        f"'{k}={v}' is of invalid type {type(v).__name__}. '{k}' must be an int (i.e. '{k}=8')"
                    )
                cfg[k] = int(v)
            elif k in CFG_BOOL_KEYS and not isinstance(v, bool):
                if hard:
                    raise TypeError(
                        f"'{k}={v}' is of invalid type {type(v).__name__}. "
                        f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')"
                    )
                cfg[k] = bool(v)

ultralytics/cfg/__init__.py/get_save_dir

def get_save_dir(args, name=None):
    """
    Returns the directory path for saving outputs, derived from arguments or default settings.

    Args:
        args (SimpleNamespace): Namespace object containing configurations such as 'project', 'name', 'task',
            'mode', and 'save_dir'.
        name (str | None): Optional name for the output directory. If not provided, it defaults to 'args.name' or the
            'args.mode'.

    Returns:
        (Path): Directory path where outputs should be saved.

    Examples:
        >>> from types import SimpleNamespace
        >>> args = SimpleNamespace(project="my_project", task="detect", mode="train", exist_ok=True)
        >>> save_dir = get_save_dir(args)
        >>> print(save_dir)
        my_project/train
    """
    if getattr(args, "save_dir", None):
        save_dir = args.save_dir
    else:
        from ultralytics.utils.files import increment_path

        project = args.project or (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task
        name = name or args.name or f"{args.mode}"
        save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)

    return Path(save_dir)

ultralytics/cfg/__init__.py/_handle_deprecation

def _handle_deprecation(custom):
    """
    Handles deprecated configuration keys by mapping them to current equivalents with deprecation warnings.

    Args:
        custom (Dict): Configuration dictionary potentially containing deprecated keys.

    Returns:
        (Dict): Updated configuration dictionary with deprecated keys replaced.

    Examples:
        >>> custom_config = {"boxes": True, "hide_labels": "False", "line_thickness": 2}
        >>> _handle_deprecation(custom_config)
        >>> print(custom_config)
        {'show_boxes': True, 'show_labels': True, 'line_width': 2}

    Notes:
        This function modifies the input dictionary in-place, replacing deprecated keys with their current
        equivalents. It also handles value conversions where necessary, such as inverting boolean values for
        'hide_labels' and 'hide_conf'.
    """
    for key in custom.copy().keys():
        if key == "boxes":
            deprecation_warn(key, "show_boxes")
            custom["show_boxes"] = custom.pop("boxes")
        if key == "hide_labels":
            deprecation_warn(key, "show_labels")
            custom["show_labels"] = custom.pop("hide_labels") == "False"
        if key == "hide_conf":
            deprecation_warn(key, "show_conf")
            custom["show_conf"] = custom.pop("hide_conf") == "False"
        if key == "line_thickness":
            deprecation_warn(key, "line_width")
            custom["line_width"] = custom.pop("line_thickness")
    return custom

ultralytics/cfg/__init__.py/check_dict_alignment

def check_dict_alignment(base: Dict, custom: Dict, e=None):
    """
    Checks alignment between custom and base configuration dictionaries, handling deprecated keys and providing error
    messages for mismatched keys.

    Args:
        base (Dict): The base configuration dictionary containing valid keys.
        custom (Dict): The custom configuration dictionary to be checked for alignment.
        e (Exception | None): Optional error instance passed by the calling function.

    Raises:
        SyntaxError: If mismatched keys are found between the custom and base dictionaries.

    Examples:
        >>> base_cfg = {"epochs": 50, "lr0": 0.01, "batch_size": 16}
        >>> custom_cfg = {"epoch": 100, "lr": 0.02, "batch_size": 32}
        >>> try:
        ...     check_dict_alignment(base_cfg, custom_cfg)
        ... except SyntaxError:
        ...     print("Mismatched keys found")

    Notes:
        - Suggests corrections for mismatched keys based on similarity to valid keys.
        - Automatically replaces deprecated keys in the custom configuration with updated equivalents.
        - Prints detailed error messages for each mismatched key to help users correct their configurations.
    """
    custom = _handle_deprecation(custom)
    base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
    mismatched = [k for k in custom_keys if k not in base_keys]
    if mismatched:
        from difflib import get_close_matches

        string = ""
        for x in mismatched:
            matches = get_close_matches(x, base_keys)  # key list
            matches = [f"{k}={base[k]}" if base.get(k) is not None else k for k in matches]
            match_str = f"Similar arguments are i.e. {matches}." if matches else ""
            string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n"
        raise SyntaxError(string + CLI_HELP_MSG) from e

ultralytics/cfg/__init__.py/merge_equals_args

def merge_equals_args(args: List[str]) -> List[str]:
    """
    Merges arguments around isolated '=' in a list of strings and joins fragments with brackets.

    This function handles the following cases:
    1. ['arg', '=', 'val'] becomes ['arg=val']
    2. ['arg=', 'val'] becomes ['arg=val']
    3. ['arg', '=val'] becomes ['arg=val']
    4. Joins fragments with brackets, e.g., ['imgsz=[3,', '640,', '640]'] becomes ['imgsz=[3,640,640]']

    Args:
        args (List[str]): A list of strings where each element represents an argument or fragment.

    Returns:
        List[str]: A list of strings where the arguments around isolated '=' are merged and fragments with brackets
            are joined.

    Examples:
        >>> args = ["arg1", "=", "value", "arg2=", "value2", "arg3", "=value3", "imgsz=[3,", "640,", "640]"]
        >>> merge_equals_args(args)
        ['arg1=value', 'arg2=value2', 'arg3=value3', 'imgsz=[3,640,640]']
    """
    new_args = []
    current = ""
    depth = 0

    i = 0
    while i < len(args):
        arg = args[i]

        # Handle equals sign merging
        if arg == "=" and 0 < i < len(args) - 1:  # merge ['arg', '=', 'val']
            new_args[-1] += f"={args[i + 1]}"
            i += 2
            continue
        elif arg.endswith("=") and i < len(args) - 1 and "=" not in args[i + 1]:  # merge ['arg=', 'val']
            new_args.append(f"{arg}{args[i + 1]}")
            i += 2
            continue
        elif arg.startswith("=") and i > 0:  # merge ['arg', '=val']
            new_args[-1] += arg
            i += 1
            continue

        # Handle bracket joining
        depth += arg.count("[") - arg.count("]")
        current += arg
        if depth == 0:
            new_args.append(current)
            current = ""

        i += 1

    # Append any remaining current string
    if current:
        new_args.append(current)

    return new_args

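A quick sanity check of the three '=' merge cases and bracket joining, assuming the function above is importable:

assert merge_equals_args(["lr0", "=", "0.01"]) == ["lr0=0.01"]          # isolated '='
assert merge_equals_args(["epochs=", "100"]) == ["epochs=100"]          # trailing '='
assert merge_equals_args(["batch", "=16"]) == ["batch=16"]              # leading '='
assert merge_equals_args(["imgsz=[3,", "640,", "640]"]) == ["imgsz=[3,640,640]"]  # bracket joining
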
ultralytics/cfg/__init__.py/handle_yolo_hub

def handle_yolo_hub(args: List[str]) -> None:
    """
    Handles Ultralytics HUB command-line interface (CLI) commands for authentication.

    This function processes Ultralytics HUB CLI commands such as login and logout. It should be called when executing
    a script with arguments related to HUB authentication.

    Args:
        args (List[str]): A list of command line arguments. The first argument should be either 'login' or 'logout'.
            For 'login', an optional second argument can be the API key.

    Examples:
        ```bash
        yolo login YOUR_API_KEY
        ```

    Notes:
        - The function imports the 'hub' module from ultralytics to perform login and logout operations.
        - For the 'login' command, if no API key is provided, an empty string is passed to the login function.
        - The 'logout' command does not require any additional arguments.
    """
    from ultralytics import hub

    if args[0] == "login":
        key = args[1] if len(args) > 1 else ""
        # Log in to Ultralytics HUB using the provided API key
        hub.login(key)
    elif args[0] == "logout":
        # Log out from Ultralytics HUB
        hub.logout()

ultralytics/cfg/__init__.py/handle_yolo_settings

def handle_yolo_settings(args: List[str]) -> None:
    """
    Handles YOLO settings command-line interface (CLI) commands.

    This function processes YOLO settings CLI commands such as reset and updating individual settings. It should be
    called when executing a script with arguments related to YOLO settings management.

    Args:
        args (List[str]): A list of command line arguments for YOLO settings management.

    Examples:
        >>> handle_yolo_settings(["reset"])  # Reset YOLO settings
        >>> handle_yolo_settings(["default_cfg_path=yolo11n.yaml"])  # Update a specific setting

    Notes:
        - If no arguments are provided, the function will display the current settings.
        - The 'reset' command will delete the existing settings file and create new default settings.
        - Other arguments are treated as key-value pairs to update specific settings.
        - The function will check for alignment between the provided settings and the existing ones.
        - After processing, the updated settings will be displayed.
        - For more information on handling YOLO settings, visit:
          https://docs.ultralytics.com/quickstart/#ultralytics-settings
    """
    url = "https://docs.ultralytics.com/quickstart/#ultralytics-settings"  # help URL
    try:
        if any(args):
            if args[0] == "reset":
                SETTINGS_FILE.unlink()  # delete the settings file
                SETTINGS.reset()  # create new settings
                LOGGER.info("Settings reset successfully")  # inform the user that settings have been reset
            else:  # save a new setting
                new = dict(parse_key_value_pair(a) for a in args)
                check_dict_alignment(SETTINGS, new)
                SETTINGS.update(new)

        print(SETTINGS)  # print the current settings
        LOGGER.info(f"💡 Learn more about Ultralytics Settings at {url}")
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ settings error: '{e}'. Please see {url} for help.")

ultralytics/cfg/__init__.py/handle_yolo_solutions

def handle_yolo_solutions(args: List[str]) -> None:
    """
    Processes YOLO solutions arguments and runs the specified computer vision solutions pipeline.

    Args:
        args (List[str]): Command-line arguments for configuring and running the Ultralytics YOLO solutions:
            https://docs.ultralytics.com/solutions/. It can include the solution name, source, and other
            configuration parameters.

    Returns:
        None: The function processes video frames and saves the output but doesn't return any value.

    Examples:
        Run people counting solution with default settings:
        >>> handle_yolo_solutions(["count"])

        Run analytics with custom configuration:
        >>> handle_yolo_solutions(["analytics", "conf=0.25", "source=path/to/video/file.mp4"])

    Notes:
        - Default configurations are merged from DEFAULT_SOL_DICT and DEFAULT_CFG_DICT
        - Arguments can be provided in the format 'key=value' or as boolean flags
        - Available solutions are defined in SOLUTION_MAP with their respective classes and methods
        - If an invalid solution is provided, defaults to 'count' solution
        - Output videos are saved in 'runs/solutions/{solution_name}' directory
        - For 'analytics' solution, frame numbers are tracked for generating analytical graphs
        - Video processing can be interrupted by pressing 'q'
        - Processes video frames sequentially and saves output in .avi format
        - If no source is specified, downloads and uses a default sample video
    """
    full_args_dict = {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT}  # arguments dictionary
    overrides = {}

    # check dictionary alignment
    for arg in merge_equals_args(args):
        arg = arg.lstrip("-").rstrip(",")
        if "=" in arg:
            try:
                k, v = parse_key_value_pair(arg)
                overrides[k] = v
            except (NameError, SyntaxError, ValueError, AssertionError) as e:
                check_dict_alignment(full_args_dict, {arg: ""}, e)
        elif arg in full_args_dict and isinstance(full_args_dict.get(arg), bool):
            overrides[arg] = True
    check_dict_alignment(full_args_dict, overrides)  # dict alignment

    # Get solution name
    if args and args[0] in SOLUTION_MAP:
        if args[0] != "help":
            s_n = args.pop(0)  # Extract the solution name directly
        else:
            LOGGER.info(SOLUTIONS_HELP_MSG)
    else:
        LOGGER.warning(
            f"⚠️ No valid solution provided. Using default 'count'. Available: {', '.join(SOLUTION_MAP.keys())}"
        )
        s_n = "count"  # Default solution if none provided

    if args and args[0] == "help":  # Add check for return if user calls `yolo solutions help`
        return

    cls, method = SOLUTION_MAP[s_n]  # solution class name, method name and default source

    from ultralytics import solutions  # import ultralytics solutions

    solution = getattr(solutions, cls)(IS_CLI=True, **overrides)  # get solution class, i.e. ObjectCounter
    process = getattr(solution, method)  # get specific function of class for processing, i.e. 'count' from ObjectCounter

    cap = cv2.VideoCapture(solution.CFG["source"])  # read the video file

    # extract width, height and fps of the video file, create save directory and initialize video writer
    import os  # for directory creation
    from pathlib import Path

    from ultralytics.utils.files import increment_path  # for output directory path update

    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
    if s_n == "analytics":  # analytical graphs follow fixed shape for output, i.e. w=1920, h=1080
        w, h = 1920, 1080
    save_dir = increment_path(Path("runs") / "solutions" / "exp", exist_ok=False)
    save_dir.mkdir(parents=True, exist_ok=True)  # create the output directory
    vw = cv2.VideoWriter(os.path.join(save_dir, "solution.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

    try:  # Process video frames
        f_n = 0  # frame number, required for analytical graphs
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break
            frame = process(frame, f_n := f_n + 1) if s_n == "analytics" else process(frame)
            vw.write(frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        cap.release()

ultralytics/cfg/__init__.py/handle_streamlit_inference

def handle_streamlit_inference():
    """
    Open the Ultralytics Live Inference Streamlit app for real-time object detection.

    This function initializes and runs a Streamlit application designed for performing live object detection using
    Ultralytics models. It checks for the required Streamlit package and launches the app.

    Examples:
        >>> handle_streamlit_inference()

    Notes:
        - Requires Streamlit version 1.29.0 or higher.
        - The app is launched using the 'streamlit run' command.
        - The Streamlit app file is located in the Ultralytics package directory.
    """
    checks.check_requirements("streamlit>=1.29.0")
    LOGGER.info("💡 Loading Ultralytics Live Inference app...")
    subprocess.run(["streamlit", "run", ROOT / "solutions/streamlit_inference.py", "--server.headless", "true"])

ultralytics/cfg/__init__.py/parse_key_value_pair

def parse_key_value_pair(pair: str = "key=value"):
    """
    Parses a key-value pair string into separate key and value components.

    Args:
        pair (str): A string containing a key-value pair in the format "key=value".

    Returns:
        (tuple): A tuple containing two elements:
            - key (str): The parsed key.
            - value (str): The parsed value.

    Raises:
        AssertionError: If the value is missing or empty.

    Examples:
        >>> key, value = parse_key_value_pair("model=yolo11n.pt")
        >>> print(f"Key: {key}, Value: {value}")
        Key: model, Value: yolo11n.pt

        >>> key, value = parse_key_value_pair("epochs=100")
        >>> print(f"Key: {key}, Value: {value}")
        Key: epochs, Value: 100

    Notes:
        - The function splits the input string on the first '=' character.
        - Leading and trailing whitespace is removed from both key and value.
        - An assertion error is raised if the value is empty after stripping.
    """
    k, v = pair.split("=", 1)  # split on first '=' sign
    k, v = k.strip(), v.strip()  # remove spaces
    assert v, f"missing '{k}' value"
    return k, smart_value(v)

ultralytics/cfg/__init__.py/smart_value

def smart_value(v):
    """
    Converts a string representation of a value to its appropriate Python type.

    This function attempts to convert a given string into a Python object of the most appropriate type. It handles
    conversions to None, bool, int, float, and other types that can be evaluated safely.

    Args:
        v (str): The string representation of the value to be converted.

    Returns:
        (Any): The converted value. The type can be None, bool, int, float, or the original string if no conversion
            is applicable.

    Examples:
        >>> smart_value("42")
        42
        >>> smart_value("3.14")
        3.14
        >>> smart_value("True")
        True
        >>> smart_value("None")
        None
        >>> smart_value("some_string")
        'some_string'

    Notes:
        - The function uses a case-insensitive comparison for boolean and None values.
        - For other types, it attempts to use Python's eval() function, which can be unsafe if used on untrusted
          input.
        - If no conversion is possible, the original string is returned.
    """
    v_lower = v.lower()
    if v_lower == "none":
        return None
    elif v_lower == "true":
        return True
    elif v_lower == "false":
        return False
    else:
        try:
            return eval(v)
        except Exception:
            return v

negative_train_query659_01714
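A few behaviors worth noting beyond the docstring examples, traced through the branches above: the eval() fallback handles container literals and scientific notation, and the bare-string fallback catches anything eval() rejects.

smart_value("[0.25, 0.45]")  # -> [0.25, 0.45]: eval() parses the list literal
smart_value("1e-3")          # -> 0.001: eval() parses scientific notation
smart_value("FALSE")         # -> False: the bool/None checks are case-insensitive
smart_value("yolo11n.pt")    # -> 'yolo11n.pt': eval() raises, so the raw string is returned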
ultralytics/cfg/__init__.py/entrypoint
def entrypoint(debug=""):
    """
    Ultralytics entrypoint function for parsing and executing command-line arguments.

    This function serves as the main entry point for the Ultralytics CLI, parsing command-line arguments and
    executing the corresponding tasks such as training, validation, prediction, exporting models, and more.

    Args:
        debug (str): Space-separated string of command-line arguments for debugging purposes.

    Examples:
        Train a detection model for 10 epochs with an initial learning_rate of 0.01:
        >>> entrypoint("train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01")

        Predict a YouTube video using a pretrained segmentation model at image size 320:
        >>> entrypoint("predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320")

        Validate a pretrained detection model at batch-size 1 and image size 640:
        >>> entrypoint("val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640")

    Notes:
        - If no arguments are passed, the function will display the usage help message.
        - For a list of all available commands and their arguments, see the provided help messages and the
          Ultralytics documentation at https://docs.ultralytics.com.
    """
    args = (debug.split(" ") if debug else ARGV)[1:]
    if not args:  # no arguments passed
        LOGGER.info(CLI_HELP_MSG)
        return

    special = {
        "help": lambda: LOGGER.info(CLI_HELP_MSG),
        "checks": checks.collect_system_info,
        "version": lambda: LOGGER.info(__version__),
        "settings": lambda: handle_yolo_settings(args[1:]),
        "cfg": lambda: yaml_print(DEFAULT_CFG_PATH),
        "hub": lambda: handle_yolo_hub(args[1:]),
        "login": lambda: handle_yolo_hub(args),
        "logout": lambda: handle_yolo_hub(args),
        "copy-cfg": copy_default_cfg,
        "streamlit-predict": lambda: handle_streamlit_inference(),
        "solutions": lambda: handle_yolo_solutions(args[1:]),
    }
    full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}

    # Define common misuses of special commands, i.e. -h, -help, --help
    special.update({k[0]: v for k, v in special.items()})  # singular
    special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith("s")})  # singular
    special = {**special, **{f"-{k}": v for k, v in special.items()}, **{f"--{k}": v for k, v in special.items()}}

    overrides = {}  # basic overrides, i.e. imgsz=320
    for a in merge_equals_args(args):  # merge spaces around '=' sign
        if a.startswith("--"):
            LOGGER.warning(f"WARNING ⚠️ argument '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
            a = a[2:]
        if a.endswith(","):
            LOGGER.warning(f"WARNING ⚠️ argument '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.")
            a = a[:-1]
        if "=" in a:
            try:
                k, v = parse_key_value_pair(a)
                if k == "cfg" and v is not None:  # custom.yaml passed
                    LOGGER.info(f"Overriding {DEFAULT_CFG_PATH} with {v}")
                    overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != "cfg"}
                else:
                    overrides[k] = v
            except (NameError, SyntaxError, ValueError, AssertionError) as e:
                check_dict_alignment(full_args_dict, {a: ""}, e)

        elif a in TASKS:
            overrides["task"] = a
        elif a in MODES:
            overrides["mode"] = a
        elif a.lower() in special:
            special[a.lower()]()
            return
        elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool):
            overrides[a] = True  # auto-True for default bool args, i.e. 'yolo show' sets show=True
        elif a in DEFAULT_CFG_DICT:
            raise SyntaxError(
                f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign "
                f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}"
            )
        else:
            check_dict_alignment(full_args_dict, {a: ""})

    # Check keys
    check_dict_alignment(full_args_dict, overrides)

    # Mode
    mode = overrides.get("mode")
    if mode is None:
        mode = DEFAULT_CFG.mode or "predict"
        LOGGER.warning(f"WARNING ⚠️ 'mode' argument is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
    elif mode not in MODES:
        raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")

    # Task
    task = overrides.pop("task", None)
    if task:
        if task not in TASKS:
            raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}")
        if "model" not in overrides:
            overrides["model"] = TASK2MODEL[task]

    # Model
    model = overrides.pop("model", DEFAULT_CFG.model)
    if model is None:
        model = "yolo11n.pt"
        LOGGER.warning(f"WARNING ⚠️ 'model' argument is missing. Using default 'model={model}'.")
    overrides["model"] = model
    stem = Path(model).stem.lower()
    if "rtdetr" in stem:  # guess architecture
        from ultralytics import RTDETR

        model = RTDETR(model)  # no task argument
    elif "fastsam" in stem:
        from ultralytics import FastSAM

        model = FastSAM(model)
    elif "sam_" in stem or "sam2_" in stem or "sam2.1_" in stem:
        from ultralytics import SAM

        model = SAM(model)
    else:
        from ultralytics import YOLO

        model = YOLO(model, task=task)
    if isinstance(overrides.get("pretrained"), str):
        model.load(overrides["pretrained"])

    # Task Update
    if task != model.task:
        if task:
            LOGGER.warning(
                f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
                f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model."
            )
        task = model.task

    # Mode
    if mode in {"predict", "track"} and "source" not in overrides:
        overrides["source"] = (
            "https://ultralytics.com/images/boats.jpg" if task == "obb" else DEFAULT_CFG.source or ASSETS
        )
        LOGGER.warning(f"WARNING ⚠️ 'source' argument is missing. Using default 'source={overrides['source']}'.")
    elif mode in {"train", "val"}:
        if "data" not in overrides and "resume" not in overrides:
            overrides["data"] = DEFAULT_CFG.data or TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
            LOGGER.warning(f"WARNING ⚠️ 'data' argument is missing. Using default 'data={overrides['data']}'.")
    elif mode == "export":
        if "format" not in overrides:
            overrides["format"] = DEFAULT_CFG.format or "torchscript"
            LOGGER.warning(f"WARNING ⚠️ 'format' argument is missing. Using default 'format={overrides['format']}'.")

    # Run command in python
    getattr(model, mode)(**overrides)  # default args from model

    # Show help
    LOGGER.info(f"💡 Learn more at https://docs.ultralytics.com/modes/{mode}")

    # Recommend VS Code extension
    if IS_VSCODE and SETTINGS.get("vscode_msg", True):
        LOGGER.info(vscode_msg())
negative_train_query659_01715
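Note that the first token of the debug string is discarded by the [1:] slice (it stands in for the program name in ARGV), so a programmatic call should include a leading placeholder token. A minimal sketch, assuming the package is installed (this triggers a real prediction run):

from ultralytics.cfg import entrypoint

# The leading "yolo" token is dropped by the [1:] slice, mirroring how ARGV[0]
# (the program name) is dropped for real CLI invocations.
entrypoint("yolo predict model=yolo11n.pt imgsz=320")  # parses args, builds YOLO(model), calls model.predict(...)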
ultralytics/cfg/__init__.py/copy_default_cfg
def copy_default_cfg():
    """
    Copies the default configuration file and creates a new one with '_copy' appended to its name.

    This function duplicates the existing default configuration file (DEFAULT_CFG_PATH) and saves it with '_copy'
    appended to its name in the current working directory. It provides a convenient way to create a custom
    configuration file based on the default settings.

    Examples:
        >>> copy_default_cfg()
        # Output: default.yaml copied to /path/to/current/directory/default_copy.yaml
        # Example YOLO command with this new custom cfg:
        #   yolo cfg='/path/to/current/directory/default_copy.yaml' imgsz=320 batch=8

    Notes:
        - The new configuration file is created in the current working directory.
        - After copying, the function prints a message with the new file's location and an example YOLO command
          demonstrating how to use the new configuration file.
        - This function is useful for users who want to modify the default configuration without altering the
          original file.
    """
    new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")
    shutil.copy2(DEFAULT_CFG_PATH, new_file)
    LOGGER.info(
        f"{DEFAULT_CFG_PATH} copied to {new_file}\n"
        f"Example YOLO command with this new custom cfg:\n    yolo cfg='{new_file}' imgsz=320 batch=8"
    )
negative_train_query659_01716
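A minimal end-to-end sketch of the intended workflow, per the docstring above; the file name assumes the default config is named default.yaml, and the editing step is left as a comment.

from pathlib import Path

from ultralytics.cfg import copy_default_cfg

copy_default_cfg()  # writes default_copy.yaml into the current working directory
assert (Path.cwd() / "default_copy.yaml").exists()
# Edit default_copy.yaml as needed, then point the CLI at it:
#   yolo cfg=default_copy.yaml imgsz=320 batch=8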