# Example: track points across video frames with the MFT point-tracking model.
#
# NOTE(review): `AirGenCar` and `video_frames` are referenced but never defined
# in this snippet — presumably provided by the surrounding GRID/AirGen
# environment; confirm against the full example.
import torch  # original snippet called torch.tensor without importing torch

from grid.model.perception.tracking.mft import MFT

car = AirGenCar()

# Capture a single RGB image from the simulator's front-center camera.
img = car.getImage("front_center", "rgb").data

# Tracking queries. Each row looks like (frame_index, x, y) — TODO confirm
# the coordinate convention against the MFT API docs.
queries = torch.tensor([
    [0., 600., 350.],
    [0., 600., 250.],
    [10., 600., 500.],
    [20., 750., 600.],
    [30., 900., 200.],
])

model = MFT(queries=queries, save_results=False, use_local=True)

# Feed frames one at a time; the model tracks the queried points across them.
for frame in video_frames:
    model.run(frame)
The MFT class implements a point-tracking model: it processes video frames sequentially and tracks the points specified by the provided queries.
# Example: track points across video frames with the MFT point-tracking model.
#
# NOTE(review): `AirGenCar` and `video_frames` are referenced but never defined
# in this snippet — presumably provided by the surrounding GRID/AirGen
# environment; confirm against the full example.
import torch  # original snippet called torch.tensor without importing torch

from grid.model.perception.tracking.mft import MFT

car = AirGenCar()

# Capture a single RGB image from the simulator's front-center camera.
img = car.getImage("front_center", "rgb").data

# Tracking queries. Each row looks like (frame_index, x, y) — TODO confirm
# the coordinate convention against the MFT API docs.
queries = torch.tensor([
    [0., 600., 350.],
    [0., 600., 250.],
    [10., 600., 500.],
    [20., 750., 600.],
    [30., 900., 200.],
])

model = MFT(queries=queries, save_results=False, use_local=True)

# Feed frames one at a time; the model tracks the queried points across them.
for frame in video_frames:
    model.run(frame)
# Example: track points across video frames with the MFT point-tracking model.
#
# NOTE(review): `AirGenCar` and `video_frames` are referenced but never defined
# in this snippet — presumably provided by the surrounding GRID/AirGen
# environment; confirm against the full example.
import torch  # original snippet called torch.tensor without importing torch

from grid.model.perception.tracking.mft import MFT

car = AirGenCar()

# Capture a single RGB image from the simulator's front-center camera.
img = car.getImage("front_center", "rgb").data

# Tracking queries. Each row looks like (frame_index, x, y) — TODO confirm
# the coordinate convention against the MFT API docs.
queries = torch.tensor([
    [0., 600., 350.],
    [0., 600., 250.],
    [10., 600., 500.],
    [20., 750., 600.],
    [30., 900., 200.],
])

model = MFT(queries=queries, save_results=False, use_local=True)

# Feed frames one at a time; the model tracks the queried points across them.
for frame in video_frames:
    model.run(frame)
Assistant
Responses are generated using AI and may contain mistakes.