Skip to content

Commit

Permalink
DOCS-2086: Add ML, CV, and Data code snippets (#3920)
Browse files Browse the repository at this point in the history
  • Loading branch information
skyleilani authored May 8, 2024
1 parent d7557df commit 3b93024
Show file tree
Hide file tree
Showing 3 changed files with 101 additions and 2 deletions.
6 changes: 6 additions & 0 deletions services/datamanager/data_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,14 @@ func init() {
}

// Service defines what a Data Manager Service should expose to the users.
//
// Sync example:
//
//	// Sync data stored on the machine to the cloud.
//	err := data.Sync(context.Background(), nil)
type Service interface {
	resource.Resource
	// Sync will sync data stored on the machine to the cloud.
	// The extra map allows passing implementation-specific options; nil is accepted.
	Sync(ctx context.Context, extra map[string]interface{}) error
}

Expand Down
13 changes: 13 additions & 0 deletions services/mlmodel/mlmodel.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,22 @@ func init() {
// Service defines the ML Model interface, which takes a map of inputs, runs it through
// an inference engine, and creates a map of outputs. Metadata is necessary in order to build
// the struct that will decode that map[string]interface{} correctly.
//
// Infer example:
//
//	// Create the input tensor map for the model.
//	inputTensors := ml.Tensors{"0": tensor.New(tensor.WithShape(1, 2, 3), tensor.WithBacking([]int{1, 2, 3, 4, 5, 6}))}
//
//	// Run inference and receive the output tensor map.
//	outputTensors, err := myMLModel.Infer(context.Background(), inputTensors)
//
// Metadata example:
//
//	metadata, err := myMLModel.Metadata(context.Background())
type Service interface {
	resource.Resource
	// Infer returns an output tensor map after running an input tensor map through an inference model.
	Infer(ctx context.Context, tensors ml.Tensors) (ml.Tensors, error)

	// Metadata returns the metadata: name, data type, expected tensor/array shape, inputs, and outputs associated with the ML model.
	Metadata(ctx context.Context) (MLMetadata, error)
}

Expand Down
84 changes: 82 additions & 2 deletions services/vision/vision.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,24 +29,104 @@ func init() {
}

// A Service that implements various computer vision algorithms like detection and segmentation.
//
// DetectionsFromCamera example:
//
//	// Get detections from the camera output.
//	// The camera is referenced by its configured name.
//	detections, err := visService.DetectionsFromCamera(context.Background(), "cam1", nil)
//	if err != nil {
//		logger.Fatalf("Could not get detections: %v", err)
//	}
//	if len(detections) > 0 {
//		logger.Info(detections[0])
//	}
//
// Detections example:
//
//	// Get the stream from a camera
//	camStream, err := myCam.Stream(context.Background())
//	if err != nil {
//		logger.Error(err)
//		return
//	}
//
//	// Get an image from the camera stream
//	img, release, err := camStream.Next(context.Background())
//	if err != nil {
//		logger.Error(err)
//		return
//	}
//	defer release()
//
//	// Get the detections from the image
//	detections, err := visService.Detections(context.Background(), img, nil)
//	if err != nil {
//		logger.Fatalf("Could not get detections: %v", err)
//	}
//	if len(detections) > 0 {
//		logger.Info(detections[0])
//	}
//
// ClassificationsFromCamera example:
//
//	// Get the 2 classifications with the highest confidence scores from the camera output.
//	// The camera is referenced by its configured name.
//	classifications, err := visService.ClassificationsFromCamera(context.Background(), "cam1", 2, nil)
//	if err != nil {
//		logger.Fatalf("Could not get classifications: %v", err)
//	}
//	if len(classifications) > 0 {
//		logger.Info(classifications[0])
//	}
//
// Classifications example:
//
//	// Get the stream from a camera
//	camStream, err := myCam.Stream(context.Background())
//	if err != nil {
//		logger.Error(err)
//		return
//	}
//
//	// Get an image from the camera stream
//	img, release, err := camStream.Next(context.Background())
//	if err != nil {
//		logger.Error(err)
//		return
//	}
//	defer release()
//
//	// Get the 2 classifications with the highest confidence scores from the image
//	classifications, err := visService.Classifications(context.Background(), img, 2, nil)
//	if err != nil {
//		logger.Fatalf("Could not get classifications: %v", err)
//	}
//	if len(classifications) > 0 {
//		logger.Info(classifications[0])
//	}
//
// GetObjectPointClouds example:
//
//	// Get the objects from the camera output
//	objects, err := visService.GetObjectPointClouds(context.Background(), "cam1", nil)
//	if err != nil {
//		logger.Fatalf("Could not get point clouds: %v", err)
//	}
//	if len(objects) > 0 {
//		logger.Info(objects[0])
//	}
type Service interface {
	resource.Resource
	// DetectionsFromCamera returns a list of detections from the next image from a specified camera using a configured detector.
	DetectionsFromCamera(ctx context.Context, cameraName string, extra map[string]interface{}) ([]objectdetection.Detection, error)

	// Detections returns a list of detections from a given image using a configured detector.
	Detections(ctx context.Context, img image.Image, extra map[string]interface{}) ([]objectdetection.Detection, error)
	// classifier methods

	// ClassificationsFromCamera returns a list of the n classifications with the highest confidence scores
	// from the next image from a specified camera using a configured classifier.
	ClassificationsFromCamera(
		ctx context.Context,
		cameraName string,
		n int,
		extra map[string]interface{},
	) (classification.Classifications, error)

	// Classifications returns a list of the n classifications with the highest confidence scores
	// from a given image using a configured classifier.
	Classifications(
		ctx context.Context,
		img image.Image,
		n int,
		extra map[string]interface{},
	) (classification.Classifications, error)
	// segmenter methods

	// GetObjectPointClouds returns a list of 3D point cloud objects and metadata from the latest 3D camera image using a specified segmenter.
	GetObjectPointClouds(ctx context.Context, cameraName string, extra map[string]interface{}) ([]*viz.Object, error)
}

Expand Down

0 comments on commit 3b93024

Please sign in to comment.