-
Notifications
You must be signed in to change notification settings - Fork 110
/
obstacles_depth.go
163 lines (149 loc) · 5.63 KB
/
obstacles_depth.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
//go:build !no_cgo
// Package obstaclesdepth uses an underlying depth camera to fulfill GetObjectPointClouds,
// projecting its depth map to a point cloud, an then applying a point cloud clustering algorithm
package obstaclesdepth
import (
"context"
"sort"
"github.com/golang/geo/r3"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"go.viam.com/rdk/components/camera"
"go.viam.com/rdk/logging"
"go.viam.com/rdk/resource"
"go.viam.com/rdk/rimage"
"go.viam.com/rdk/rimage/depthadapter"
"go.viam.com/rdk/rimage/transform"
"go.viam.com/rdk/robot"
svision "go.viam.com/rdk/services/vision"
"go.viam.com/rdk/spatialmath"
"go.viam.com/rdk/utils"
vision "go.viam.com/rdk/vision"
"go.viam.com/rdk/vision/segmentation"
)
// model is the registered model identifier for this service ("obstacles_depth"
// under the default model family); used by init below to register the constructor.
var model = resource.DefaultModelFamily.WithModel("obstacles_depth")
// init registers the obstacles_depth model with the vision-service API so it can
// be constructed from a robot config.
func init() {
	resource.RegisterService(svision.API, model, resource.Registration[svision.Service, *ObsDepthConfig]{
		DeprecatedRobotConstructor: func(
			ctx context.Context, r any, c resource.Config, logger logging.Logger,
		) (svision.Service, error) {
			// Decode the service's native attributes from the raw config.
			cfg, err := resource.NativeConfig[*ObsDepthConfig](c)
			if err != nil {
				return nil, err
			}
			// The deprecated constructor hands us the robot as `any`; narrow it.
			rob, err := utils.AssertType[robot.Robot](r)
			if err != nil {
				return nil, err
			}
			return registerObstaclesDepth(ctx, c.ResourceName(), cfg, rob, logger)
		},
	})
}
// ObsDepthConfig specifies the parameters to be used for the obstacle depth service.
// Each field is copied directly into a segmentation.ErCCLConfig; see that type for
// the clustering algorithm's interpretation of each value.
type ObsDepthConfig struct {
	resource.TriviallyValidateConfig
	// MinPtsInPlane is the minimum number of points for a candidate ground plane.
	MinPtsInPlane int `json:"min_points_in_plane"`
	// MinPtsInSegment is the minimum number of points for a cluster to be kept as an obstacle.
	MinPtsInSegment int `json:"min_points_in_segment"`
	// MaxDistFromPlane is the max distance (mm) a point may be from the plane and still belong to it.
	MaxDistFromPlane float64 `json:"max_dist_from_plane_mm"`
	// ClusteringRadius controls the neighborhood radius used when clustering points.
	ClusteringRadius int `json:"clustering_radius"`
	// ClusteringStrictness tunes how aggressively clusters are merged or split.
	ClusteringStrictness float64 `json:"clustering_strictness"`
	// AngleTolerance is how far (degrees) a plane normal may deviate from the
	// expected ground normal and still count as ground.
	AngleTolerance float64 `json:"ground_angle_tolerance_degs"`
}
// obsDepth is the underlying struct actually used by the service.
type obsDepth struct {
	// clusteringConf holds the validated ERCCL parameters built from ObsDepthConfig.
	clusteringConf *segmentation.ErCCLConfig
	// intrinsics is populated from the camera's properties on each segmenter call;
	// it stays nil when the camera reports no intrinsic parameters.
	intrinsics *transform.PinholeCameraIntrinsics
}
// registerObstaclesDepth validates conf, builds the ERCCL clustering configuration
// from it, and wraps the resulting segmenter in a vision service.
func registerObstaclesDepth(
	ctx context.Context,
	name resource.Name,
	conf *ObsDepthConfig,
	r robot.Robot,
	logger logging.Logger,
) (svision.Service, error) {
	_, span := trace.StartSpan(ctx, "service::vision::registerObstacleDepth")
	defer span.End()
	if conf == nil {
		return nil, errors.New("config for obstacles_depth cannot be nil")
	}
	// Translate the service attributes into the clustering algorithm's config.
	clusterCfg := &segmentation.ErCCLConfig{
		MinPtsInPlane:        conf.MinPtsInPlane,
		MinPtsInSegment:      conf.MinPtsInSegment,
		MaxDistFromPlane:     conf.MaxDistFromPlane,
		NormalVec:            r3.Vector{0, -1, 0},
		AngleTolerance:       conf.AngleTolerance,
		ClusteringRadius:     conf.ClusteringRadius,
		ClusteringStrictness: conf.ClusteringStrictness,
	}
	if err := clusterCfg.CheckValid(); err != nil {
		return nil, errors.Wrap(err, "error building clustering config for obstacles_depth")
	}
	dep := &obsDepth{clusteringConf: clusterCfg}
	// The segmenter closure is what the vision service invokes per request.
	return svision.NewService(name, r, nil, nil, nil, dep.buildObsDepth(logger))
}
// buildObsDepth returns a segmenter closure that, per call, checks whether the
// camera exposes intrinsic parameters and dispatches to the intrinsics-aware
// pipeline or the no-intrinsics fallback accordingly.
func (o *obsDepth) buildObsDepth(logger logging.Logger) func(
	ctx context.Context, src camera.VideoSource) ([]*vision.Object, error) {
	return func(ctx context.Context, src camera.VideoSource) ([]*vision.Object, error) {
		props, err := src.Properties(ctx)
		switch {
		case err != nil:
			// Properties unavailable: warn and fall back to the median-depth path.
			logger.CWarnw(ctx, "could not find camera properties. obstacles depth started without camera's intrinsic parameters", "error", err)
			return o.obsDepthNoIntrinsics(ctx, src)
		case props.IntrinsicParams == nil:
			logger.CWarn(ctx, "obstacles depth started but camera did not have intrinsic parameters")
			return o.obsDepthNoIntrinsics(ctx, src)
		}
		o.intrinsics = props.IntrinsicParams
		return o.obsDepthWithIntrinsics(ctx, src)
	}
}
// obsDepthNoIntrinsics will return the median depth in the depth map as a Geometry point.
// Without intrinsics the map cannot be projected to 3D, so the single returned
// object sits on the camera's Z axis at the median measured depth.
func (o *obsDepth) obsDepthNoIntrinsics(ctx context.Context, src camera.VideoSource) ([]*vision.Object, error) {
	pic, release, err := camera.ReadImage(ctx, src)
	if err != nil {
		// Wrap instead of discarding the underlying error so callers see the cause.
		return nil, errors.Wrapf(err, "could not get image from %s", src)
	}
	defer release()
	dm, err := rimage.ConvertImageToDepthMap(ctx, pic)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert image to depth map")
	}
	depData := dm.Data()
	if len(depData) == 0 {
		return nil, errors.New("could not get info from depth map")
	}
	// Sort ascending so the middle element is the median.
	// NOTE(review): this sorts dm's data in place — assumed safe because dm is
	// local to this call; confirm Data() does not alias shared state.
	sort.Slice(depData, func(i, j int) bool {
		return depData[i] < depData[j]
	})
	med := len(depData) / 2 // same index as int(0.5*float64(len)), stated directly
	pt := spatialmath.NewPoint(r3.Vector{X: 0, Y: 0, Z: float64(depData[med])}, "")
	return []*vision.Object{{Geometry: pt}}, nil
}
// obsDepthWithIntrinsics will use the methodology in Manduchi et al. to find obstacle points
// before clustering and projecting those points into 3D obstacles.
// It reads one frame from src, converts it to a depth map, projects the map to a
// point cloud using the stored intrinsics, and runs ERCCL clustering on the cloud.
func (o *obsDepth) obsDepthWithIntrinsics(ctx context.Context, src camera.VideoSource) ([]*vision.Object, error) {
	// Check if we have intrinsics here. If not, don't even try.
	if o.intrinsics == nil {
		return nil, errors.New("tried to build obstacles depth with intrinsics but no intrinsics found")
	}
	pic, release, err := camera.ReadImage(ctx, src)
	if err != nil {
		// Wrap instead of discarding the underlying error so callers see the cause.
		return nil, errors.Wrapf(err, "could not get image from %s", src)
	}
	defer release()
	dm, err := rimage.ConvertImageToDepthMap(ctx, pic)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert image to depth map")
	}
	cloud := depthadapter.ToPointCloud(dm, o.intrinsics)
	return segmentation.ApplyERCCLToPointCloud(ctx, cloud, o.clusteringConf)
}