diff --git a/README.md b/README.md
index bd7f54704..f8e052bc7 100644
--- a/README.md
+++ b/README.md
@@ -67,8 +67,8 @@ This project adheres to the [JuliaRobotics code of conduct](https://github.com/J
 [cjl-cov-url]: https://codecov.io/github/JuliaRobotics/Caesar.jl?branch=master
 [cjl-ci-dev-img]: https://github.com/JuliaRobotics/Caesar.jl/actions/workflows/ci.yml/badge.svg
 [cjl-ci-dev-url]: https://github.com/JuliaRobotics/Caesar.jl/actions/workflows/ci.yml
-[cjl-ci-stb-img]: https://travis-ci.org/JuliaRobotics/Caesar.jl.svg?branch=release/v0.10
-[cjl-ci-stb-url]: https://travis-ci.org/JuliaRobotics/Caesar.jl
+[cjl-ci-stb-img]: https://github.com/JuliaRobotics/Caesar.jl/actions/workflows/ci.yml/badge.svg
+[cjl-ci-stb-url]: https://github.com/JuliaRobotics/Caesar.jl/actions/workflows/ci.yml
 [cjl-stbl-img]: https://juliahub.com/docs/Caesar/version.svg
diff --git a/examples/dev/RoomPillarsModes.jl b/examples/dev/RoomPillarsModes.jl
new file mode 100644
index 000000000..f3df18f04
--- /dev/null
+++ b/examples/dev/RoomPillarsModes.jl
@@ -0,0 +1,74 @@
+# quick ScatterAlignPose2 test: align a partial scan against a synthetic pillar world
+
+using Revise
+using Caesar, Images, Random  # Random provides shuffle (used below)
+using GLMakie
+using TensorCast
+
+##
+
+world_model = zeros(0,2)
+offs = zeros(1,2)
+for y in -9.5:1:9.5, i in 1:100  # 20 clusters ("pillars") of 100 points each, spaced 1m apart along y
+  offs[2] = y
+  world_model = vcat(world_model, 0.05*randn(1,2) + offs)
+end
+
+m1 = deepcopy(world_model)[300:700,:]  # contiguous subset acting as the local scan
+
+world_model = world_model[shuffle(1:size(world_model,1)), :]  # randomize point order
+m1 = m1[shuffle(1:size(m1,1)),:]
+
+##
+
+
+scatter(world_model)
+
+
+##
+
+fg = initfg()
+
+addVariable!(fg, :x0, Pose2)
+addVariable!(fg, :x1, Pose2)
+
+addFactor!(fg, [:x0], PriorPose2(MvNormal([0;0;0.], [1 0 0; 0 1 0; 0 0 0.1])))
+
+
+@cast wm_[i][d] := world_model[i,d]
+@cast m1_[i][d] := m1[i,d]
+
+wm__ = collect.(wm_)
+m1__ = collect.(m1_)
+
+bw = [0.1;0.1]  # KDE bandwidth
+cloud1=manikde!(Position2, wm__; bw)
+cloud2=manikde!(Position2, m1__; bw)
+
+sap = ScatterAlignPose2(;
+  cloud1,
+  cloud2,
+  sample_count = 500
+)
+
+addFactor!(fg, [:x0; :x1], sap)
+
+##
+
+x1 = approxConv(fg, :x0x1f1, :x1)  # propagate belief from x0 through the ScatterAlignPose2 factor
+xy = (s->s.x[1]).(x1)
+scatter(hcat(xy...)' )
+
+
+##
+
+
+nt = Caesar.overlayScatter(sap)
+plotScatterAlign(nt)
+
+##
+
+@cast pts[i,d] := pts_[i][d]  # NOTE: pts_ is expected from the interactive session, not defined above
+scatter(pts)
+
+##
\ No newline at end of file
diff --git a/src/3rdParty/_PCL/services/ROSConversions.jl b/src/3rdParty/_PCL/services/ROSConversions.jl
index 2fdac7af2..8fe5d8bf8 100644
--- a/src/3rdParty/_PCL/services/ROSConversions.jl
+++ b/src/3rdParty/_PCL/services/ROSConversions.jl
@@ -6,6 +6,7 @@ using .RobotOS
+# FIXME: note this is not working; functions use Main. as a workaround
 @rosimport std_msgs.msg: Header
 @rosimport sensor_msgs.msg: PointField
 @rosimport sensor_msgs.msg: PointCloud2
diff --git a/src/images/images.jl b/src/images/images.jl
index ba3d36f17..6fedfb60e 100644
--- a/src/images/images.jl
+++ b/src/images/images.jl
@@ -16,14 +16,19 @@ Also see: [`toROSImage`](@ref)
 function toImage(msgd::Dict{String,Any})
   data = haskey(msgd, "data_b64") ? base64decode(msgd["data_b64"]) : UInt8.(msgd["data"])
   h, w = msgd["height"], msgd["width"]
-
-  if msgd["encoding"] == "mono8"
+  @show h, w, msgd["step"]
+  if msgd["encoding"] == "mono8" || msgd["encoding"] == "8UC1"
+    # img_ = normedview(N0f8, data)
+    # reshape(img_, w, h)'
     img = Matrix{Gray{N0f8}}(undef, h, w)
     # assuming one endian type for now, TODO both little and big endian
     for i in 1:h, j in 1:w
-      img[i,j] = Gray{N0f8}(data[msgd["step"]*(i-1)+j]/255)
+      img[i,j] = Gray{N0f8}(data[msgd["step"]*(i-1)+j]/(2^8 - 1))
     end
     img
+  elseif msgd["encoding"] == "mono16" || msgd["encoding"] == "16UC1"
+    img_ = normedview(N0f16, reinterpret(UInt16, data))  # bytes -> 16-bit normed values; assumes step == 2*width and matching endianness
+    reshape(img_, w, h)'
   else
     error("Conversion for ROS sensor_msgs.Image encoding not implemented yet $(msgd["encoding"])")
   end
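A minimal usage sketch (illustration only, not part of the patch) for the extended mono8 branch of `toImage`: the dict fields mirror the sensor_msgs/Image keys the function reads ("encoding", "height", "width", "step", "data"); the `Caesar.toImage` qualification and the padding-free `step == width` layout are assumptions.

# Hypothetical round-trip check for the mono8 path of toImage
using Caesar

h, w = 4, 6
raw  = rand(UInt8, h*w)              # row-major mono8 payload, one byte per pixel
msgd = Dict{String,Any}(
  "encoding" => "mono8",
  "height"   => h,
  "width"    => w,
  "step"     => w,                   # bytes per row (assumes no row padding)
  "data"     => raw,
)

img = Caesar.toImage(msgd)           # expected: h×w Matrix{Gray{N0f8}}
@assert size(img) == (h, w)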