diff --git a/.gitignore b/.gitignore
index 5c3f2075b..cd7993fee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,5 +5,6 @@ examples/lcmserver/lcmtypes/rome/
 examples/lcmserver/rome/
 examples/tracking/simpleradar/exports/*
 docs/build
+docs/Manifest.toml
 results/*
 build
diff --git a/docs/make.jl b/docs/make.jl
index 410849a5a..b0942cda6 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -9,6 +9,8 @@ import IncrementalInference: fmcmc!, localProduct, prodmultiplefullpartials, pro
 import IncrementalInference: cliqGibbs, packFromLocalPotentials!, treeProductDwn, updateFGBT!, upGibbsCliqueDensity
 import IncrementalInference: initfg, downGibbsCliqueDensity
 import IncrementalInference: solveGraphParametric, solveGraphParametric!
+import IncrementalInference: _solveCCWNumeric!
+import IncrementalInference: initParametricFrom!

 using KernelDensityEstimatePlotting
 # import KernelDensityEstimatePlotting: plotKDE
@@ -18,6 +20,7 @@ using RoMEPlotting
 using DistributedFactorGraphs
 import DistributedFactorGraphs: showFactor, showVariable
 import DistributedFactorGraphs: deleteVariable!
+import DistributedFactorGraphs: loadDFG, loadDFG!

 makedocs(
     modules = [Caesar, RoME, IncrementalInference, RoMEPlotting, KernelDensityEstimatePlotting, DistributedFactorGraphs],
diff --git a/docs/src/concepts/2d_plotting.md b/docs/src/concepts/2d_plotting.md
index a0a44979b..4843deff4 100644
--- a/docs/src/concepts/2d_plotting.md
+++ b/docs/src/concepts/2d_plotting.md
@@ -166,7 +166,7 @@ plotPose

 ### Debug With Local Graph Product Plot

-One useful function is to check that data in the factor graph makes sense. While the full inference algorithm uses a Bayes (Junction) tree to assemble marginal belief estimates in an efficient manner, it is often useful for a straight forward graph based sanity check. The [`plotLocalProduct`](@ref) projects through [`approxConv`](@ref) each of the factors connected to the target variable and plots the result. This example looks at the loop-closure point around `:x0`, which is also pinned down by the only prior in the canonical Hexagonal factor graph.
+One useful function is to check that data in the factor graph makes sense. While the full inference algorithm uses a Bayes (Junction) tree to assemble marginal belief estimates in an efficient manner, it is often useful for a straightforward graph-based sanity check. The [`plotLocalProduct`](@ref) projects through [`approxConvBelief`](@ref) each of the factors connected to the target variable and plots the result. This example looks at the loop-closure point around `:x0`, which is also pinned down by the only prior in the canonical Hexagonal factor graph.
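The same sanity check can also be assembled by hand by projecting each factor's belief onto `:x0` with `approxConvBelief` and overlaying the results. A minimal sketch, assuming the canonical Hexagonal graph and the factor labels shown in the snippet that follows (`:x0f1`, `:x0l1f1`); exact plotting support for manifold beliefs depends on the installed RoMEPlotting version:

```julia
using RoME, RoMEPlotting

# canonical example graph with a loop-closure sighting at :x0
fg = generateGraph_Hexagonal()
solveTree!(fg)

# project individual factor beliefs onto :x0 (labels as per ls(fg, :x0))
X0_prior = approxConvBelief(fg, :x0f1, :x0)    # the only prior in the graph
X0_loop  = approxConvBelief(fg, :x0l1f1, :x0)  # the loop-closure landmark factor

# the two projections should roughly agree when the graph data is consistent
plotKDE([X0_prior; X0_loop], dims=[1;2])
```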
 ```julia
 @show ls(fg, :x0); # ls(fg, :x0) = [:x0f1, :x0x1f1, :x0l1f1]
diff --git a/docs/src/concepts/arena_visualizations.md b/docs/src/concepts/arena_visualizations.md
index 8407d4bcd..acf8ccbef 100644
--- a/docs/src/concepts/arena_visualizations.md
+++ b/docs/src/concepts/arena_visualizations.md
@@ -1,4 +1,4 @@
-# [Visualization 3D](@ref visualization_3d)
+# [Visualization 3D](@id visualization_3d)

 ## Introduction

diff --git a/docs/src/concepts/available_varfacs.md b/docs/src/concepts/available_varfacs.md
index 1722dfd44..c2e2a3f2b 100644
--- a/docs/src/concepts/available_varfacs.md
+++ b/docs/src/concepts/available_varfacs.md
@@ -21,8 +21,7 @@ The variables and factors in Caesar should be sufficient for a variety of roboti
 Default variables in IncrementalInference

 ```@docs
-ContinuousScalar
-ContinuousEuclid{N}
+Position{N}
 ```

 ### 2D Variables
@@ -40,7 +39,6 @@ DynPose2
 ```@docs
 Point3
 Pose3
-InertialPose3
 ```

 !!! note
@@ -102,10 +100,10 @@ VelPose2VelPose2
 DynPose2Pose2
 Pose3Pose3
 PriorPose3ZRP
-PartialPriorRollPitchZ
-PartialPose3XYYaw
 Pose3Pose3XYYaw
 ```

+
+

 # Extending Caesar with New Variables and Factors
diff --git a/docs/src/concepts/building_graphs.md b/docs/src/concepts/building_graphs.md
index 593e825b4..5c3bf41ee 100644
--- a/docs/src/concepts/building_graphs.md
+++ b/docs/src/concepts/building_graphs.md
@@ -1,4 +1,4 @@
-# [Building Graphs](@ref building_graphs)
+# [Building Graphs](@id building_graphs)

 Irrespective of your application - real-time robotics, batch processing of survey data, or really complex multi-hypothesis modeling - you're going to need to add factors and variables to a graph. This section discusses how to do that in Caesar.

@@ -53,13 +53,7 @@ addVariable!
 deleteVariable!
 ```

-### Initializing Variables
-
-The MM-iSAMv2 algorithm uses one of two approaches to automatically initialize variables. The `initManual!` function can be used if you wish to overwrite or pre-empt this initialization.
-
-```@docs
-initManual!
-```
+The MM-iSAMv2 algorithm uses one of two approaches to automatically initialize variables, or variables can be [initialized manually](@ref variable_init).

 ## Factors
diff --git a/docs/src/concepts/dataassociation.md b/docs/src/concepts/dataassociation.md
index e899beda7..89fd3e79c 100644
--- a/docs/src/concepts/dataassociation.md
+++ b/docs/src/concepts/dataassociation.md
@@ -1,4 +1,4 @@
-# Data Association and Hypotheses
+# [Data Association and Hypotheses](@id data_multihypo)

 Ambiguous data and processing often produce complicated data association situations. In SLAM, loop-closures are a major source of concern when developing autonomous subsystems or behaviors. To illustrate this point, consider the two scenarios depicted below:

diff --git a/docs/src/concepts/entry_data.md b/docs/src/concepts/entry_data.md
index 02d8fbaf6..e3a754f9b 100644
--- a/docs/src/concepts/entry_data.md
+++ b/docs/src/concepts/entry_data.md
@@ -140,7 +140,4 @@ addData!(dfg,:default_folder_store,:x0,:nnModel,

 ## Experimental Features

-Loading images is a relatively common task, hence a convenience function has been developed:
-```@docs
-Caesar.fetchDataImage
-```
+Loading images is a relatively common task, hence a convenience function has been developed; when `using ImageMagick`, try `Caesar.fetchDataImage`.
\ No newline at end of file
diff --git a/docs/src/concepts/interacting_fgs.md b/docs/src/concepts/interacting_fgs.md
index 68aec0978..c43eb3927 100644
--- a/docs/src/concepts/interacting_fgs.md
+++ b/docs/src/concepts/interacting_fgs.md
@@ -32,7 +32,7 @@ loadDFG!
 A later part of the documentation will show [how to include a `Entry=>Data` blob store](https://juliarobotics.org/Caesar.jl/latest/concepts/entry_data/).

-## Querying the FactorGraph
+## [Querying the Graph](@id querying_graph)

 ### List Variables:

@@ -134,7 +134,7 @@ It is also possible to sample the above belief objects for more samples:
 pts = rand(X0, 200)
 ```

-## Building On-Manifold KDEs
+## [Building On-Manifold KDEs](@id build_manikde)

 These kernel density belief objects can be constructed from points as follows:
 ```julia
@@ -166,5 +166,5 @@ joinLogPath

 ```@docs
 getFactorDim
-getManifolds
+getManifold
 ```
\ No newline at end of file
diff --git a/docs/src/concepts/solving_graphs.md b/docs/src/concepts/solving_graphs.md
index 48bb12ae3..06e16fd8c 100644
--- a/docs/src/concepts/solving_graphs.md
+++ b/docs/src/concepts/solving_graphs.md
@@ -14,7 +14,7 @@ The returned Bayes (Junction) `tree` object is described in more detail on [a de
 solveTree!
 ```

-## Automatic vs Manual Init
+## [Automatic vs Manual Init](@id variable_init)

 Currently the main automatic initialization technique used by IncrementalInference.jl by delayed propagation of belief on the factor graph. This can be globally or locally controlled via:
 ```julia
diff --git a/docs/src/concepts/using_manifolds.md b/docs/src/concepts/using_manifolds.md
index d1a410071..ea4c9eec3 100644
--- a/docs/src/concepts/using_manifolds.md
+++ b/docs/src/concepts/using_manifolds.md
@@ -6,7 +6,7 @@ The Community has been developing high quality [documentation for Manifolds.jl](

 ## Separate Manifold Beliefs Page

-Follow these hyperlinks if you are looking for information on working with Manifold Beliefs or [`ManifoldKernelDensity`s](@ref manikde_page)
+See [`building a Manifold Kernel Density`](@ref build_manikde) for more information.

 ## Why Manifolds.jl

diff --git a/docs/src/dev/known_issues.md b/docs/src/dev/known_issues.md
index 2eb4e95b3..60ef09e03 100644
--- a/docs/src/dev/known_issues.md
+++ b/docs/src/dev/known_issues.md
@@ -10,7 +10,7 @@ This page is used to list known issues:
 ### Install 3D Visualization Utils (e.g. Arena.jl)

 3D Visualizations are provided by [Arena.jl](https://github.com/JuliaRobotics/Arena.jl) as well as development package Amphitheater.jl.
-Please follow instructions on the [Visualizations page](concepts/arena_visualizations.md) for a variety of 3D utilities.
+Please follow instructions on the [Visualizations page](../concepts/arena_visualizations.md) for a variety of 3D utilities.

 !!! note
     Arena.jl and Amphitheater.jl are currently being refactored as part of the broader DistributedFactorGraph migration, the features are are in beta stage (1Q2020).
diff --git a/docs/src/examples/adding_variables_factors.md b/docs/src/examples/adding_variables_factors.md
index cb0573fa5..774133eb6 100644
--- a/docs/src/examples/adding_variables_factors.md
+++ b/docs/src/examples/adding_variables_factors.md
@@ -4,7 +4,7 @@ A couple of important points:
 * You **do not need to** modify or insert your new code into Caesar/RoME/IncrementalInference source code libraries -- they can be created and run anywhere on-the-fly!
 * As long as the factors exist in the working space when the solver is run, the factors are automatically used -- this is possible due to Julia's [multiple dispatch design](https://docs.julialang.org/en/v1/manual/methods/index.html)
 * Caesar.jl is designed to allow you to add new variables and factors to your own independent repository and incorporate them at will at compile-time or even run-time
-* Residual function definitions for new factors types use a [callable struct (a.k.a functor) architecture](@ref custom_residual_factor) to simultaneously allow:
+* Residual function definitions for new factor types use a [callable struct (a.k.a. functor) architecture](@ref custom_relative_factor) to simultaneously allow:
   * Multiple dispatch (i.e. 'polymorphic' behavior)
   * Meta-data and in-place memory storage for advanced and performant code
   * An outside callback implementation style
@@ -17,13 +17,12 @@ A couple of important points:

 All factors inherit from one of the following types, depending on their function:

-* AbstractPrior: AbstractPrior are priors (unary factors) that provide an absolute constraint for a single variable. A simple example of this is an absolute GPS prior, or equivalently a (0, 0, 0) starting location in a [`Pose2`](@ref) scenario.
-  * Requires: A getSample function
-* IIF.AbstractRelativeMinimize: IIF.AbstractRelativeMinimize are relative factors that introduce an algebraic relationship between two or more variables. A simple example of this is an odometry factor between two pose variables, or a range factor indicating the range between a pose and another variable.
-  * Requires: A getSample function and a residual function definition
+* `AbstractPrior` is for priors (unary factors) that provide an absolute constraint for a single variable. A simple example of this is an absolute GPS prior, or equivalently a (0, 0, 0) starting location in a [`Pose2`](@ref) scenario.
+  * Requires: A `getSample` function
+* `AbstractRelativeMinimize` uses Optim.jl and is for relative factors that introduce an algebraic relationship between two or more variables. A simple example of this is an odometry factor between two pose variables, or a range factor indicating the range between a pose and another variable.
+  * Requires: A `getSample` function and a residual function definition
   * The minimize suffix specifies that the residual function of this factor will be enforced by numerical minimization (find me the minimum of this function)
-* IIF.AbstractRelativeRoots: IIF.AbstractRelativeRoots are relative factors that introduce algebraic relationships between two or more variables. They are the same as IIF.AbstractRelativeMinimize, however they use root finding to find the zero crossings (rather than numerical minimization).
-  * Requires: A getSample function and a residual function definition
+* [NEW] `AbstractManifoldMinimize` uses [Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl).

 How do you decide which to use?
 * If you are creating factors for world-frame information that will be tied to a single variable, inherit from `<:AbstractPrior`
   * GPS coordinates should be priors
 * If you are creating factors for local-frame relationships between variables, inherit from IIF.AbstractRelativeMinimize
   * Odometry and bearing deltas should be introduced as pairwise factors and should be local-frame

 TBD: Users should start with IIF.AbstractRelativeMinimize, discuss why and when they should promote their factors to IIF.AbstractRelativeRoots.

 !!! note
-    `IIF.AbstractRelativeMinimize` does not imply that the overall inference algorithm only minimizes an objective function. The MM-iSAM algorithm is built around fixed-point analysis. Minimization is used here to locally enforce the residual function.
+    `AbstractRelativeMinimize` does not imply that the overall inference algorithm only minimizes an objective function. The MM-iSAM algorithm is built around fixed-point analysis. Minimization is used here to locally enforce the residual function.

 What you need to build in the new factor:
 * A struct for the factor itself
 * A sampler function to return measurements from the random ditributions
-* If you are building a [`IIF.AbstractRelativeMinimize`](@ref) or a [`IIF.AbstractRelativeRoots`](@ref) you need to define a residual function to introduce the relative algebraic relationship between the variables
+* If you are building a `<:AbstractRelative` you need to define a residual function to introduce the relative algebraic relationship between the variables
   * Minimization function should be lower-bounded and smooth
 * A packed type of the factor which must be named Packed[Factor name], and allows the factor to be packed/transmitted/unpacked
 * Serialization and deserialization methods
   * These are convert functions that pack and unpack the factor (which may be highly complex) into serialization-compatible formats
-  * As the factors are mostly comprised of distributions (of type [`SamplableBelief`](@ref)), functions are provided to pack and unpack the distributions:
-    * Packing: To convert from a [`SamplableBelief`](@ref) to a serializable obhect, use `convert(PackedSamplableBelief, ::SamplableBelief)`
-    * Unpacking: To convert from string back to a `SamplableBelief`, use `convert(SamplableBelief, ::PackedSamplableBelief)`
+  * As the factors are mostly comprised of distributions (of type `SamplableBelief`), `JSON3.jl` is used for serialization.
diff --git a/docs/src/examples/basic_definingfactors.md b/docs/src/examples/basic_definingfactors.md
index c461a3912..02549acf6 100644
--- a/docs/src/examples/basic_definingfactors.md
+++ b/docs/src/examples/basic_definingfactors.md
@@ -49,8 +49,8 @@ This new prior can now readily be added to an ongoing factor graph:

 ```julia
 # lets generate a random nonparametric belief
-pts = [samplePoint(getManifold(ContinuousEuclid{1}), Normal(8.0,2.0)) for _=1:75]
-someBelief = manikde!(ContinuousEuclid{1}, pts)
+pts = [samplePoint(getManifold(Position{1}), Normal(8.0,2.0)) for _=1:75]
+someBelief = manikde!(Position{1}, pts)

 # and build your new factor as an object
 myprior = MyPrior(someBelief)
@@ -61,7 +61,10 @@ and add it to the existing factor graph from earlier, lets say:
 addFactor!(fg, [:x1], myprior)
 ```

-Thats it, this factor is now part of the graph. This should be a solvable graph:
+!!! note
+    Variable types `Position{1}` or `ContinuousEuclid{1}` are algebraically equivalent.
+
+That's it, this factor is now part of the graph. This should be a solvable graph:
 ```julia
 solveGraph!(fg); # exact alias of solveTree!(fg)
 ```
diff --git a/docs/src/examples/canonical_graphs.md b/docs/src/examples/canonical_graphs.md
index 2ff9f35a5..a6336ba9a 100644
--- a/docs/src/examples/canonical_graphs.md
+++ b/docs/src/examples/canonical_graphs.md
@@ -11,8 +11,8 @@ RoME.generateGraph_Circle
 RoME.generateGraph_ZeroPose
 RoME.generateGraph_Hexagonal
 RoME.generateGraph_Beehive!
-RoME.generateGraph_Honeycomb!
 RoME.generateGraph_Helix2D!
 RoME.generateGraph_Helix2DSlew!
 RoME.generateGraph_Helix2DSpiral!
-```
\ No newline at end of file
+```
+
diff --git a/docs/src/examples/deadreckontether.md b/docs/src/examples/deadreckontether.md
index 49cfae014..373258e96 100644
--- a/docs/src/examples/deadreckontether.md
+++ b/docs/src/examples/deadreckontether.md
@@ -20,7 +20,7 @@ Overview of related functions while this documentation is being expanded:

 ## DRT Construct

-The idea is that the dead reckong tracking method is to update a single value based on high-rate sensor data. Perhaps 'particles' values can be propagated as a non-Gaussian prediction, depending on allowable compute resources, and for that see [`approxConv`](@ref). Some specialized plumbing has been built to facilitate rapid single value propagation using the factor graph.
+The idea is that the dead reckoning tracking method updates a single value based on high-rate sensor data. Perhaps 'particles' values can be propagated as a non-Gaussian prediction, depending on allowable compute resources, and for that see [`approxConvBelief`](@ref). Some specialized plumbing has been built to facilitate rapid single value propagation using the factor graph.

 ### Suppress w/ `solvable`

diff --git a/docs/src/examples/examples.md b/docs/src/examples/examples.md
index d16c263ff..83685eb2a 100644
--- a/docs/src/examples/examples.md
+++ b/docs/src/examples/examples.md
@@ -71,11 +71,11 @@ This example presents a novel multimodal solution to an otherwise intractible mu

 ```

-The fractional multi-hypothesis assignments `addFactor!(..., multihypo=[1.0; 0.5;0.5])`. The [Multihypothesis](@ref) Section discusses this feature in more detail. Similarly for tri-nary or higher multi-hypotheses.
+The fractional multi-hypothesis assignments `addFactor!(..., multihypo=[1.0; 0.5;0.5])`. Similarly for tri-nary or higher multi-hypotheses.

 ### Probabilistic Data Association (Uncertain loop closures)

-Example where the standard multihypothesis `addFactor!(.., multihypo=[1.0;0.5;0.5])` interface is used. This is from the Kitti driving dataset. [Video here](https://www.youtube.com/watch?v=9hEonD8KDrs). The [Multihypothesis](@ref) Section discusses this feature in more detail.
+Example where the standard multihypothesis `addFactor!(.., multihypo=[1.0;0.5;0.5])` interface is used. This is from the Kitti driving dataset. [Video here](https://www.youtube.com/watch?v=9hEonD8KDrs). The [data association and multihypothesis](@ref data_multihypo) section discusses this feature in more detail.

 ```@raw html
 IMAGE ALT TEXT HERE
diff --git a/docs/src/examples/parametric_solve.md b/docs/src/examples/parametric_solve.md
index c3f36668d..1f37729ee 100644
--- a/docs/src/examples/parametric_solve.md
+++ b/docs/src/examples/parametric_solve.md
@@ -21,15 +21,9 @@ initParametricFrom!

 Factor that supports a parametric solution, with supported distributions (such as `Normal` and `MvNormal`), can be used in a parametric batch solver `solveGraphParametric`.

-### `getParametricMeasurement`
+### `getMeasurementParametric`

-Parameteric calculations require the mean and covariance from Gaussian measurement functions (factors) using the function
-
-```@docs
-IncrementalInference.getParametricMeasurement
-```
-
-`getParametricMeasurement` defaults to looking for a supported distribution in field `.Z` followed by `.z`. Therefore, if the factor uses this fieldname, `getParametricMeasurement` does not need to be extended. You can extend by simply implementing, for example, your own `IncrementalInference.getParametricMeasurement(f::OtherFactor) = m.density`.
+Parametric calculations require the mean and covariance from Gaussian measurement functions (factors) using the `getMeasurementParametric` function. `getMeasurementParametric` defaults to looking for a supported distribution in field `.Z` followed by `.z`. Therefore, if the factor uses this fieldname, `getMeasurementParametric` does not need to be extended. You can extend by simply implementing, for example, your own `IncrementalInference.getMeasurementParametric(f::OtherFactor) = m.density`.

 For this example, the `Z` field will automatically be detected used by default for `MyFactor` from above.
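A hedged sketch of the extension hook described above: for a hypothetical factor whose Gaussian measurement lives in a field that is not named `.Z` or `.z`, the override can return the same `(measurement, information)` pair convention used in the `Pose2Point2BearingRange` example below. The type and field names here are illustrative only, and a complete factor would still need its residual and `getManifold` definitions.

```julia
using IncrementalInference, Distributions

# hypothetical relative factor; the measurement is stored in a field called `offset`
struct MyOffsetFactor{T <: SamplableBelief} <: IncrementalInference.AbstractRelativeMinimize
  offset::T
end

# supply the mean and information (inverse covariance) needed by the parametric solver
function IncrementalInference.getMeasurementParametric(f::MyOffsetFactor{<:Normal})
  meas = [mean(f.offset)]
  iΣ   = reshape([1/var(f.offset)], 1, 1)
  return meas, iΣ
end
```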
@@ -39,10 +33,10 @@ struct MyFactor{T <: SamplableBelief} <: IIF.AbstractRelativeRoots
 end
 ```

-An example of where implementing `getParametricMeasurement` is needed can be found in the RoME factor [`Pose2Point2BearingRange`](@ref)
+An example of where implementing `getMeasurementParametric` is needed can be found in the RoME factor [`Pose2Point2BearingRange`](@ref)

 ```julia
-import getParametricMeasurement
-function getParametricMeasurement(s::Pose2Point2BearingRange{<:Normal, <:Normal})
+import IncrementalInference: getMeasurementParametric
+function getMeasurementParametric(s::Pose2Point2BearingRange{<:Normal, <:Normal})

   meas = [mean(s.bearing), mean(s.range)]
   iΣ = [1/var(s.bearing) 0;
diff --git a/docs/src/examples/using_pcl.md b/docs/src/examples/using_pcl.md
index 105d9b0cd..87ae219a3 100644
--- a/docs/src/examples/using_pcl.md
+++ b/docs/src/examples/using_pcl.md
@@ -34,11 +34,11 @@ Strong integration between PCL and [ROS](http://www.ros.org) predominantly throu
 These have been integrated through conversions to equivalent Julian types already listed above. ROS conversions requires RobotOS.jl be loaded, see page on using [ROS Direct](@ref ros_direct).

 ```@docs
-_PCL.PCLPointCloud2
 _PCL.PointXYZ
 _PCL.Header
 _PCL.PointField
 _PCL.FieldMapper
+_PCL.PCLPointCloud2
 ```

 ## Aligning Point Clouds
diff --git a/docs/src/func_ref.md b/docs/src/func_ref.md
index 5eddda071..7de656e0a 100644
--- a/docs/src/func_ref.md
+++ b/docs/src/func_ref.md
@@ -22,17 +22,14 @@ areCliqVariablesAllMarginalized
 attemptTreeSimilarClique
 childCliqs
 cliqHistFilterTransitions
-csmAnimate
 cycleInitByVarOrder!
 doautoinit!
 drawCliqSubgraphUpMocking
 fifoFreeze!
 filterHistAllToArray
-findRelatedFromPotential
 fmcmc!
 getClique
 getCliqAllVarIds
-getCliqAllVarSyms
 getCliqAssocMat
 getCliqDepth
 getCliqDownMsgsAfterDownSolve
@@ -45,8 +42,6 @@ getCliqVarIdsPriors
 getCliqVarSingletons
 getParent
 getTreeAllFrontalSyms
-getVariableDim
-getVariableInferredDim
 hasClique
 isInitialized
 isMarginalized
@@ -57,19 +52,13 @@ makeCsmMovie
 parentCliq
 predictVariableByFactor
 printCliqHistorySummary
-prodmultiplefullpartials
-prodmultipleonefullpartials
-resetBuildTreeFromOrder!
 resetCliqSolve!
 resetData!
 resetTreeCliquesForUpSolve!
 resetVariable!
-sandboxCliqResolveStep
 setfreeze!
 setValKDE!
 setVariableInitialized!
-setVariableInferDim!
-solveCliq!
 solveCliqWithStateMachine!
 transferUpdateSubGraph!
 treeProductDwn
diff --git a/docs/src/introduction.md b/docs/src/introduction.md
index 6193dbd43..a2adfcd0b 100644
--- a/docs/src/introduction.md
+++ b/docs/src/introduction.md
@@ -13,14 +13,14 @@ Caesar.jl addresses numerous issues that arise in prior SLAM solutions, includin
 * Solving under-defined systems,
 * Inference with non-Gaussian measurements,
 * Standard features for natively handling ambiguous data association and multi-hypotheses,
-  * Native multi-modal (hypothesis) representation in the factor-graph, see [Data Association and Hypotheses](@ref):
+  * Native multi-modal (hypothesis) representation in the factor-graph, see [Data Association and Hypotheses](@ref data_multihypo):
   * Multi-modal and non-parametric representation of constraints;
     * Gaussian distributions are but one of the many representations of measurement error;
 * Simplifying bespoke factor development,
 * Centralized (or peer-to-peer decentralized) factor-graph persistence,
   * i.e. Federated multi-session/agent reduction.
 * Multi-CPU inference.
-* Out-of-library extendable for [Creating New Variables and Factors](@ref);
+* Out-of-library extendable for [Custom New Variables](@ref custom_variables) and [Factors](@ref custom_prior_factor);
 * Natively supports legacy Gaussian parametric and max-mixtures solutions;
 * Local in-memory solving on the device as well as database-driven centralized solving (micro-service architecture);
 * Natively support *Clique Recycling* (i.e. fixed-lag out-marginalization) for continuous operation as well as off-line batch solving, see more at [Using Incremental Updates (Clique Recycling I)](@ref);
diff --git a/docs/src/principles/bayestreePrinciples.md b/docs/src/principles/bayestreePrinciples.md
index c444785c9..3d7d488c6 100644
--- a/docs/src/principles/bayestreePrinciples.md
+++ b/docs/src/principles/bayestreePrinciples.md
@@ -64,7 +64,7 @@ buildTreeReset!(fg, vo)
 ```

 !!! note
-    a list of variables or factors can be obtained through the `ls` and related functions, see [Querying the FactorGraph](@ref).
+    a list of variables or factors can be obtained through the `ls` and related functions, see [Querying the Factor Graph](@ref querying_graph).

 ## Interfacing with the MM-iSAMv2 Solver

@@ -79,7 +79,7 @@ tree = solveTree!(fg)
 ```

 !!! note
-    See the [Solving Graphs](@ref) section for more details on the solver.
+    See the [Solving Graphs](@ref solving_graphs) section for more details on the solver.

 ### Get the Elimination Order Used

diff --git a/ext/Pose2AprilTag4Corners.jl b/ext/Pose2AprilTag4Corners.jl
index 39ef66040..99226755f 100644
--- a/ext/Pose2AprilTag4Corners.jl
+++ b/ext/Pose2AprilTag4Corners.jl
@@ -258,28 +258,6 @@ end

 ## calibrate via preimage

-"""
-    $SIGNATURES
-
-Helper function to generate and calculate the aggregate cost indicative of the discrepancy between
-the deconvolution prediction and prescribed measurement of mutliple `Pose2AprilTag4Corners` factors
-in the factor graph.
-
-The idea is that a bad calibration will give some kind of SLAM result, and if there is enough information
-then the SLAM result can be used to bootstrap better and better calibration estimates of the camera that
-was used to capture the AprilTag sightings. This function is meant to help do that secondary parameter
-search inside a factor graph objection, after a regular solution has been found.
-
-Notes
-- `pred, _ = approxDeconv(dfg, fct)`
-- `fct.preimage[1](pred[:,idx], [f_width, f_height, c_width, c_height, taglength])`
-  - `fct.preimage[1]` is a function to find the preimage.
-- `obj = (fc_wh) -> fct.preimage[1](pred[:,idx], fc_wh)`
-  - `fc_wh = [f_width, f_height, c_width, c_height, 0.172]`
-- `obj2 = (fcwh) -> obj([fcwh[1]; fcwh[1]; fcwh[2]; c_height; taglength])`
-- `result = Optim.optimize(obj, fct.preimage[2], BFGS(), Optim.options(x_tol=1e-8))`
-  - A stored starting estimate for optimization `fct.preimage[2]`
-"""
 function generateCostAprilTagsPreimageCalib(dfg::AbstractDFG,
                                             fsyms::Vector{Symbol}=lsf(dfg, Pose2AprilTag4Corners );
                                             idx::Int = 1,  # the sample number
diff --git a/ext/WeakdepsPrototypes.jl b/ext/WeakdepsPrototypes.jl
index 36ea02fcd..8fd9df17a 100644
--- a/ext/WeakdepsPrototypes.jl
+++ b/ext/WeakdepsPrototypes.jl
@@ -6,6 +6,29 @@ export Pose2AprilTag4Corners, PackedPose2AprilTag4Corners
 export generateCostAprilTagsPreimageCalib

 function drawBearingLinesAprilTags!
 end
+
+"""
+    $SIGNATURES
+
+Helper function to generate and calculate the aggregate cost indicative of the discrepancy between
+the deconvolution prediction and prescribed measurement of multiple `Pose2AprilTag4Corners` factors
+in the factor graph.
+
+The idea is that a bad calibration will give some kind of SLAM result, and if there is enough information
+then the SLAM result can be used to bootstrap better and better calibration estimates of the camera that
+was used to capture the AprilTag sightings. This function is meant to help do that secondary parameter
+search inside a factor graph object, after a regular solution has been found.
+
+Notes
+- `pred, _ = approxDeconv(dfg, fct)`
+- `fct.preimage[1](pred[:,idx], [f_width, f_height, c_width, c_height, taglength])`
+  - `fct.preimage[1]` is a function to find the preimage.
+- `obj = (fc_wh) -> fct.preimage[1](pred[:,idx], fc_wh)`
+  - `fc_wh = [f_width, f_height, c_width, c_height, 0.172]`
+- `obj2 = (fcwh) -> obj([fcwh[1]; fcwh[1]; fcwh[2]; c_height; taglength])`
+- `result = Optim.optimize(obj, fct.preimage[2], BFGS(), Optim.options(x_tol=1e-8))`
+  - A stored starting estimate for optimization `fct.preimage[2]`
+"""
 function generateCostAprilTagsPreimageCalib end

 ## ==============================================
diff --git a/src/3rdParty/_PCL/entities/PCLTypes.jl b/src/3rdParty/_PCL/entities/PCLTypes.jl
index f03fc1f15..f47bc5ea5 100644
--- a/src/3rdParty/_PCL/entities/PCLTypes.jl
+++ b/src/3rdParty/_PCL/entities/PCLTypes.jl
@@ -134,7 +134,12 @@ Base.@kwdef struct Header
   frame_id::String = ""
 end

-# https://pointclouds.org/documentation/structpcl_1_1_p_c_l_point_field.html
+"""
+    $TYPEDEF
+
+How a point is stored in memory.
+- https://pointclouds.org/documentation/structpcl_1_1_p_c_l_point_field.html
+"""
 Base.@kwdef struct PointField
   """ name of field """
   name::String
@@ -165,7 +170,13 @@ end
 # https://pointclouds.org/documentation/common_2include_2pcl_2point__cloud_8h_source.html#l00072
 const MsgFieldMap = Vector{FieldMapping}

-# https://docs.ros.org/en/hydro/api/pcl/html/conversions_8h_source.html#l00091
+"""
+$TYPEDEF
+
+Which field values to store and how to map them to values during serialization.
+
+- https://docs.ros.org/en/hydro/api/pcl/html/conversions_8h_source.html#l00091
+"""
 Base.@kwdef struct FieldMapper{T<:PointT}
   fields_::Vector{<:PointField} = Vector{PointField}()
   map_::Vector{<:FieldMapping} = Vector{FieldMapping}()
@@ -182,7 +193,7 @@ References:
 - https://pointclouds.org/documentation/classpcl_1_1_point_cloud.html
 - https://pointclouds.org/documentation/common_2include_2pcl_2point__cloud_8h_source.html

-See also: [`Caesar._PCL.toROSPointCloud2`](@ref)
+See also: `Caesar._PCL.toROSPointCloud2`
 """
 Base.@kwdef struct PCLPointCloud2
   """ the point cloud header """
diff --git a/src/images/ROSConversions.jl b/src/images/ROSConversions.jl
index 397d272de..e061ab446 100644
--- a/src/images/ROSConversions.jl
+++ b/src/images/ROSConversions.jl
@@ -32,7 +32,7 @@ toImage(msg::Main.sensor_msgs.msg.Image) = unmarshal(msg) |> toImage

 Convert `Caesar.Image::Dict` type to ROS message `sensor_msgs.msg.Image`.

-See also: [`Caesar.unmarshal`](@ref), [`Caesar.toImage`](@ref), [`Caesar._PCL.toROSPointCloud2`](@ref)
+See also: [`Caesar.unmarshal`](@ref), [`Caesar.toImage`](@ref), `Caesar._PCL.toROSPointCloud2`
 """
 function toROSImage(msgd::Dict{String,Any})
   header = Main.std_msgs.msg.Header();
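The `WeakdepsPrototypes.jl` hunk above moves the docstring onto an empty function stub in the parent package while the working method stays in the package extension. A minimal sketch of that Julia 1.9+ weak-dependency pattern follows; the package, module, and function names are invented for illustration, and the extension must also be declared under `[weakdeps]`/`[extensions]` in the parent `Project.toml`.

```julia
# MyPkg/src/MyPkg.jl : the parent package owns the name and the docstring
module MyPkg

"""
    tagCount

Stub for a helper implemented in the AprilTags extension; keeping the docstring
on the stub means it is available even when the weak dependency is not loaded.
"""
function tagCount end

end # module MyPkg

# MyPkg/ext/MyPkgAprilTagsExt.jl : loaded automatically once AprilTags is present
module MyPkgAprilTagsExt

using MyPkg, AprilTags

# the extension only adds methods to the existing stub
MyPkg.tagCount(tags::Vector{AprilTags.AprilTag}) = length(tags)

end # module MyPkgAprilTagsExt
```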