In contrast to humans, who use many types of manipulation to accomplish daily tasks and can easily sequence and execute the pertinent actions, robots are confined to simple tasks that are often painstakingly broken down by the humans who operate them. Towards the goal of increasing robot autonomy, this project will extend the capabilities of manipulation planners. For the purposes of this research, manipulation planning is the domain between classical motion planning and what is often called task and motion planning, which includes temporal reasoning and higher-order logics. This research adopts a constraint-centric view and defines a set of low-dimensional subspaces, or modes, amongst which the system must transition. The definition of transitions is also constraint-centric and is only possible because of the unified approach used when considering modes. The work depends on constructs from differential geometry and the use of powerful motion planners. It adopts a synergistic layered scheme in which a discrete planner decides the sequence of modes while being constantly informed by a continuous planner that attempts the transitions between modes. The work will start with a specific but general type of constraint, manifold constraints, and later expand to other types. The proposed research will identify the limits of using constraints as a unifying construct in manipulation planning and, in doing so, will also allow for the incorporation of manipulation-specific primitives that can extend the framework.
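To make the layered scheme concrete, the following is a minimal, hypothetical Python sketch of the interplay described above: a discrete layer searches over a graph of modes, a continuous layer attempts individual transitions, and failed attempts feed back into the discrete search. All class and function names (Mode, plan_mode_sequence, attempt_transition, layered_plan) are illustrative assumptions and do not correspond to any existing implementation; in a real system the transition attempt would invoke a constrained sampling-based motion planner.

```python
# Hypothetical sketch of the synergistic layered scheme: a discrete layer
# proposes mode sequences and is continually informed by a continuous layer
# that attempts the transitions between modes. Names are illustrative only.
import random
from dataclasses import dataclass, field
from typing import List, Optional, Set, Tuple


@dataclass
class Mode:
    """A low-dimensional subspace induced by a (manifold) constraint."""
    name: str
    neighbors: List["Mode"] = field(default_factory=list)


def plan_mode_sequence(start: Mode, goal: Mode,
                       blocked: Set[Tuple[str, str]]) -> Optional[List[Mode]]:
    """Discrete layer: breadth-first search over the mode graph, skipping
    transitions that the continuous layer has reported as infeasible."""
    frontier, parent = [start], {start.name: None}
    while frontier:
        mode = frontier.pop(0)
        if mode.name == goal.name:
            seq = []
            while mode is not None:
                seq.append(mode)
                mode = parent[mode.name]
            return list(reversed(seq))
        for nxt in mode.neighbors:
            if (mode.name, nxt.name) in blocked or nxt.name in parent:
                continue
            parent[nxt.name] = mode
            frontier.append(nxt)
    return None


def attempt_transition(a: Mode, b: Mode) -> bool:
    """Continuous layer (stub): in a real system this would run a constrained
    sampling-based planner that tries to move the system from mode a to b."""
    return random.random() > 0.3  # placeholder for an actual planning call


def layered_plan(start: Mode, goal: Mode, max_rounds: int = 20) -> Optional[List[Mode]]:
    """Synergistic loop: discrete mode sequencing informed by the outcomes
    of the continuous layer's transition attempts."""
    blocked: Set[Tuple[str, str]] = set()
    for _ in range(max_rounds):
        seq = plan_mode_sequence(start, goal, blocked)
        if seq is None:
            return None  # no mode sequence remains feasible
        for a, b in zip(seq, seq[1:]):
            if not attempt_transition(a, b):
                blocked.add((a.name, b.name))  # inform the discrete layer
                break
        else:
            return seq  # every transition along the sequence succeeded
    return None
```

As a usage example, a pick-and-place problem could be modeled with modes such as "free", "grasped", and "placed", with layered_plan returning a sequence of modes whose transitions the continuous layer was actually able to realize.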
This work has been supported by grant NSF RI 2008720.
@article{lee2022-apes, abstract = {Sampling-based motion planners are widely used for motion planning with high-dof robots. These planners generally rely on a uniform distribution to explore the search space. Recent work has explored learning biased sampling distributions to improve the time efficiency of these planners. However, learning such distributions is challenging, since there is no direct connection between the choice of distributions and the performance of the downstream planner. To alleviate this challenge, this paper proposes APES, a framework that learns sampling distributions optimized directly for the planner's performance. This is done using a critic, which serves as a differentiable surrogate objective modeling the planner's performance - thus allowing gradients to circumvent the non-differentiable planner. Leveraging the differentiability of the critic, we train a generator, which outputs sampling distributions optimized for the given problem instance. We evaluate APES on a series of realistic and challenging high-dof manipulation problems in simulation. Our experimental results demonstrate that APES can learn high-quality distributions that improve planning performance more than other biased sampling baselines.}, title = {Adaptive Experience Sampling for Motion Planning using the Generator-Critic Framework}, journal = {IEEE Robotics and Automation Letters}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, author = {Lee, Yiyuan and Chamzas, Constantinos and Kavraki, Lydia E.}, year = {2022}, month = jul, keywords = {fundamentals of sampling-based motion planning} }
@inproceedings{pan2022failing-execution, abstract = {Future robotic deployments will require robots to be able to repeatedly solve a variety of tasks in application domains. Task and motion planning addresses complex robotic problems that combine discrete reasoning over states and actions and geometric interactions during action executions. Moving beyond deterministic settings, stochastic actions can be handled by modeling the problem as a Markov Decision Process. The underlying probabilities, however, are typically hard to model since failures might be caused by hardware imperfections, sensing noise, or physical interactions. We propose a framework to address a task and motion planning setting where actions can fail during execution. To achieve a task goal, actions need to be computed and executed despite failures. The robot has to infer which actions are robust and for each new problem effectively choose a solution that reduces expected execution failures. The key idea is to continually recover and refine the underlying beliefs associated with actions across multiple different problems in the domain. Our proposed method can find solutions that reduce the expected number of discrete, executed actions. Results in physics-based simulation indicate that our method outperforms baseline replanning strategies to deal with failing executions.}, author = {Pan, Tianyang and Wells, Andrew M. and Shome, Rahul and Kavraki, Lydia E.}, keywords = {task and motion planning}, booktitle = {2022 International Conference on Robotics and Automation (ICRA)}, month = may, publisher = {IEEE}, title = {Failure is an option: Task and Motion Planning with Failing Executions}, year = {2022} }
@inproceedings{quintero-chamzas2022-blind, abstract = {Motion planning is a core problem in robotics, with a range of existing methods aimed to address its diverse set of challenges. However, most existing methods rely on complete knowledge of the robot environment, an assumption that seldom holds true due to inherent limitations of robot perception. To enable tractable motion planning for high-DOF robots under partial observability, we introduce BLIND, an algorithm that leverages human guidance. BLIND utilizes inverse reinforcement learning to derive motion-level guidance from human critiques. The algorithm overcomes the computational challenge of reward learning for high-DOF robots by projecting the robot’s continuous configuration space to a motion-planner-guided discrete task model. The learned reward is in turn used as guidance to generate robot motion using a novel motion planner. We demonstrate BLIND using the Fetch robot and perform two simulation experiments with partial observability. Our experiments demonstrate that, despite the challenge of partial observability and high dimensionality, BLIND is capable of generating safe robot motion and outperforms baselines on metrics of teaching efficiency, success rate, and path quality.}, author = {Quintero-Pe{\~n}a, Carlos and Chamzas, Constantinos and Sun, Zhanyi and Unhelkar, Vaibhav and Kavraki, Lydia E.}, keywords = {uncertainty}, booktitle = {2022 International Conference on Robotics and Automation (ICRA)}, month = may, publisher = {IEEE}, title = {Human-Guided Motion Planning in Partially Observable Environments}, year = {2022} }
@article{chamzas2022-motion-bench-maker, abstract = {Recently, there has been a wealth of development in motion planning for robotic manipulation: new motion planners are continuously proposed, each with its own unique set of strengths and weaknesses. However, evaluating these new planners is challenging, and researchers often create their own ad-hoc problems for benchmarking, which is time-consuming, prone to bias, and does not directly compare against other state-of-the-art planners. We present MotionBenchMaker, an open-source tool to generate benchmarking datasets for realistic robot manipulation problems. MotionBenchMaker is designed to be an extensible, easy-to-use tool that allows users to both generate datasets and benchmark them by comparing motion planning algorithms. Empirically, we show the benefit of using MotionBenchMaker as a tool to procedurally generate datasets, which helps in the fair evaluation of planners. We also present a suite of over 40 prefabricated datasets, with 5 different commonly used robots in 8 environments, to serve as a common ground for future motion planning research.}, title = {MotionBenchMaker: A Tool to Generate and Benchmark Motion Planning Datasets}, volume = {7}, number = {2}, pages = {882–889}, issn = {2377-3766}, doi = {10.1109/LRA.2021.3133603}, journal = {IEEE Robotics and Automation Letters}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, author = {Chamzas, Constantinos and Quintero-Pe{\~n}a, Carlos and Kingston, Zachary and Orthey, Andreas and Rakita, Daniel and Gleicher, Michael and Toussaint, Marc and Kavraki, Lydia E.}, year = {2022}, month = apr, url = {https://dx.doi.org/10.1109/LRA.2021.3133603} }
@inproceedings{sobti2021-complex-motor-actions, abstract = {We present a framework for planning complex motor actions such as pouring or scooping from arbitrary start states in cluttered real-world scenes. Traditional approaches to such tasks use dynamic motion primitives (DMPs) learned from human demonstrations. We enhance a recently proposed state-of-the-art DMP technique capable of obstacle avoidance by including them within a novel hybrid framework. This complements DMPs with sampling-based motion planning algorithms, using the latter to explore the scene and reach promising regions from which a DMP can successfully complete the task. Experiments indicate that even obstacle-aware DMPs suffer in task success when used in scenarios which largely differ from the trained demonstration in terms of the start, goal, and obstacles. Our hybrid approach significantly outperforms obstacle-aware DMPs by successfully completing tasks in cluttered scenes for a pouring task in simulation. We further demonstrate our method on a real robot for pouring and scooping tasks.}, author = {Sobti, Shlok and Shome, Rahul and Chaudhuri, Swarat and Kavraki, Lydia E.}, booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent Robots and Systems}, month = sep, title = {{A Sampling-based Motion Planning Framework for Complex Motor Actions}}, year = {2021}, keywords = {Motion and Path Planning, Manipulation Planning, Learning from Demonstration } }
@inproceedings{shome2021-bundle-of-edges, abstract = {Using sampling to estimate the connectivity of high-dimensional configuration spaces has been the theoretical underpinning for effective sampling-based motion planners. Typical strategies either build a roadmap or a tree as the underlying search structure that connects sampled configurations, with a focus on guaranteeing completeness and optimality as the number of samples tends to infinity. Roadmap-based planners allow preprocessing the space, and can solve multiple kinematic motion planning problems, but need a steering function to connect pairwise states. Such steering functions are difficult to define for kinodynamic systems, and limit the applicability of roadmaps to motion planning problems with dynamical systems. Recent advances in the analysis of single-query tree-based planners have shown that forward search trees based on random propagations are asymptotically optimal. The current work leverages these recent results and proposes a multi-query framework for kinodynamic planning. Bundles of kinodynamic edges can be sampled to cover the state space before the query arrives. Then, given a motion planning query, the connectivity of the state space reachable from the start can be recovered from a forward search tree reasoning about a local neighborhood of the edge bundle from each tree node. The work demonstrates theoretically that considering any constant radial neighborhood during this process is sufficient to guarantee asymptotic optimality. Experimental validation in five and twelve dimensional simulated systems also highlights the ability of the proposed edge bundles to express high-quality kinodynamic solutions. Our approach consistently finds higher quality solutions compared to SST and RRT, often with faster initial solution times. The strategy of sampling kinodynamic edges is demonstrated to be a promising new paradigm.}, author = {Shome, Rahul and Kavraki, Lydia E.}, booktitle = {2021 IEEE International Conference on Robotics and Automation (ICRA)}, month = jun, title = {{Asymptotically Optimal Kinodynamic Planning Using Bundles of Edges}}, year = {2021}, pages = {9988-9994}, doi = {10.1109/ICRA48506.2021.9560836}, keywords = {Motion Planning, Asymptotic Optimality, Kinodynamic Planning, Bundle Of Edges} }
@inproceedings{quintero2021-robust-motion-planning, abstract = {Motion planning for high degree-of-freedom (DOF) robots is challenging, especially when acting in complex environments under sensing uncertainty. While there is significant work on how to plan under state uncertainty for low-DOF robots, existing methods cannot be easily translated into the high-DOF case, due to the complex geometry of the robot's body and its environment. In this paper, we present a method that enhances optimization-based motion planners to produce robust trajectories for high-DOF robots for convex obstacles. Our approach introduces robustness into planners that are based on sequential convex programming: We reformulate each convex subproblem as a robust optimization problem that ``protects'' the solution against deviations due to sensing uncertainty. The parameters of the robust problem are estimated by sampling from the distribution of noisy obstacles, and performing a first-order approximation of the signed distance function. The original merit function is updated to account for the new costs of the robust formulation at every step. The effectiveness of our approach is demonstrated on two simulated experiments that involve a full-body square robot moving in randomly generated scenes and a 7-DOF Fetch robot performing tabletop operations. The results show nearly zero probability of collision for a reasonable range of the noise parameters for Gaussian and Uniform uncertainty.}, author = {Quintero-Pe{\~n}a, Carlos and Kyrillidis, Anastasios and Kavraki, Lydia E.}, booktitle = {2021 IEEE International Conference on Robotics and Automation (ICRA)}, month = jun, title = {{Robust Optimization-based Motion Planning for high-DOF Robots under Sensing Uncertainty}}, year = {2021}, pages = {9724-9730}, doi = {10.1109/ICRA48506.2021.9560917}, keywords = {uncertainty} }
@article{pairet2021-path-planning-for-manipulation, abstract = {Robotic systems may frequently come across similar manipulation planning problems that result in similar motion plans. Instead of planning each problem from scratch, it is preferable to leverage previously computed motion plans, i.e., experiences, to ease the planning. Different approaches have been proposed to exploit prior information on novel task instances. These methods, however, rely on a vast repertoire of experiences and fail when none relates closely to the current problem. Thus, an open challenge is the ability to generalise prior experiences to task instances that do not necessarily resemble the prior. This work tackles the above challenge with the proposition that experiences are “decomposable” and “malleable”, i.e., parts of an experience are suitable to relevantly explore the connectivity of the robot-task space even in non-experienced regions. Two new planners result from this insight: experience-driven random trees (ERT) and its bi-directional version ERTConnect. These planners adopt a tree sampling-based strategy that incrementally extracts and modulates parts of a single path experience to compose a valid motion plan. We demonstrate our method on task instances that significantly differ from the prior experiences, and compare with related state-of-the-art experience-based planners. While their repairing strategies fail to generalise priors of tens of experiences, our planner, with a single experience, significantly outperforms them in both success rate and planning time. Our planners are implemented and freely available in the Open Motion Planning Library.}, title = {Path Planning for Manipulation Using Experience-Driven Random Trees}, volume = {6}, issn = {2377-3774}, url = {http://dx.doi.org/10.1109/LRA.2021.3063063}, doi = {10.1109/lra.2021.3063063}, number = {2}, journal = {IEEE Robotics and Automation Letters}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, author = {Pairet, Eric and Chamzas, Constantinos and Petillot, Yvan R. and Kavraki, Lydia E.}, year = {2021}, month = apr, pages = {3295–3302} }