@article{10922,
  abstract  = {We study structural rigidity for assemblies with mechanical joints. Existing methods identify whether an assembly is structurally rigid by assuming parts are perfectly rigid. Yet, an assembly identified as rigid may not be that ``rigid'' in practice, and existing methods cannot quantify how rigid an assembly is. We address this limitation by developing a new measure, worst-case rigidity, to quantify the rigidity of an assembly as the largest possible deformation that the assembly undergoes for arbitrary external loads of fixed magnitude. Computing worst-case rigidity is non-trivial due to non-rigid parts and different joint types. We thus formulate a new computational approach by encoding parts and their connections into a stiffness matrix, in which parts are modeled as deformable objects and joints as soft constraints. Based on this, we formulate worst-case rigidity analysis as an optimization that seeks the worst-case deformation of an assembly for arbitrary external loads, and solve the optimization problem via an eigenanalysis. Furthermore, we present methods to optimize the geometry and topology of various assemblies to enhance their rigidity, as guided by our rigidity measure. In the end, we validate our method on a variety of assembly structures with physical experiments and demonstrate its effectiveness by designing and fabricating several structurally rigid assemblies.},
  author    = {Liu, Zhenyuan and Hu, Jingyu and Xu, Hao and Song, Peng and Zhang, Ran and Bickel, Bernd and Fu, Chi-Wing},
  title     = {Worst-Case Rigidity Analysis and Optimization for Assemblies with Mechanical Joints},
  journal   = {Computer Graphics Forum},
  year      = {2022},
  volume    = {41},
  number    = {2},
  pages     = {507--519},
  publisher = {Wiley},
  issn      = {1467-8659},
  doi       = {10.1111/cgf.14490},
}

@article{11735,
  abstract  = {Interlocking puzzles are intriguing geometric games where the puzzle pieces are held together based on their geometric arrangement, preventing the puzzle from falling apart. High-level-of-difficulty, or simply high-level, interlocking puzzles are a subclass of interlocking puzzles that require multiple moves to take out the first subassembly from the puzzle. Solving a high-level interlocking puzzle is a challenging task since one has to explore many different configurations of the puzzle pieces until reaching a configuration where the first subassembly can be taken out. Designing a high-level interlocking puzzle with a user-specified level of difficulty is even harder since the puzzle pieces have to be interlocking in all the configurations before the first subassembly is taken out. In this paper, we present a computational approach to design high-level interlocking puzzles. The core idea is to represent all possible configurations of an interlocking puzzle as well as transitions among these configurations using a rooted, undirected graph called a disassembly graph and leverage this graph to find a disassembly plan that requires a minimal number of moves to take out the first subassembly from the puzzle. At the design stage, our algorithm iteratively constructs the geometry of each puzzle piece to expand the disassembly graph incrementally, aiming to achieve a user-specified level of difficulty. We show that our approach allows efficient generation of high-level interlocking puzzles of various shape complexities, including new solutions not attainable by state-of-the-art approaches.},
  author    = {Chen, Rulin and Wang, Ziqi and Song, Peng and Bickel, Bernd},
  title     = {Computational Design of High-Level Interlocking Puzzles},
  journal   = {ACM Transactions on Graphics},
  year      = {2022},
  volume    = {41},
  number    = {4},
  publisher = {Association for Computing Machinery},
  issn      = {1557-7368},
  doi       = {10.1145/3528223.3530071},
}

@article{11993,
  abstract  = {Moulding refers to a set of manufacturing techniques in which a mould, usually a cavity or a solid frame, is used to shape a liquid or pliable material into an object of the desired shape. The popularity of moulding comes from its effectiveness, scalability and versatility in terms of employed materials. Its relevance as a fabrication process is demonstrated by the extensive literature covering different aspects related to mould design, from material flow simulation to the automation of mould geometry design. In this state-of-the-art report, we provide an extensive review of the automatic methods for the design of moulds, focusing on contributions from a geometric perspective. We classify existing mould design methods based on their computational approach and the nature of their target moulding process. We summarize the relationships between computational approaches and moulding techniques, highlighting their strengths and limitations. Finally, we discuss potential future research directions.},
  author    = {Alderighi, Thomas and Malomo, Luigi and Auzinger, Thomas and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico},
  title     = {State of the Art in Computational Mould Design},
  journal   = {Computer Graphics Forum},
  keywords  = {Computer Graphics and Computer-Aided Design},
  year      = {2022},
  volume    = {41},
  number    = {6},
  pages     = {435--452},
  publisher = {Wiley},
  issn      = {1467-8659},
  doi       = {10.1111/cgf.14581},
}

@inproceedings{12452,
  abstract  = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handing both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.},
  author    = {Rao, Pramod and B R, Mallikarjun and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed},
  title     = {{VoRF}: Volumetric Relightable Faces},
  booktitle = {33rd British Machine Vision Conference},
  year      = {2022},
  location  = {London, United Kingdom},
  publisher = {British Machine Vision Association and Society for Pattern Recognition},
}

@unpublished{11943,
  abstract  = {Complex wiring between neurons underlies the information-processing network enabling all brain functions, including cognition and memory. For understanding how the network is structured, processes information, and changes over time, comprehensive visualization of the architecture of living brain tissue with its cellular and molecular components would open up major opportunities. However, electron microscopy (EM) provides nanometre-scale resolution required for full in-silico reconstruction [1--5], yet is limited to fixed specimens and static representations. Light microscopy allows live observation, with super-resolution approaches [6--12] facilitating nanoscale visualization, but comprehensive 3D-reconstruction of living brain tissue has been hindered by tissue photo-burden, photobleaching, insufficient 3D-resolution, and inadequate signal-to-noise ratio (SNR). Here we demonstrate saturated reconstruction of living brain tissue. We developed an integrated imaging and analysis technology, adapting stimulated emission depletion (STED) microscopy [6,13] in extracellularly labelled tissue [14] for high SNR and near-isotropic resolution. Centrally, a two-stage deep-learning approach leveraged previously obtained information on sample structure to drastically reduce photo-burden and enable automated volumetric reconstruction down to single synapse level. Live reconstruction provides unbiased analysis of tissue architecture across time in relation to functional activity and targeted activation, and contextual understanding of molecular labelling. This adoptable technology will facilitate novel insights into the dynamic functional architecture of living brain tissue.},
  author    = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G},
  title     = {Saturated Reconstruction of Living Brain Tissue},
  note      = {Preprint, bioRxiv, Cold Spring Harbor Laboratory},
  year      = {2022},
  doi       = {10.1101/2022.03.16.484431},
}