@article{12972, abstract = {Embroidery is a long-standing and high-quality approach to making logos and images on textiles. Nowadays, it can also be performed via automated machines that weave threads with high spatial accuracy. A characteristic feature of the appearance of the threads is a high degree of anisotropy. The anisotropic behavior is caused by depositing thin but long strings of thread. As a result, the stitched patterns convey both color and direction. Artists leverage this anisotropic behavior to enhance pure color images with textures, illusions of motion, or depth cues. However, designing colorful embroidery patterns with prescribed directionality is a challenging task, one usually requiring an expert designer. In this work, we propose an interactive algorithm that generates machine-fabricable embroidery patterns from multi-chromatic images equipped with user-specified directionality fields. We cast the problem of finding a stitching pattern into vector theory. To find a suitable stitching pattern, we extract sources and sinks from the divergence field of the vector field extracted from the input and use them to trace streamlines. We further optimize the streamlines to guarantee a smooth and connected stitching pattern. The generated patterns approximate the color distribution constrained by the directionality field. To allow for further artistic control, the trade-off between color match and directionality match can be interactively explored via an intuitive slider. We showcase our approach by fabricating several embroidery paths.}, author = {Liu, Zhenyuan and Piovarci, Michael and Hafner, Christian and Charrondiere, Raphael and Bickel, Bernd}, issn = {1467-8659}, journal = {Computer Graphics Forum}, keywords = {embroidery, design, directionality, density, image}, location = {Saarbrücken, Germany}, number = {2}, pages = {397--409}, publisher = {Wiley}, title = {{Directionality-aware design of embroidery patterns}}, doi = {10.1111/cgf.14770}, volume = {42}, year = {2023}, } @inproceedings{14241, abstract = {We present a technique to optimize the reflectivity of a surface while preserving its overall shape. The naïve optimization of the mesh vertices using the gradients of reflectivity simulations results in undesirable distortion. In contrast, our robust formulation optimizes the surface normal as an independent variable that bridges the reflectivity term with differential rendering, and the regularization term with as-rigid-as-possible elastic energy. We further adaptively subdivide the input mesh to improve the convergence. Consequently, our method can minimize the retroreflectivity of a wide range of input shapes, resulting in sharply creased shapes ubiquitous among stealth aircraft and Sci-Fi vehicles. Furthermore, by changing the reward for the outgoing light directions, our method can be applied to other reflectivity design tasks, such as the optimization of architectural walls to concentrate light in a specific region.
We have tested the proposed method using light-transport simulations and real-world 3D-printed objects.}, author = {Tojo, Kenji and Shamir, Ariel and Bickel, Bernd and Umetani, Nobuyuki}, booktitle = {SIGGRAPH 2023 Conference Proceedings}, isbn = {9798400701597}, location = {Los Angeles, CA, United States}, publisher = {Association for Computing Machinery}, title = {{Stealth shaper: Reflectivity optimization as surface stylization}}, doi = {10.1145/3588432.3591542}, year = {2023}, } @article{14488, abstract = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with individual latent spaces for identity and illumination. The prior model is learned in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis, even when applied to unseen subjects under uncontrolled illumination. This work is an extension of Rao et al. (VoRF: Volumetric Relightable Faces 2022). We provide an extensive evaluation and ablative studies of our model and also provide an application, where any face can be relighted using textual input.}, author = {Rao, Pramod and Mallikarjun, B. R. and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Pfister, Hanspeter and Matusik, Wojciech and Zhan, Fangneng and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed}, issn = {1573-1405}, journal = {International Journal of Computer Vision}, publisher = {Springer Nature}, title = {{A deeper analysis of volumetric relightable faces}}, doi = {10.1007/s11263-023-01899-3}, year = {2023}, } @article{14628, abstract = {We introduce a compact, intuitive procedural graph representation for cellular metamaterials, which are small-scale, tileable structures that can be architected to exhibit many useful material properties. Because the structures’ “architectures” vary widely—with elements such as beams, thin shells, and solid bulks—it is difficult to explore them using existing representations. Generic approaches like voxel grids are versatile, but it is cumbersome to represent and edit individual structures; architecture-specific approaches address these issues, but are incompatible with one another. By contrast, our procedural graph succinctly represents the construction process for any structure using a simple skeleton annotated with spatially varying thickness.
To express the highly constrained triply periodic minimal surfaces (TPMS) in this manner, we present the first fully automated version of the conjugate surface construction method, which allows novices to create complex TPMS from intuitive input. We demonstrate our representation’s expressiveness, accuracy, and compactness by constructing a wide range of established structures and hundreds of novel structures with diverse architectures and material properties. We also conduct a user study to verify our representation’s ease-of-use and ability to expand engineers’ capacity for exploration.}, author = {Makatura, Liane and Wang, Bohan and Chen, Yi-Lu and Deng, Bolei and Wojtan, Christopher J and Bickel, Bernd and Matusik, Wojciech}, issn = {0730-0301}, journal = {ACM Transactions on Graphics}, keywords = {Computer Graphics and Computer-Aided Design}, number = {5}, publisher = {Association for Computing Machinery}, title = {{Procedural metamaterials: A unified procedural graph for metamaterial design}}, doi = {10.1145/3605389}, volume = {42}, year = {2023}, } @inproceedings{12976, abstract = {3D printing based on continuous deposition of materials, such as filament-based 3D printing, has seen widespread adoption thanks to its versatility in working with a wide range of materials. An important shortcoming of this type of technology is its limited multi-material capabilities. While there are simple hardware designs that enable multi-material printing in principle, the required software is heavily underdeveloped. A typical hardware design fuses together individual materials fed into a single chamber from multiple inlets before they are deposited. This design, however, introduces a time delay between the intended material mixture and its actual deposition. In this work, inspired by diverse path planning research in robotics, we show that this mechanical challenge can be addressed via improved printer control. We propose to formulate the search for optimal multi-material printing policies in a reinforcement learning setup. We put forward a simple numerical deposition model that takes into account the non-linear material mixing and delayed material deposition. To validate our system, we focus on color fabrication, a problem known for its strict requirements for varying material mixtures at a high spatial frequency. We demonstrate that our learned control policy outperforms state-of-the-art hand-crafted algorithms.}, author = {Liao, Kang and Tricard, Thibault and Piovarci, Michael and Seidel, Hans-Peter and Babaei, Vahid}, booktitle = {2023 IEEE International Conference on Robotics and Automation}, issn = {1050-4729}, keywords = {reinforcement learning, deposition, control, color, multi-filament}, location = {London, United Kingdom}, pages = {12345--12352}, publisher = {IEEE}, title = {{Learning deposition policies for fused multi-material 3D printing}}, doi = {10.1109/ICRA48891.2023.10160465}, volume = {2023}, year = {2023}, } @article{13265, abstract = {In this study, we propose a computational framework for optimizing the continuity of the toolpath in fabricating surface models on an extrusion-based 3D printer. Toolpath continuity is a critical issue that influences both the quality and the efficiency of extrusion-based fabrication. Transfer moves lead to rough and bumpy surfaces, a problem that worsens for materials with high viscosity, such as clay. For surface models, the effects of continuity on surface quality and model stability are even more severe.
We introduce a criterion called the one-path patch (OPP) to represent a patch on the surface of the shell that can be traversed along one path by considering the constraints on fabrication. We study the properties of the OPPs and their merging operations to propose a bottom-up OPP merging procedure to decompose the given shell surface into a minimal number of OPPs, and to generate the “as-continuous-as-possible” (ACAP) toolpath. Furthermore, we augment the path planning algorithm with a curved-layer printing scheme that reduces staircase defects and improves the continuity of the toolpath by connecting multiple segments. We evaluated the ACAP algorithm on ceramic and thermoplastic materials, and the results showed that it improves the fabrication of surface models in terms of both efficiency and surface quality.}, author = {Zhong, Fanchao and Xu, Yonglai and Zhao, Haisen and Lu, Lin}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {3}, publisher = {Association for Computing Machinery}, title = {{As-Continuous-As-Possible extrusion-based fabrication of surface models}}, doi = {10.1145/3575859}, volume = {42}, year = {2023}, } @article{13267, abstract = {Three-dimensional (3D) reconstruction of living brain tissue down to an individual synapse level would create opportunities for decoding the dynamics and structure–function relationships of the brain’s complex and dense information processing network; however, this has been hindered by insufficient 3D resolution, inadequate signal-to-noise ratio and prohibitive light burden in optical imaging, whereas electron microscopy is inherently static. Here we solved these challenges by developing an integrated optical/machine-learning technology, LIONESS (live information-optimized nanoscopy enabling saturated segmentation). This leverages optical modifications to stimulated emission depletion microscopy in comprehensively, extracellularly labeled tissue and previous information on sample structure via machine learning to simultaneously achieve isotropic super-resolution, high signal-to-noise ratio and compatibility with living tissue. This allows dense deep-learning-based instance segmentation and 3D reconstruction at a synapse level, incorporating molecular, activity and morphodynamic information. LIONESS opens up avenues for studying the dynamic functional (nano-)architecture of living brain tissue.}, author = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Lyudchik, Julia and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G.N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G}, issn = {1548-7105}, journal = {Nature Methods}, pages = {1256--1265}, publisher = {Springer Nature}, title = {{Dense 4D nanoscale reconstruction of living brain tissue}}, doi = {10.1038/s41592-023-01936-6}, volume = {20}, year = {2023}, } @inproceedings{14798, abstract = {A faithful reproduction of gloss is inherently difficult because of the limited dynamic range, peak luminance, and 3D capabilities of display devices. This work investigates how the display capabilities affect gloss appearance with respect to a real-world reference object.
To this end, we employ an accurate imaging pipeline to achieve a perceptual gloss match between a virtual and real object presented side-by-side on an augmented-reality high-dynamic-range (HDR) stereoscopic display, which has not been previously attained to this extent. Based on this precise gloss reproduction, we conduct a series of gloss matching experiments to study how gloss perception degrades based on individual factors: object albedo, display luminance, dynamic range, stereopsis, and tone mapping. We support the study with a detailed analysis of individual factors, followed by an in-depth discussion on the observed perceptual effects. Our experiments demonstrate that stereoscopic presentation has a limited effect on the gloss matching task on our HDR display. However, both reduced luminance and dynamic range of the display reduce the perceived gloss. This means that the visual system cannot compensate for the changes in gloss appearance across luminance (lack of gloss constancy), and the tone mapping operator should be carefully selected when reproducing gloss on a low dynamic range (LDR) display.}, author = {Chen, Bin and Jindal, Akshay and Piovarci, Michael and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana and Mantiuk, Rafał K.}, booktitle = {SIGGRAPH Asia 2023 Conference Papers}, isbn = {9798400703157}, location = {Sydney, Australia}, publisher = {Association for Computing Machinery}, title = {{The effect of display capabilities on the gloss consistency between real and virtual objects}}, doi = {10.1145/3610548.3618226}, year = {2023}, } @article{13049, abstract = {We propose a computational design approach for covering a surface with individually addressable RGB LEDs, effectively forming a low-resolution surface screen. To achieve a low-cost and scalable approach, we propose creating designs from flat PCB panels bent in-place along the surface of a 3D printed core. Working with standard rigid PCBs enables the use of established PCB manufacturing services, allowing the fabrication of designs with several hundred LEDs. Our approach optimizes the PCB geometry for folding, and then jointly optimizes the LED packing, circuit and routing, solving a challenging layout problem under strict manufacturing requirements. Unlike paper, PCBs cannot bend beyond a certain point without breaking. Therefore, we introduce parametric cut patterns acting as hinges, designed to allow bending while remaining compact. To tackle the joint optimization of placement, circuit and routing, we propose a specialized algorithm that splits the global problem into one sub-problem per triangle, which is then individually solved. Our technique generates PCB blueprints in a completely automated way. After being fabricated by a PCB manufacturing service, the boards are bent and glued by the user onto the 3D printed support.
We demonstrate our technique on a range of physical models and virtual examples, creating intricate surface light patterns from hundreds of LEDs.}, author = {Freire, Marco and Bhargava, Manas and Schreck, Camille and Hugron, Pierre-Alexandre and Bickel, Bernd and Lefebvre, Sylvain}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {PCB design and layout, Mesh geometry models}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {Association for Computing Machinery}, title = {{PCBend: Light up your 3D shapes with foldable circuit boards}}, doi = {10.1145/3592411}, volume = {42}, year = {2023}, } @article{12984, abstract = {Tattoos are a highly popular medium, with both artistic and medical applications. Although the mechanical process of tattoo application has evolved historically, the results are reliant on the artisanal skill of the artist. This can be especially challenging for some skin tones, or in cases where artists lack experience. We provide the first systematic overview of tattooing as a computational fabrication technique. We built an automated tattooing rig and a recipe for the creation of silicone sheets mimicking realistic skin tones, which allowed us to create an accurate model predicting tattoo appearance. This enables several exciting applications including tattoo previewing, color retargeting, novel ink spectra optimization, color-accurate prosthetics, and more.}, author = {Piovarci, Michael and Chapiro, Alexandre and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {appearance, modeling, reproduction, tattoo, skin color, gamut mapping, ink-optimization, prosthetic}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Skin-Screen: A computational fabrication framework for color tattoos}}, doi = {10.1145/3592432}, volume = {42}, year = {2023}, } @inproceedings{12979, abstract = {Color and gloss are fundamental aspects of surface appearance. State-of-the-art fabrication techniques can manipulate both properties of the printed 3D objects. However, in the context of appearance reproduction, perceptual aspects of color and gloss are usually handled separately, even though previous perceptual studies suggest their interaction. Our work is motivated by previous studies demonstrating a perceived color shift due to a change in the object's gloss, i.e., two samples with the same color but different surface gloss appear as if they have different colors. In this paper, we conduct new experiments which support this observation and provide insights into the magnitude and direction of the perceived color change. We use the observations as guidance to design a new method that estimates and corrects the color shift enabling the fabrication of objects with the same perceived color but different surface gloss. We formulate the problem as an optimization procedure solved using differentiable rendering. We evaluate the effectiveness of our method in perceptual experiments with 3D objects fabricated using a multi-material 3D printer and demonstrate potential applications.
}, author = {Condor, Jorge and Piovarci, Michael and Bickel, Bernd and Didyk, Piotr}, booktitle = {SIGGRAPH 2023 Conference Proceedings}, isbn = {9798400701597}, keywords = {color, gloss, perception, color compensation, color management}, location = {Los Angeles, CA, United States}, publisher = {Association for Computing Machinery}, title = {{Gloss-aware color correction for 3D printing}}, doi = {10.1145/3588432.3591546}, year = {2023}, } @phdthesis{12897, abstract = {Inverse design problems in fabrication-aware shape optimization are typically solved on discrete representations such as polygonal meshes. This thesis argues that there are benefits to treating these problems in the same domain as human designers, namely, the parametric one. One reason is that discretizing a parametric model usually removes the capability of making further manual changes to the design, because the human intent is captured by the shape parameters. Beyond this, knowledge about a design problem can sometimes reveal a structure that is present in a smooth representation, but is fundamentally altered by discretizing. In this case, working in the parametric domain may even simplify the optimization task. We present two lines of research that explore both of these aspects of fabrication-aware shape optimization on parametric representations. The first project studies the design of plane elastic curves and Kirchhoff rods, which are common mathematical models for describing the deformation of thin elastic rods such as beams, ribbons, cables, and hair. Our main contribution is a characterization of all curved shapes that can be attained by bending and twisting elastic rods having a stiffness that is allowed to vary across the length. Elements like these can be manufactured using digital fabrication devices such as 3D printers and digital cutters, and have applications in free-form architecture and soft robotics. We show that the family of curved shapes that can be produced this way admits a geometric description that is concise and computationally convenient. In the case of plane curves, the geometric description is intuitive enough to allow a designer to determine whether a curved shape is physically achievable by visual inspection alone. We also present shape optimization algorithms that convert a user-defined curve in the plane or in three dimensions into the geometry of an elastic rod that will naturally deform to follow this curve when its endpoints are attached to a support structure. Implemented in an interactive software design tool, the rod geometry is generated in real time as the user edits a curve and enables fast prototyping. The second project tackles the problem of general-purpose shape optimization on CAD models using a novel variant of the extended finite element method (XFEM). Our goal is the decoupling between the simulation mesh and the CAD model, so no geometry-dependent meshing or remeshing needs to be performed when the CAD parameters change during optimization. This is achieved by discretizing the embedding space of the CAD model, and using a new high-accuracy numerical integration method to enable XFEM on free-form elements bounded by the parametric surface patches of the model. Our simulation is differentiable from the CAD parameters to the simulation output, which enables us to use off-the-shelf gradient-based optimization procedures.
The result is a method that fits seamlessly into the CAD workflow because it works on the same representation as the designer, enabling the alternation of manual editing and fabrication-aware optimization at will.}, author = {Hafner, Christian}, isbn = {978-3-99078-031-2}, issn = {2663-337X}, pages = {180}, publisher = {Institute of Science and Technology Austria}, title = {{Inverse shape design with parametric representations: Kirchhoff rods and parametric surface models}}, doi = {10.15479/at:ista:12897}, year = {2023}, } @article{13188, abstract = {The Kirchhoff rod model describes the bending and twisting of slender elastic rods in three dimensions, and has been widely studied to enable the prediction of how a rod will deform, given its geometry and boundary conditions. In this work, we study a number of inverse problems with the goal of computing the geometry of a straight rod that will automatically deform to match a curved target shape after attaching its endpoints to a support structure. Our solution lets us finely control the static equilibrium state of a rod by varying the cross-sectional profiles along its length. We also show that the set of physically realizable equilibrium states admits a concise geometric description in terms of linear line complexes, which leads to very efficient computational design algorithms. Implemented in an interactive software tool, they allow us to convert three-dimensional hand-drawn spline curves to elastic rods, and give feedback about the feasibility and practicality of a design in real time. We demonstrate the efficacy of our method by designing and manufacturing several physical prototypes with applications to interior design and soft robotics.}, author = {Hafner, Christian and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, keywords = {Computer Graphics, Computational Design, Computational Geometry, Shape Modeling}, number = {5}, publisher = {Association for Computing Machinery}, title = {{The design space of Kirchhoff rods}}, doi = {10.1145/3606033}, volume = {42}, year = {2023}, } @inproceedings{12135, abstract = {A good match of material appearance between real-world objects and their digital on-screen representations is critical for many applications such as fabrication, design, and e-commerce. However, faithful appearance reproduction is challenging, especially for complex phenomena, such as gloss. In most cases, the view-dependent nature of gloss and the range of luminance values required for reproducing glossy materials exceed the current capabilities of display devices. As a result, appearance reproduction poses significant problems even with accurately rendered images. This paper studies the gap between the gloss perceived from real-world objects and their digital counterparts. Based on our psychophysical experiments on a wide range of 3D printed samples and their corresponding photographs, we derive insights on the influence of geometry, illumination, and the display’s brightness and measure the change in gloss appearance due to the display limitations.
Our evaluation experiments demonstrate that using the prediction to correct material parameters in a rendering system improves the match of gloss appearance between real objects and their visualization on a display device.}, author = {Chen, Bin and Piovarci, Michael and Wang, Chao and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, booktitle = {SIGGRAPH Asia 2022 Conference Papers}, isbn = {9781450394703}, location = {Daegu, South Korea}, publisher = {Association for Computing Machinery}, title = {{Gloss management for consistent reproduction of real and virtual objects}}, doi = {10.1145/3550469.3555406}, volume = {2022}, year = {2022}, } @article{11442, abstract = {Enabling additive manufacturing to employ a wide range of novel, functional materials can be a major boost to this technology. However, making such materials printable requires painstaking trial-and-error by an expert operator, as they typically tend to exhibit peculiar rheological or hysteresis properties. Even in the case of successfully finding the process parameters, there is no guarantee of print-to-print consistency due to material differences between batches. These challenges make closed-loop feedback an attractive option where the process parameters are adjusted on-the-fly. There are several challenges for designing an efficient controller: the deposition parameters are complex and highly coupled, artifacts occur after long time horizons, simulating the deposition is computationally costly, and learning on hardware is intractable. In this work, we demonstrate the feasibility of learning a closed-loop control policy for additive manufacturing using reinforcement learning. We show that approximate, but efficient, numerical simulation is sufficient as long as it allows learning the behavioral patterns of deposition that translate to real-world experiences. In combination with reinforcement learning, our model can be used to discover control policies that outperform baseline controllers. Furthermore, the recovered policies have a minimal sim-to-real gap. We showcase this by applying our control policy in-vivo on a single-layer, direct ink writing printer. }, author = {Piovarci, Michael and Foshey, Michael and Xu, Jie and Erps, Timothy and Babaei, Vahid and Didyk, Piotr and Rusinkiewicz, Szymon and Matusik, Wojciech and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Closed-loop control of direct ink writing via reinforcement learning}}, doi = {10.1145/3528223.3530144}, volume = {41}, year = {2022}, } @article{10922, abstract = {We study structural rigidity for assemblies with mechanical joints. Existing methods identify whether an assembly is structurally rigid by assuming parts are perfectly rigid. Yet, an assembly identified as rigid may not be that “rigid” in practice, and existing methods cannot quantify how rigid an assembly is. We address this limitation by developing a new measure, worst-case rigidity, to quantify the rigidity of an assembly as the largest possible deformation that the assembly undergoes for arbitrary external loads of fixed magnitude. Computing worst-case rigidity is non-trivial due to non-rigid parts and different joint types. We thus formulate a new computational approach by encoding parts and their connections into a stiffness matrix, in which parts are modeled as deformable objects and joints as soft constraints. 
Based on this, we formulate worst-case rigidity analysis as an optimization that seeks the worst-case deformation of an assembly for arbitrary external loads, and solve the optimization problem via an eigenanalysis. Furthermore, we present methods to optimize the geometry and topology of various assemblies to enhance their rigidity, as guided by our rigidity measure. In the end, we validate our method on a variety of assembly structures with physical experiments and demonstrate its effectiveness by designing and fabricating several structurally rigid assemblies.}, author = {Liu, Zhenyuan and Hu, Jingyu and Xu, Hao and Song, Peng and Zhang, Ran and Bickel, Bernd and Fu, Chi-Wing}, issn = {1467-8659}, journal = {Computer Graphics Forum}, number = {2}, pages = {507--519}, publisher = {Wiley}, title = {{Worst-case rigidity analysis and optimization for assemblies with mechanical joints}}, doi = {10.1111/cgf.14490}, volume = {41}, year = {2022}, } @article{11735, abstract = {Interlocking puzzles are intriguing geometric games where the puzzle pieces are held together based on their geometric arrangement, preventing the puzzle from falling apart. High-level-of-difficulty, or simply high-level, interlocking puzzles are a subclass of interlocking puzzles that require multiple moves to take out the first subassembly from the puzzle. Solving a high-level interlocking puzzle is a challenging task since one has to explore many different configurations of the puzzle pieces until reaching a configuration where the first subassembly can be taken out. Designing a high-level interlocking puzzle with a user-specified level of difficulty is even harder since the puzzle pieces have to be interlocking in all the configurations before the first subassembly is taken out. In this paper, we present a computational approach to design high-level interlocking puzzles. The core idea is to represent all possible configurations of an interlocking puzzle as well as transitions among these configurations using a rooted, undirected graph called a disassembly graph and leverage this graph to find a disassembly plan that requires a minimal number of moves to take out the first subassembly from the puzzle. At the design stage, our algorithm iteratively constructs the geometry of each puzzle piece to expand the disassembly graph incrementally, aiming to achieve a user-specified level of difficulty. We show that our approach allows efficient generation of high-level interlocking puzzles of various shape complexities, including new solutions not attainable by state-of-the-art approaches.}, author = {Chen, Rulin and Wang, Ziqi and Song, Peng and Bickel, Bernd}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{Computational design of high-level interlocking puzzles}}, doi = {10.1145/3528223.3530071}, volume = {41}, year = {2022}, } @article{11993, abstract = {Moulding refers to a set of manufacturing techniques in which a mould, usually a cavity or a solid frame, is used to shape a liquid or pliable material into an object of the desired shape. The popularity of moulding comes from its effectiveness, scalability and versatility in terms of employed materials. Its relevance as a fabrication process is demonstrated by the extensive literature covering different aspects related to mould design, from material flow simulation to the automation of mould geometry design. 
In this state-of-the-art report, we provide an extensive review of the automatic methods for the design of moulds, focusing on contributions from a geometric perspective. We classify existing mould design methods based on their computational approach and the nature of their target moulding process. We summarize the relationships between computational approaches and moulding techniques, highlighting their strengths and limitations. Finally, we discuss potential future research directions.}, author = {Alderighi, Thomas and Malomo, Luigi and Auzinger, Thomas and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico}, issn = {1467-8659}, journal = {Computer Graphics Forum}, keywords = {Computer Graphics and Computer-Aided Design}, number = {6}, pages = {435--452}, publisher = {Wiley}, title = {{State of the art in computational mould design}}, doi = {10.1111/cgf.14581}, volume = {41}, year = {2022}, } @inproceedings{12452, abstract = {Portrait viewpoint and illumination editing is an important problem with several applications in VR/AR, movies, and photography. Comprehensive knowledge of geometry and illumination is critical for obtaining photorealistic results. Current methods are unable to explicitly model in 3D while handling both viewpoint and illumination editing from a single image. In this paper, we propose VoRF, a novel approach that can take even a single portrait image as input and relight human heads under novel illuminations that can be viewed from arbitrary viewpoints. VoRF represents a human head as a continuous volumetric field and learns a prior model of human heads using a coordinate-based MLP with separate latent spaces for identity and illumination. The prior model is learnt in an auto-decoder manner over a diverse class of head shapes and appearances, allowing VoRF to generalize to novel test identities from a single input image. Additionally, VoRF has a reflectance MLP that uses the intermediate features of the prior model for rendering One-Light-at-A-Time (OLAT) images under novel views. We synthesize novel illuminations by combining these OLAT images with target environment maps. Qualitative and quantitative evaluations demonstrate the effectiveness of VoRF for relighting and novel view synthesis even when applied to unseen subjects under uncontrolled illuminations.}, author = {Rao, Pramod and B R, Mallikarjun and Fox, Gereon and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Tewari, Ayush and Theobalt, Christian and Elgharib, Mohamed}, booktitle = {33rd British Machine Vision Conference}, location = {London, United Kingdom}, publisher = {British Machine Vision Association and Society for Pattern Recognition}, title = {{VoRF: Volumetric Relightable Faces}}, year = {2022}, } @unpublished{11943, abstract = {Complex wiring between neurons underlies the information-processing network enabling all brain functions, including cognition and memory. For understanding how the network is structured, processes information, and changes over time, comprehensive visualization of the architecture of living brain tissue with its cellular and molecular components would open up major opportunities. However, electron microscopy (EM) provides nanometre-scale resolution required for full in-silico reconstruction, yet is limited to fixed specimens and static representations.
Light microscopy allows live observation, with super-resolution approaches facilitating nanoscale visualization, but comprehensive 3D-reconstruction of living brain tissue has been hindered by tissue photo-burden, photobleaching, insufficient 3D-resolution, and inadequate signal-to-noise ratio (SNR). Here we demonstrate saturated reconstruction of living brain tissue. We developed an integrated imaging and analysis technology, adapting stimulated emission depletion (STED) microscopy in extracellularly labelled tissue for high SNR and near-isotropic resolution. Centrally, a two-stage deep-learning approach leveraged previously obtained information on sample structure to drastically reduce photo-burden and enable automated volumetric reconstruction down to single synapse level. Live reconstruction provides unbiased analysis of tissue architecture across time in relation to functional activity and targeted activation, and contextual understanding of molecular labelling. This adoptable technology will facilitate novel insights into the dynamic functional architecture of living brain tissue.}, author = {Velicky, Philipp and Miguel Villalba, Eder and Michalska, Julia M and Wei, Donglai and Lin, Zudi and Watson, Jake and Troidl, Jakob and Beyer, Johanna and Ben Simon, Yoav and Sommer, Christoph M and Jahr, Wiebke and Cenameri, Alban and Broichhagen, Johannes and Grant, Seth G. N. and Jonas, Peter M and Novarino, Gaia and Pfister, Hanspeter and Bickel, Bernd and Danzl, Johann G}, booktitle = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, title = {{Saturated reconstruction of living brain tissue}}, doi = {10.1101/2022.03.16.484431}, year = {2022}, }