@article{9819,
  abstract = {Photorealistic editing of head portraits is a challenging task, as humans are very sensitive to inconsistencies in faces. We present an approach for high-quality intuitive editing of the camera viewpoint and scene illumination (parameterised with an environment map) in a portrait image. This requires our method to capture and control the full reflectance field of the person in the image. Most editing approaches rely on supervised learning using training data captured with setups such as light and camera stages. Such datasets are expensive to acquire, are not readily available, and do not capture all the rich variations of in-the-wild portrait images. In addition, most supervised approaches only focus on relighting, and do not allow camera viewpoint editing. Thus, they only capture and control a subset of the reflectance field. Recently, portrait editing has been demonstrated by operating in the generative model space of StyleGAN. While such approaches do not require direct supervision, there is a significant loss of quality compared to the supervised approaches. In this paper, we present a method which learns from limited supervised training data. The training images only include people in a fixed neutral expression with eyes closed, without much hair or background variation. Each person is captured under 150 one-light-at-a-time conditions and under 8 camera poses. Instead of training directly in the image space, we design a supervised problem which learns transformations in the latent space of StyleGAN. This combines the best of supervised learning and generative adversarial modeling. We show that the StyleGAN prior allows for generalisation to different expressions, hairstyles and backgrounds. This produces high-quality photorealistic results for in-the-wild images and significantly outperforms existing methods. Our approach can edit the illumination and pose simultaneously, and runs at interactive rates.},
  author = {Mallikarjun, B. R. and Tewari, Ayush and Dib, Abdallah and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Chevallier, Louis and Elgharib, Mohamed A. and Theobalt, Christian},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{PhotoApp: Photorealistic appearance editing of head portraits}},
  doi = {10.1145/3450626.3459765},
  volume = {40},
  year = {2021},
}

@article{9820,
  abstract = {Material appearance hinges not only on material reflectance properties but also on surface geometry and illumination. The unlimited number of potential combinations between these factors makes understanding and predicting material appearance a very challenging task. In this work, we collect a large-scale dataset of perceptual ratings of appearance attributes with more than 215,680 responses for 42,120 distinct combinations of material, shape, and illumination. The goal of this dataset is twofold. First, we analyze for the first time the effects of illumination and geometry in material perception across such a large collection of varied appearances. We connect our findings to those of the literature, discussing how previous knowledge generalizes across very diverse materials, shapes, and illuminations. Second, we use the collected dataset to train a deep learning architecture for predicting perceptual attributes that correlate with human judgments. We demonstrate the consistent and robust behavior of our predictor in various challenging scenarios, which, for the first time, enables estimating perceived material attributes from general 2D images. Since our predictor relies on the final appearance in an image, it can compare appearance properties across different geometries and illumination conditions. Finally, we demonstrate several applications that use our predictor, including appearance reproduction using 3D printing, BRDF editing by integrating our predictor in a differentiable renderer, illumination design, and material recommendations for scene design.},
  author = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovarci, Michal and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{The effect of shape and illumination on material perception: Model and applications}},
  doi = {10.1145/3450626.3459813},
  volume = {40},
  year = {2021},
}
@article{9818,
  abstract = {Triangle mesh-based simulations are able to produce satisfying animations of knitted and woven cloth; however, they lack the rich geometric detail of yarn-level simulations. Naive texturing approaches do not consider yarn-level physics, while full yarn-level simulations may become prohibitively expensive for large garments. We propose a method to animate yarn-level cloth geometry on top of an underlying deforming mesh in a mechanics-aware fashion. Using triangle strains to interpolate precomputed yarn geometry, we are able to reproduce effects such as knit loops tightening under stretching. In combination with precomputed mesh animation or real-time mesh simulation, our method is able to animate yarn-level cloth in real time at large scales.},
  author = {Sperl, Georg and Narain, Rahul and Wojtan, Christopher J},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{Mechanics-aware deformation of yarn pattern geometry}},
  doi = {10.1145/3450626.3459816},
  volume = {40},
  year = {2021},
}

@article{8535,
  abstract = {We propose a method to enhance the visual detail of a water surface simulation. Our method works as a post-processing step which takes a simulation as input and increases its apparent resolution by simulating many detailed Lagrangian water waves on top of it. We extend linear water wave theory to work in non-planar domains which deform over time, and we discretize the theory using Lagrangian wave packets attached to spline curves. The method is numerically stable and trivially parallelizable, and it produces high-frequency ripples with dispersive wave-like behaviors customized to the underlying fluid simulation.},
  author = {Skrivan, Tomas and Soderstrom, Andreas and Johansson, John and Sprenger, Christoph and Museth, Ken and Wojtan, Christopher J},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{Wave curves: Simulating Lagrangian water waves on dynamically deforming surfaces}},
  doi = {10.1145/3386569.3392466},
  volume = {39},
  year = {2020},
}

@article{8384,
  abstract = {Previous research on animations of soap bubbles, films, and foams largely focuses on the motion and geometric shape of the bubble surface. These works neglect the evolution of the bubble's thickness, which is normally responsible for visual phenomena like surface vortices, Newton's interference patterns, capillary waves, and deformation-dependent rupturing of films in a foam. In this paper, we model these natural phenomena by introducing the film thickness as a reduced degree of freedom in the Navier-Stokes equations and deriving their equations of motion. We discretize the equations on a non-manifold triangle mesh surface and couple them to an existing bubble solver. In doing so, we also introduce an incompressible fluid solver for 2.5D films and a novel advection algorithm for convecting fields across non-manifold surface junctions. Our simulations enhance state-of-the-art bubble solvers with additional effects caused by convection, rippling, draining, and evaporation of the thin film.},
  author = {Ishida, Sadashige and Synak, Peter and Narita, Fumiya and Hachisuka, Toshiya and Wojtan, Christopher J},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{A model for soap film dynamics with evolving thickness}},
  doi = {10.1145/3386569.3392405},
  volume = {39},
  year = {2020},
}
@article{8385,
  abstract = {We present a method for animating yarn-level cloth effects using a thin-shell solver. We accomplish this through numerical homogenization: we first use a large number of yarn-level simulations to build a model of the potential energy density of the cloth, and then use this energy density function to compute forces in a thin-shell simulator. We model several yarn-based materials, including both woven and knitted fabrics. Our model faithfully reproduces expected effects like the stiffness of woven fabrics, and the highly deformable nature and anisotropy of knitted fabrics. Our approach does not require any real-world experiments or measurements; because the method is based entirely on simulations, it can generate entirely new material models quickly, without the need for testing apparatuses or human intervention. We provide data-driven models of several woven and knitted fabrics, which can be used for efficient simulation with an off-the-shelf cloth solver.},
  author = {Sperl, Georg and Narain, Rahul and Wojtan, Christopher J},
  issn = {1557-7368},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {Association for Computing Machinery},
  title = {{Homogenized yarn-level cloth}},
  doi = {10.1145/3386569.3392412},
  volume = {39},
  year = {2020},
}

@article{470,
  abstract = {This paper presents a method for simulating water surface waves as a displacement field on a 2D domain. Our method relies on Lagrangian particles that carry packets of water wave energy; each packet carries information about an entire group of wave trains, as opposed to only a single wave crest. Our approach is unconditionally stable and can simulate high-resolution geometric details. This approach also presents a straightforward interface for artistic control, because it is essentially a particle system with intuitive parameters like wavelength and amplitude. Our implementation parallelizes well and runs in real time for moderately challenging scenarios.},
  author = {Jeschke, Stefan and Wojtan, Christopher J},
  issn = {0730-0301},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {ACM},
  title = {{Water wave packets}},
  doi = {10.1145/3072959.3073678},
  volume = {36},
  year = {2017},
}
@article{486,
  abstract = {Color texture reproduction in 3D printing commonly ignores volumetric light transport (cross-talk) between surface points on a 3D print. Such light diffusion leads to significant blur of details and color bleeding, and is particularly severe for highly translucent resin-based print materials. Given the widely varying scattering properties of these materials, the cross-talk between surface points strongly depends on the internal structure of the volume surrounding each surface point. Existing scattering-aware methods use simplified models for light diffusion, and often accept the visual blur as an immutable property of the print medium. In contrast, our work counteracts heterogeneous scattering to obtain the impression of a crisp albedo texture on top of the 3D print, by optimizing for a fully volumetric material distribution that preserves the target appearance. Our method employs an efficient numerical optimizer on top of a general Monte Carlo simulation of heterogeneous scattering, supported by a practical calibration procedure to obtain scattering parameters from a given set of printer materials. Despite the inherent translucency of the medium, we reproduce detailed surface textures on 3D prints. We evaluate our system using a commercial, five-tone 3D print process and compare against the printer's native color texturing mode, demonstrating that our method preserves high-frequency features well without having to compromise on color gamut.},
  author = {Elek, Oskar and Sumin, Denis and Zhang, Ran and Weyrich, Tim and Myszkowski, Karol and Bickel, Bernd and Wilkie, Alexander and Krivanek, Jaroslav},
  issn = {0730-0301},
  journal = {ACM Transactions on Graphics},
  number = {6},
  publisher = {ACM},
  title = {{Scattering-aware texture reproduction for 3D printing}},
  doi = {10.1145/3130800.3130890},
  volume = {36},
  year = {2017},
}

@article{1002,
  abstract = {We present an interactive design system to create functional mechanical objects. Our computational approach allows novice users to retarget an existing mechanical template to a user-specified input shape. Our proposed representation for a mechanical template encodes a parameterized mechanism, mechanical constraints that ensure a physically valid configuration, spatial relationships of mechanical parts to the user-provided shape, and functional constraints that specify an intended functionality. We provide an intuitive interface and optimization-in-the-loop approach for finding a valid configuration of the mechanism and the shape to ensure that higher-level functional goals are met. Our algorithm interactively optimizes the mechanism while the user manipulates the placement of mechanical components and the shape. Our system allows users to efficiently explore various design choices and to synthesize customized mechanical objects that can be fabricated with rapid prototyping technologies. We demonstrate the efficacy of our approach by retargeting various mechanical templates to different shapes and fabricating the resulting functional mechanical objects.},
  author = {Zhang, Ran and Auzinger, Thomas and Ceylan, Duygu and Li, Wilmot and Bickel, Bernd},
  issn = {0730-0301},
  journal = {ACM Transactions on Graphics},
  number = {4},
  publisher = {ACM},
  title = {{Functionality-aware retargeting of mechanisms to 3D shapes}},
  doi = {10.1145/3072959.3073710},
  volume = {36},
  year = {2017},
}