@article{9820, abstract = {Material appearance hinges not only on material reflectance properties but also on surface geometry and illumination. The unlimited number of potential combinations between these factors makes understanding and predicting material appearance a very challenging task. In this work, we collect a large-scale dataset of perceptual ratings of appearance attributes with more than 215,680 responses for 42,120 distinct combinations of material, shape, and illumination. The goal of this dataset is twofold. First, we analyze for the first time the effects of illumination and geometry in material perception across such a large collection of varied appearances. We connect our findings to those of the literature, discussing how previous knowledge generalizes across very diverse materials, shapes, and illuminations. Second, we use the collected dataset to train a deep learning architecture for predicting perceptual attributes that correlate with human judgments. We demonstrate the consistent and robust behavior of our predictor in various challenging scenarios, which, for the first time, enables estimating perceived material attributes from general 2D images. Since our predictor relies on the final appearance in an image, it can compare appearance properties across different geometries and illumination conditions. Finally, we demonstrate several applications that use our predictor, including appearance reproduction using 3D printing, BRDF editing by integrating our predictor in a differentiable renderer, illumination design, and material recommendations for scene design.}, author = {Serrano, Ana and Chen, Bin and Wang, Chao and Piovarci, Michael and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {4}, publisher = {Association for Computing Machinery}, title = {{The effect of shape and illumination on material perception: Model and applications}}, doi = {10.1145/3450626.3459813}, volume = {40}, year = {2021}, }

@inproceedings{9957, abstract = {The reflectance field of a face describes the reflectance properties responsible for complex lighting effects including diffuse, specular, inter-reflection, and self-shadowing. Most existing methods for estimating the face reflectance from a monocular image assume faces to be diffuse, with very few approaches adding a specular component. This still leaves out important perceptual aspects of reflectance, as higher-order global illumination effects and self-shadowing are not modeled. We present a new neural representation for face reflectance where we can estimate all components of the reflectance responsible for the final appearance from a single monocular image. Instead of modeling each component of the reflectance separately using parametric models, our neural representation allows us to generate a basis set of faces in a geometric deformation-invariant space, parameterized by the input light direction, viewpoint, and face geometry. We learn to reconstruct this reflectance field of a face just from a monocular image, which can be used to render the face from any viewpoint in any light condition. Our method is trained on a light-stage training dataset, which captures 300 people illuminated with 150 light conditions from 8 viewpoints.
We show that our method outperforms existing monocular reflectance reconstruction methods in terms of photorealism, due to better capturing of physical primitives such as sub-surface scattering, specularities, self-shadows, and other higher-order effects.}, author = {B R, Mallikarjun and Tewari, Ayush and Oh, Tae-Hyun and Weyrich, Tim and Bickel, Bernd and Seidel, Hans-Peter and Pfister, Hanspeter and Matusik, Wojciech and Elgharib, Mohamed and Theobalt, Christian}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, isbn = {978-166544509-2}, issn = {1063-6919}, location = {Nashville, TN, United States; Virtual}, pages = {4791--4800}, publisher = {IEEE}, title = {{Monocular reconstruction of neural face reflectance fields}}, doi = {10.1109/CVPR46437.2021.00476}, year = {2021}, }

@article{9547, abstract = {With the wider availability of full-color 3D printers, color-accurate 3D-print preparation has received increased attention. A key challenge lies in the inherent translucency of commonly used print materials, which blurs out details of the color texture. Previous work tries to compensate for these scattering effects through strategic assignment of colored primary materials to printer voxels. To date, the highest-quality approach uses iterative optimization that relies on computationally expensive Monte Carlo light transport simulation to predict the surface appearance from subsurface scattering within a given print material distribution; that optimization, however, takes on the order of days on a single machine. In our work, we dramatically speed up the process by replacing the light transport simulation with a data-driven approach. Leveraging a deep neural network to predict the scattering within a highly heterogeneous medium, our method performs around two orders of magnitude faster than Monte Carlo rendering while yielding optimization results of similar quality. The network is based on an established method from atmospheric cloud rendering, adapted to our domain and extended by a physically motivated weight-sharing scheme that substantially reduces the network size. We analyze its performance in an end-to-end print preparation pipeline, compare quality and runtime to alternative approaches, and demonstrate its generalization to unseen geometry and material values. This enables, for the first time, full heterogeneous material optimization for 3D-print preparation within time frames on the order of the actual printing time.}, author = {Rittig, Tobias and Sumin, Denis and Babaei, Vahid and Didyk, Piotr and Voloboy, Alexey and Wilkie, Alexander and Bickel, Bernd and Myszkowski, Karol and Weyrich, Tim and Křivánek, Jaroslav}, issn = {1467-8659}, journal = {Computer Graphics Forum}, number = {2}, pages = {205--219}, publisher = {Wiley}, title = {{Neural acceleration of scattering-aware color 3D printing}}, doi = {10.1111/cgf.142626}, volume = {40}, year = {2021}, }

@article{10574, abstract = {The understanding of material appearance perception is a complex problem due to interactions between material reflectance, surface geometry, and illumination. Recently, Serrano et al. collected the largest dataset to date with subjective ratings of material appearance attributes, including glossiness, metallicness, and the sharpness and contrast of reflections.
In this work, we make use of their dataset to investigate for the first time the impact of the interactions between illumination, geometry, and eight different material categories on perceived appearance attributes. After an initial analysis, we select for further study the four material categories that cover the largest range across all perceptual attributes: fabric, plastic, ceramic, and metal. Using a cumulative link mixed model (CLMM) for robust regression, we discover interactions between these material categories and four representative illuminations and object geometries. We believe that our findings contribute to expanding the knowledge on material appearance perception and can be useful for many applications, such as scene design, where a particular material in a given shape can be paired with dominant classes of illumination so that a desired strength of appearance attributes is achieved.}, author = {Chen, Bin and Wang, Chao and Piovarci, Michael and Seidel, Hans-Peter and Didyk, Piotr and Myszkowski, Karol and Serrano, Ana}, issn = {1432-2315}, journal = {Visual Computer}, number = {12}, pages = {2975--2987}, publisher = {Springer Nature}, title = {{The effect of geometry and illumination on appearance perception of different material categories}}, doi = {10.1007/s00371-021-02227-x}, volume = {37}, year = {2021}, }

@article{10184, abstract = {We introduce a novel technique to automatically decompose an input object’s volume into a set of parts, each of which can be represented by two opposite height fields. Such a decomposition enables the manufacturing of the individual parts using two-piece reusable rigid molds. Our decomposition strategy relies on a new energy formulation that utilizes a pre-computed signal on the mesh volume representing the accessibility for a predefined set of extraction directions. Thanks to this novel formulation, our method allows for efficient optimization of a fabrication-aware partitioning of volumes in a completely automatic way. We demonstrate the efficacy of our approach by generating valid volume partitionings for a wide range of complex objects and physically reproducing several of them.}, author = {Alderighi, Thomas and Malomo, Luigi and Bickel, Bernd and Cignoni, Paolo and Pietroni, Nico}, issn = {1557-7368}, journal = {ACM Transactions on Graphics}, number = {6}, publisher = {Association for Computing Machinery}, title = {{Volume decomposition for two-piece rigid casting}}, doi = {10.1145/3478513.3480555}, volume = {40}, year = {2021}, }