{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxAAT0N", "doi": "10.1109/TVCG.2010.25", "abstract": "Artists use different means of stylization to control the focus on different objects in the scene. This allows them to portray complex meaning and achieve certain artistic effects. Most prior work on painterly rendering of videos, however, uses only a single painting style, with fixed global parameters, irrespective of objects and their layout in the images. This often leads to inadequate artistic control. Moreover, brush stroke orientation is typically assumed to follow an everywhere continuous directional field. In this paper, we propose a video painting system that accounts for the spatial support of objects in the images or videos, and uses this information to specify style parameters and stroke orientation for painterly rendering. Since objects occupy distinct image locations and move relatively smoothly from one video frame to another, our object-based painterly rendering approach is characterized by style parameters that coherently vary in space and time. Space-time-varying style parameters enable more artistic freedom, such as emphasis/de-emphasis, increase or decrease of contrast, exaggeration or abstraction of different objects in the scene in a temporally coherent fashion.", "abstracts": [ { "abstractType": "Regular", "content": "Artists use different means of stylization to control the focus on different objects in the scene. This allows them to portray complex meaning and achieve certain artistic effects. Most prior work on painterly rendering of videos, however, uses only a single painting style, with fixed global parameters, irrespective of objects and their layout in the images. This often leads to inadequate artistic control. Moreover, brush stroke orientation is typically assumed to follow an everywhere continuous directional field. In this paper, we propose a video painting system that accounts for the spatial support of objects in the images or videos, and uses this information to specify style parameters and stroke orientation for painterly rendering. Since objects occupy distinct image locations and move relatively smoothly from one video frame to another, our object-based painterly rendering approach is characterized by style parameters that coherently vary in space and time. Space-time-varying style parameters enable more artistic freedom, such as emphasis/de-emphasis, increase or decrease of contrast, exaggeration or abstraction of different objects in the scene in a temporally coherent fashion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Artists use different means of stylization to control the focus on different objects in the scene. This allows them to portray complex meaning and achieve certain artistic effects. Most prior work on painterly rendering of videos, however, uses only a single painting style, with fixed global parameters, irrespective of objects and their layout in the images. This often leads to inadequate artistic control. Moreover, brush stroke orientation is typically assumed to follow an everywhere continuous directional field. 
In this paper, we propose a video painting system that accounts for the spatial support of objects in the images or videos, and uses this information to specify style parameters and stroke orientation for painterly rendering. Since objects occupy distinct image locations and move relatively smoothly from one video frame to another, our object-based painterly rendering approach is characterized by style parameters that coherently vary in space and time. Space-time-varying style parameters enable more artistic freedom, such as emphasis/de-emphasis, increase or decrease of contrast, exaggeration or abstraction of different objects in the scene in a temporally coherent fashion.", "title": "Video Painting with Space-Time-Varying Style Parameters", "normalizedTitle": "Video Painting with Space-Time-Varying Style Parameters", "fno": "ttg2011010074", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Nonphotorealistic Rendering", "Video Painting", "Multistyle Painting", "Tensor Field Design" ], "authors": [ { "givenName": "Mizuki", "surname": "Kagaya", "fullName": "Mizuki Kagaya", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" }, { "givenName": "William", "surname": "Brendel", "fullName": "William Brendel", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" }, { "givenName": "Qingqing", "surname": "Deng", "fullName": "Qingqing Deng", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" }, { "givenName": "Todd", "surname": "Kesterson", "fullName": "Todd Kesterson", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" }, { "givenName": "Sinisa", "surname": "Todorovic", "fullName": "Sinisa Todorovic", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" }, { "givenName": "Patrick J.", "surname": "Neill", "fullName": "Patrick J. 
Neill", "affiliation": "NVidia, Santa Clara", "__typename": "ArticleAuthorType" }, { "givenName": "Eugene", "surname": "Zhang", "fullName": "Eugene Zhang", "affiliation": "Oregon State University, Corvallis", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "74-87", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457g997", "title": "Deep Photo Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g997/12OmNs59JSE", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmvit/2017/4993/0/07878716", "title": "Generation of Stereo Oil Paintings from RGBD Images", "doi": null, "abstractUrl": "/proceedings-article/cmvit/2017/07878716/12OmNxT56Bx", "parentPublication": { "id": "proceedings/cmvit/2017/4993/0", "title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/psivt/2010/4285/0/4285a462", "title": "Artistic Emulation - Filter Blending for Painterly Rendering", "doi": null, "abstractUrl": "/proceedings-article/psivt/2010/4285a462/12OmNyRg48N", "parentPublication": { "id": "proceedings/psivt/2010/4285/0", "title": "Image and Video Technology, Pacific-Rim Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/3789a089", "title": "Artwork-Based 3D Ink Style Modeling and Rendering", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a089/12OmNzlUKmq", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/01/ttg2012010058", "title": "Video Painting Based on a Stabilized Time-Varying Flow Field", "doi": null, "abstractUrl": "/journal/tg/2012/01/ttg2012010058/13rRUwInv4m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/04/v0443", "title": "Cubist Style Rendering from Photographs", "doi": null, "abstractUrl": "/journal/tg/2003/04/v0443/13rRUx0gev0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/05/ttg2013050866", "title": "State of the \"Art”: A Taxonomy of Artistic Stylization Techniques for Images and Video", "doi": null, "abstractUrl": "/journal/tg/2013/05/ttg2013050866/13rRUxBa561", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030475", "title": "AniPaint: Interactive Painterly Animation from Video", "doi": null, "abstractUrl": 
"/journal/tg/2012/03/ttg2012030475/13rRUygT7sB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a009", "title": "MaeSTrO: A Mobile App for Style Transfer Orchestration Using Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a009/17D45XwUAKX", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a284", "title": "Real-time Rendering of 3D Animal Models in Chinese Ink Painting Style", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a284/1p1grC3XnGw", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011010064", "articleId": "13rRUxZ0o1u", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011010088", "articleId": "13rRUNvgziB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgziB", "doi": "10.1109/TVCG.2010.28", "abstract": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.", "abstracts": [ { "abstractType": "Regular", "content": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. 
This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.", "title": "Inductively Generating Euler Diagrams", "normalizedTitle": "Inductively Generating Euler Diagrams", "fno": "ttg2011010088", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Information Visualization", "Diagram Layout", "Diagram Generation", "Euler Diagrams", "Venn Diagrams" ], "authors": [ { "givenName": "Gem", "surname": "Stapleton", "fullName": "Gem Stapleton", "affiliation": "University of Brighton, Brighton, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Rodgers", "fullName": "Peter Rodgers", "affiliation": "University of Kent, Canterbury, UK", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Howse", "fullName": "John Howse", "affiliation": "University of Brighton, Brighton, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Leishi", "surname": "Zhang", "fullName": "Leishi Zhang", "affiliation": "University of Kent, Canterbury, UK", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "88-100", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2008/3268/0/3268a594", "title": "Visualise Undrawable Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880272", "title": "Layout Metrics for Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. 
IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2011/1246/0/06070401", "title": "Drawing Euler diagrams with circles and ellipses", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49", "parentPublication": { "id": "proceedings/vlhcc/2011/1246/0", "title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2011/1246/0/06070382", "title": "SketchSet: Creating Euler diagrams using pen or mouse", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2011/06070382/12OmNx965CA", "parentPublication": { "id": "proceedings/vlhcc/2011/1246/0", "title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2007/2900/0/29000771", "title": "Evaluating the Comprehension of Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2007/29000771/12OmNxjjEhx", "parentPublication": { "id": "proceedings/iv/2007/2900/0", "title": "2007 11th International Conference Information Visualization (IV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2013/0369/0/06645262", "title": "Improving user comprehension of Euler diagrams", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2013/06645262/12OmNxveNOL", "parentPublication": { "id": "proceedings/vlhcc/2013/0369/0", "title": "2013 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2008/3268/0/3268a585", "title": "Embedding Wellformed Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011071020", "title": "Drawing Euler Diagrams with Circles: The Theory of Piercings", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011071020/13rRUEgarBq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/07/05999665", "title": "Wellformedness Properties in Euler Diagrams: Which Should Be Used?", "doi": null, "abstractUrl": "/journal/tg/2012/07/05999665/13rRUILLkvo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061090", "title": "Untangling Euler Diagrams", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061090/13rRUILtJm3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011010074", "articleId": "13rRUxAAT0N", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011010101", "articleId": "13rRUx0xPIA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ 
{ "id": "17ShDTXWRFe", "name": "ttg2011010088s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2011010088s.pdf", "extension": "pdf", "size": "286 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPIA", "doi": "10.1109/TVCG.2010.24", "abstract": "This paper describes an automatic mechanism for drawing metro maps. We apply multicriteria optimization to find effective placement of stations with a good line layout and to label the map unambiguously. A number of metrics are defined, which are used in a weighted sum to find a fitness value for a layout of the map. A hill climbing optimizer is used to reduce the fitness value, and find improved map layouts. To avoid local minima, we apply clustering techniques to the map—the hill climber moves both stations and clusters when finding improved layouts. We show the method applied to a number of metro maps, and describe an empirical study that provides some quantitative evidence that automatically-drawn metro maps can help users to find routes more efficiently than either published maps or undistorted maps. Moreover, we have found that, in these cases, study subjects indicate a preference for automatically-drawn maps over the alternatives.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes an automatic mechanism for drawing metro maps. We apply multicriteria optimization to find effective placement of stations with a good line layout and to label the map unambiguously. A number of metrics are defined, which are used in a weighted sum to find a fitness value for a layout of the map. A hill climbing optimizer is used to reduce the fitness value, and find improved map layouts. To avoid local minima, we apply clustering techniques to the map—the hill climber moves both stations and clusters when finding improved layouts. We show the method applied to a number of metro maps, and describe an empirical study that provides some quantitative evidence that automatically-drawn metro maps can help users to find routes more efficiently than either published maps or undistorted maps. Moreover, we have found that, in these cases, study subjects indicate a preference for automatically-drawn maps over the alternatives.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes an automatic mechanism for drawing metro maps. We apply multicriteria optimization to find effective placement of stations with a good line layout and to label the map unambiguously. A number of metrics are defined, which are used in a weighted sum to find a fitness value for a layout of the map. A hill climbing optimizer is used to reduce the fitness value, and find improved map layouts. To avoid local minima, we apply clustering techniques to the map—the hill climber moves both stations and clusters when finding improved layouts. We show the method applied to a number of metro maps, and describe an empirical study that provides some quantitative evidence that automatically-drawn metro maps can help users to find routes more efficiently than either published maps or undistorted maps. 
Moreover, we have found that, in these cases, study subjects indicate a preference for automatically-drawn maps over the alternatives.", "title": "Automatic Metro Map Layout Using Multicriteria Optimization", "normalizedTitle": "Automatic Metro Map Layout Using Multicriteria Optimization", "fno": "ttg2011010101", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Information Visualization", "Diagram Layout", "Graph Drawing" ], "authors": [ { "givenName": "Jonathan", "surname": "Stott", "fullName": "Jonathan Stott", "affiliation": "University of Kent, Canterbury", "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Rodgers", "fullName": "Peter Rodgers", "affiliation": "University of Kent, Canterbury", "__typename": "ArticleAuthorType" }, { "givenName": "Juan Carlos", "surname": "Martínez-Ovando", "fullName": "Juan Carlos Martínez-Ovando", "affiliation": "University of Kent, Canterbury", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen G.", "surname": "Walker", "fullName": "Stephen G. Walker", "affiliation": "University of Kent, Canterbury", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "101-114", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2004/2177/0/21770488", "title": "Getting to more Abstract Places using the Metro Map Metaphor", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770488/12OmNANBZs7", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880272", "title": "Layout Metrics for Euler Diagrams", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770355", "title": "Metro Map Layout Using Multicriteria Optimization", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770355/12OmNvJXeDm", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2014/3919/0/06741398", "title": "Octilinear layouts for metro map visualization", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2014/06741398/12OmNvk7JXn", "parentPublication": { "id": "proceedings/bigcomp/2014/3919/0", "title": "2014 International Conference on Big Data and Smart Computing (BIGCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a009", "title": "Designing and Annotating Metro Maps with Loop Lines", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a009/12OmNylKAKN", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2005/2397/0/23970203", "title": "Automatic Layout of Project Plans Using a Metro Map Metaphor", "doi": null, "abstractUrl": "/proceedings-article/iv/2005/23970203/12OmNz61dEr", "parentPublication": { "id": "proceedings/iv/2005/2397/0", "title": "Ninth International Conference on Information Visualisation (IV'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/05/ttg2011050626", "title": "Drawing and Labeling High-Quality Metro Maps by Mixed-Integer Programming", "doi": null, "abstractUrl": "/journal/tg/2011/05/ttg2011050626/13rRUwdrdSw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/02/07102775", "title": "Interactive Metro Map Editing", "doi": null, "abstractUrl": "/journal/tg/2016/02/07102775/13rRUx0xPIL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122528", "title": "Focus+Context Metro Maps", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122528/13rRUyY294B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a167", "title": "Effects of Image Features and Task Complexity on Eye Movement while searching Metro Map routes", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a167/1KaH4YF3DRm", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011010088", "articleId": "13rRUNvgziB", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011010115", "articleId": "13rRUwhHcJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgNn", "name": "ttg2011010101s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2011010101s.pdf", "extension": "pdf", "size": "1.81 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwhHcJe", "doi": "10.1109/TVCG.2010.56", "abstract": "In three-dimensional medical imaging, segmentation of specific anatomy structure is often a preprocessing step for computer-aided detection/diagnosis (CAD) purposes, and its performance has a significant impact on diagnosis of diseases as well as objective quantitative assessment of therapeutic efficacy. However, the existence of various diseases, image noise or artifacts, and individual anatomical variety generally impose a challenge for accurate segmentation of specific structures. To address these problems, a shape analysis strategy termed “break-and-repair” is presented in this study to facilitate automated medical image segmentation. Similar to surface approximation using a limited number of control points, the basic idea is to remove problematic regions and then estimate a smooth and complete surface shape by representing the remaining regions with high fidelity as an implicit function. The innovation of this shape analysis strategy is the capability of solving challenging medical image segmentation problems in a unified framework, regardless of the variability of anatomical structures in question. In our implementation, principal curvature analysis is used to identify and remove the problematic regions and radial basis function (RBF) based implicit surface fitting is used to achieve a closed (or complete) surface boundary. The feasibility and performance of this strategy are demonstrated by applying it to automated segmentation of two completely different anatomical structures depicted on CT examinations, namely human lungs and pulmonary nodules. Our quantitative experiments on a large number of clinical CT examinations collected from different sources demonstrate the accuracy, robustness, and generality of the shape “break-and-repair” strategy in medical image segmentation.", "abstracts": [ { "abstractType": "Regular", "content": "In three-dimensional medical imaging, segmentation of specific anatomy structure is often a preprocessing step for computer-aided detection/diagnosis (CAD) purposes, and its performance has a significant impact on diagnosis of diseases as well as objective quantitative assessment of therapeutic efficacy. However, the existence of various diseases, image noise or artifacts, and individual anatomical variety generally impose a challenge for accurate segmentation of specific structures. To address these problems, a shape analysis strategy termed “break-and-repair” is presented in this study to facilitate automated medical image segmentation. Similar to surface approximation using a limited number of control points, the basic idea is to remove problematic regions and then estimate a smooth and complete surface shape by representing the remaining regions with high fidelity as an implicit function. The innovation of this shape analysis strategy is the capability of solving challenging medical image segmentation problems in a unified framework, regardless of the variability of anatomical structures in question. 
In our implementation, principal curvature analysis is used to identify and remove the problematic regions and radial basis function (RBF) based implicit surface fitting is used to achieve a closed (or complete) surface boundary. The feasibility and performance of this strategy are demonstrated by applying it to automated segmentation of two completely different anatomical structures depicted on CT examinations, namely human lungs and pulmonary nodules. Our quantitative experiments on a large number of clinical CT examinations collected from different sources demonstrate the accuracy, robustness, and generality of the shape “break-and-repair” strategy in medical image segmentation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In three-dimensional medical imaging, segmentation of specific anatomy structure is often a preprocessing step for computer-aided detection/diagnosis (CAD) purposes, and its performance has a significant impact on diagnosis of diseases as well as objective quantitative assessment of therapeutic efficacy. However, the existence of various diseases, image noise or artifacts, and individual anatomical variety generally impose a challenge for accurate segmentation of specific structures. To address these problems, a shape analysis strategy termed “break-and-repair” is presented in this study to facilitate automated medical image segmentation. Similar to surface approximation using a limited number of control points, the basic idea is to remove problematic regions and then estimate a smooth and complete surface shape by representing the remaining regions with high fidelity as an implicit function. The innovation of this shape analysis strategy is the capability of solving challenging medical image segmentation problems in a unified framework, regardless of the variability of anatomical structures in question. In our implementation, principal curvature analysis is used to identify and remove the problematic regions and radial basis function (RBF) based implicit surface fitting is used to achieve a closed (or complete) surface boundary. The feasibility and performance of this strategy are demonstrated by applying it to automated segmentation of two completely different anatomical structures depicted on CT examinations, namely human lungs and pulmonary nodules. Our quantitative experiments on a large number of clinical CT examinations collected from different sources demonstrate the accuracy, robustness, and generality of the shape “break-and-repair” strategy in medical image segmentation.", "title": "Shape “Break-and-Repair” Strategy and Its Application to Automated Medical Image Segmentation", "normalizedTitle": "Shape “Break-and-Repair” Strategy and Its Application to Automated Medical Image Segmentation", "fno": "ttg2011010115", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Shape Analysis", "Surface Interpolation", "Medical Image Segmentation", "Computer Aided Detection Diagnosis" ], "authors": [ { "givenName": "Jiantao", "surname": "Pu", "fullName": "Jiantao Pu", "affiliation": "University of Pittsburgh, Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "David S.", "surname": "Paik", "fullName": "David S. Paik", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Xin", "surname": "Meng", "fullName": "Xin Meng", "affiliation": "University of Pittsburgh, Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "Justus E.", "surname": "Roos", "fullName": "Justus E. 
Roos", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Geoffrey D.", "surname": "Rubin", "fullName": "Geoffrey D. Rubin", "affiliation": "Stanford University, Stanford, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "115-124", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dpvt/2006/2825/0/04155751", "title": "Hemispherical Harmonic Surface Description and Applications to Medical Image Analysis", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/04155751/12OmNARRYtV", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457424", "title": "Freeform shape clustering for customized design automation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457424/12OmNBTJIBa", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dpvt/2006/2825/0/282501034", "title": "A Spatio-Temporal Modeling Method for Shape Representation", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/282501034/12OmNrJRPpg", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2007/3122/0/3122a631", "title": "Graph Cuts Segmentation with Statistical Shape Priors for Medical Images", "doi": null, "abstractUrl": "/proceedings-article/sitis/2007/3122a631/12OmNvjgWt8", "parentPublication": { "id": "proceedings/sitis/2007/3122/0", "title": "2007 Third International IEEE Conference on Signal-Image Technologies and Internet-Based System", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wgec/2008/3334/0/3334a332", "title": "An Interactive Algorithm for Blurred Medical Image Segmentation Based on Curve Fitting", "doi": null, "abstractUrl": "/proceedings-article/wgec/2008/3334a332/12OmNwswg4p", "parentPublication": { "id": "proceedings/wgec/2008/3334/0", "title": "2008 Second International Conference on Genetic and Evolutionary Computing (WGEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1993/3870/0/00378150", "title": "Large deformable splines, crest lines and matching", "doi": null, "abstractUrl": "/proceedings-article/iccv/1993/00378150/12OmNy4IF1d", "parentPublication": { "id": "proceedings/iccv/1993/3870/0", "title": "1993 (4th) International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wgec/2008/3334/0/3334a289", "title": "An Improved Algorithm for Medical Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/wgec/2008/3334a289/12OmNzgwmKW", "parentPublication": { "id": 
"proceedings/wgec/2008/3334/0", "title": "2008 Second International Conference on Genetic and Evolutionary Computing (WGEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061366", "title": "Exploration and Visualization of Segmentation Uncertainty using Shape and Appearance Prior Information", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061366/13rRUwI5U2C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876018", "title": "ADR - Anatomy-Driven Reformation", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876018/13rRUwInvf9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlbdbi/2021/1790/0/179000a577", "title": "Research on Medical Image Segmentation Method", "doi": null, "abstractUrl": "/proceedings-article/mlbdbi/2021/179000a577/1BQirJn6C08", "parentPublication": { "id": "proceedings/mlbdbi/2021/1790/0", "title": "2021 3rd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011010101", "articleId": "13rRUx0xPIA", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011010125", "articleId": "13rRUwvT9gq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwvT9gq", "doi": "10.1109/TVCG.2011.2", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "2010 Reviewers List", "normalizedTitle": "2010 Reviewers List", "fno": "ttg2011010125", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "125-128", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "ttg2011010115", "articleId": "13rRUwhHcJe", "__typename": "AdjacentArticleType" }, "next": { "fno": "tg11", "articleId": "13rRUzphDxU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUzphDxU", "doi": "10.1109/TVCG.2011.1", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "2010 Annual Index", "normalizedTitle": "2010 Annual Index", "fno": "tg11", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "Not in Print", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "ttg2011010125", "articleId": "13rRUwvT9gq", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy2YLSU", "doi": "10.1109/2945.910817", "abstract": "Abstract—Reverse engineering ordinarily uses laser scanners since they can sample 3D data quickly and accurately relative to other systems. These laser scanner systems, however, yield an enormous amount of irregular and scattered digitized point data that requires intensive reconstruction processing. Reconstruction of freeform objects consists of two main stages: 1) parameterization and 2) surface fitting. Selection of an appropriate parameterization is essential for topology reconstruction as well as surface fitness. Current parameterization methods have topological problems that lead to undesired surface fitting results, such as noisy self-intersecting surfaces. Such problems are particularly common with concave shapes whose parametric grid is self-intersecting, resulting in a fitted surface that considerably twists and changes its original shape. In such cases, other parameterization approaches should be used in order to guarantee non-self-intersecting behavior. The parameterization method described in this paper is based on two stages: 1) 2D initial parameterization and 2) 3D adaptive parameterization. Two methods were developed for the first stage: Partial Differential Equation (PDE) parameterization and neural network Self Organizing Maps (SOM) parameterization. PDE parameterization yields a parametric grid without self-intersections. Neural network SOM parameterization creates a grid where all the sampled points, not only the boundary points, affect the grid, leading to a uniform and smooth surface. In the second stage, a 3D base surface was created and then adaptively modified. To this end, the Gradient Descent Algorithm (GDA) and Random Surface Error Correction (RSEC), both of which are iterative surface fitting methods, were developed and implemented. The feasibility of the developed parameterization methods and fitting algorithms is demonstrated on several examples using sculptured free objects.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Reverse engineering ordinarily uses laser scanners since they can sample 3D data quickly and accurately relative to other systems. These laser scanner systems, however, yield an enormous amount of irregular and scattered digitized point data that requires intensive reconstruction processing. Reconstruction of freeform objects consists of two main stages: 1) parameterization and 2) surface fitting. Selection of an appropriate parameterization is essential for topology reconstruction as well as surface fitness. Current parameterization methods have topological problems that lead to undesired surface fitting results, such as noisy self-intersecting surfaces. Such problems are particularly common with concave shapes whose parametric grid is self-intersecting, resulting in a fitted surface that considerably twists and changes its original shape. In such cases, other parameterization approaches should be used in order to guarantee non-self-intersecting behavior. The parameterization method described in this paper is based on two stages: 1) 2D initial parameterization and 2) 3D adaptive parameterization. 
Two methods were developed for the first stage: Partial Differential Equation (PDE) parameterization and neural network Self Organizing Maps (SOM) parameterization. PDE parameterization yields a parametric grid without self-intersections. Neural network SOM parameterization creates a grid where all the sampled points, not only the boundary points, affect the grid, leading to a uniform and smooth surface. In the second stage, a 3D base surface was created and then adaptively modified. To this end, the Gradient Descent Algorithm (GDA) and Random Surface Error Correction (RSEC), both of which are iterative surface fitting methods, were developed and implemented. The feasibility of the developed parameterization methods and fitting algorithms is demonstrated on several examples using sculptured free objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Reverse engineering ordinarily uses laser scanners since they can sample 3D data quickly and accurately relative to other systems. These laser scanner systems, however, yield an enormous amount of irregular and scattered digitized point data that requires intensive reconstruction processing. Reconstruction of freeform objects consists of two main stages: 1) parameterization and 2) surface fitting. Selection of an appropriate parameterization is essential for topology reconstruction as well as surface fitness. Current parameterization methods have topological problems that lead to undesired surface fitting results, such as noisy self-intersecting surfaces. Such problems are particularly common with concave shapes whose parametric grid is self-intersecting, resulting in a fitted surface that considerably twists and changes its original shape. In such cases, other parameterization approaches should be used in order to guarantee non-self-intersecting behavior. The parameterization method described in this paper is based on two stages: 1) 2D initial parameterization and 2) 3D adaptive parameterization. Two methods were developed for the first stage: Partial Differential Equation (PDE) parameterization and neural network Self Organizing Maps (SOM) parameterization. PDE parameterization yields a parametric grid without self-intersections. Neural network SOM parameterization creates a grid where all the sampled points, not only the boundary points, affect the grid, leading to a uniform and smooth surface. In the second stage, a 3D base surface was created and then adaptively modified. To this end, the Gradient Descent Algorithm (GDA) and Random Surface Error Correction (RSEC), both of which are iterative surface fitting methods, were developed and implemented. The feasibility of the developed parameterization methods and fitting algorithms is demonstrated on several examples using sculptured free objects.", "title": "Parameterization and Reconstruction from 3D Scattered Points Based on Neural Network and PDE Techniques", "normalizedTitle": "Parameterization and Reconstruction from 3D Scattered Points Based on Neural Network and PDE Techniques", "fno": "v0001", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Reverse Engineering", "Laser Scanner", "PDE Parameterization", "Neural Network SOM Parameterization", "Surface Reconstruction" ], "authors": [ { "givenName": "J.", "surname": "Barhak", "fullName": "J. Barhak", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Fischer", "fullName": "A. 
Fischer", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "1-16", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "v0017", "articleId": "13rRUNvgz41", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgz41", "doi": "10.1109/2945.910818", "abstract": "Abstract—We describe a method to create optimal linear spline approximations to arbitrary functions of one or two variables, given as scattered data without known connectivity. We start with an initial approximation consisting of a fixed number of vertices and improve this approximation by choosing different vertices, governed by a simulated annealing algorithm. In the case of one variable, the approximation is defined by line segments; in the case of two variables, the vertices are connected to define a Delaunay triangulation of the selected subset of sites in the plane. In a second version of this algorithm, specifically designed for the bivariate case, we choose vertex sets and also change the triangulation to achieve both optimal vertex placement and optimal triangulation. We then create a hierarchy of linear spline approximations, each one being a superset of all lower-resolution ones.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We describe a method to create optimal linear spline approximations to arbitrary functions of one or two variables, given as scattered data without known connectivity. We start with an initial approximation consisting of a fixed number of vertices and improve this approximation by choosing different vertices, governed by a simulated annealing algorithm. In the case of one variable, the approximation is defined by line segments; in the case of two variables, the vertices are connected to define a Delaunay triangulation of the selected subset of sites in the plane. In a second version of this algorithm, specifically designed for the bivariate case, we choose vertex sets and also change the triangulation to achieve both optimal vertex placement and optimal triangulation. We then create a hierarchy of linear spline approximations, each one being a superset of all lower-resolution ones.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We describe a method to create optimal linear spline approximations to arbitrary functions of one or two variables, given as scattered data without known connectivity. We start with an initial approximation consisting of a fixed number of vertices and improve this approximation by choosing different vertices, governed by a simulated annealing algorithm. In the case of one variable, the approximation is defined by line segments; in the case of two variables, the vertices are connected to define a Delaunay triangulation of the selected subset of sites in the plane. In a second version of this algorithm, specifically designed for the bivariate case, we choose vertex sets and also change the triangulation to achieve both optimal vertex placement and optimal triangulation. 
We then create a hierarchy of linear spline approximations, each one being a superset of all lower-resolution ones.", "title": "On Simulated Annealing and the Construction of Linear Spline Approximations for Scattered Data", "normalizedTitle": "On Simulated Annealing and the Construction of Linear Spline Approximations for Scattered Data", "fno": "v0017", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Function Approximation", "Linear Splines", "Simulated Annealing", "Multiresolution Approximation", "Data Dependent Triangulation" ], "authors": [ { "givenName": "Oliver", "surname": "Kreylos", "fullName": "Oliver Kreylos", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hamann", "fullName": "Bernd Hamann", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "17-31", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0001", "articleId": "13rRUy2YLSU", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0032", "articleId": "13rRUwghd4T", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
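The annealing loop in the abstract chooses which fixed-size subset of sample sites to keep as spline vertices. Here is a toy 1D version with an assumed geometric cooling schedule and a swap-one-vertex move set (distinct x values are assumed); worsening moves are accepted with probability exp(-delta / T), which is what lets the search escape local minima.

```python
import math, random
import numpy as np

def anneal_spline(x, y, n_verts=8, steps=5000, t0=1.0, cooling=0.999):
    """x, y: 1D samples (x distinct). Keeps n_verts sites as spline
    vertices, always including both endpoints."""
    order = np.argsort(x)
    x, y = x[order], y[order]
    verts = set(random.sample(range(1, len(x) - 1), n_verts - 2))
    verts |= {0, len(x) - 1}

    def error(vs):
        vs = sorted(vs)
        fit = np.interp(x, x[vs], y[vs])   # piecewise-linear through vs
        return float(((fit - y) ** 2).sum())

    cur, temp = error(verts), t0
    for _ in range(steps):
        out = random.choice(sorted(verts - {0, len(x) - 1}))
        swap_in = random.choice(sorted(set(range(len(x))) - verts))
        cand = (verts - {out}) | {swap_in}
        delta = error(cand) - cur
        if delta < 0 or random.random() < math.exp(-delta / temp):
            verts, cur = cand, cur + delta  # accept, possibly a worse move
        temp *= cooling
    return sorted(verts), cur
```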
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwghd4T", "doi": "10.1109/2945.910819", "abstract": "Abstract—One of the most effective techniques for developing efficient isosurfacing algorithms is the reduction of visits to nonisosurface cells. Recent algorithms have drastically reduced the unnecessary cost of visiting nonisosurface cells. The experimental results show almost optimal performance in their isosurfacing processes. However, most of them have a bottleneck in that they require more than Z_$O(n)$_Z computation time for their preprocessing, where Z_$n$_Z denotes the total number of cells. In this paper, we propose an efficient isosurfacing technique, which can be applied to unstructured as well as structured volumes and which does not require more than Z_$O(n)$_Z computation time for its preprocessing. A preprocessing step generates an extrema skeleton, which consists of cells and connects all extremum points, by the volume thinning algorithm. All disjoint parts of every isosurface intersect at least one cell in the extrema skeleton. Our implementation generates isosurfaces by searching for isosurface cells in the extrema skeleton and then recursively visiting their adjacent isosurface cells, while it skips most of the nonisosurface cells. The computation time of the preprocessing is estimated as Z_$O(n)$_Z. The computation time of the isosurfacing process is estimated as Z_$O(n^{1/3} m + k)$_Z, where Z_$k$_Z denotes the number of isosurface cells and Z_$m$_Z denotes the number of extremum points since the number of cells in an extrema skeleton is estimated as Z_$O(n^{1/3} m)$_Z.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—One of the most effective techniques for developing efficient isosurfacing algorithms is the reduction of visits to nonisosurface cells. Recent algorithms have drastically reduced the unnecessary cost of visiting nonisosurface cells. The experimental results show almost optimal performance in their isosurfacing processes. However, most of them have a bottleneck in that they require more than $O(n)$ computation time for their preprocessing, where $n$ denotes the total number of cells. In this paper, we propose an efficient isosurfacing technique, which can be applied to unstructured as well as structured volumes and which does not require more than $O(n)$ computation time for its preprocessing. A preprocessing step generates an extrema skeleton, which consists of cells and connects all extremum points, by the volume thinning algorithm. All disjoint parts of every isosurface intersect at least one cell in the extrema skeleton. Our implementation generates isosurfaces by searching for isosurface cells in the extrema skeleton and then recursively visiting their adjacent isosurface cells, while it skips most of the nonisosurface cells. The computation time of the preprocessing is estimated as $O(n)$. 
The computation time of the isosurfacing process is estimated as $O(n^{1/3} m + k)$, where $k$ denotes the number of isosurface cells and $m$ denotes the number of extremum points since the number of cells in an extrema skeleton is estimated as $O(n^{1/3} m)$.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—One of the most effective techniques for developing efficient isosurfacing algorithms is the reduction of visits to nonisosurface cells. Recent algorithms have drastically reduced the unnecessary cost of visiting nonisosurface cells. The experimental results show almost optimal performance in their isosurfacing processes. However, most of them have a bottleneck in that they require more than - computation time for their preprocessing, where - denotes the total number of cells. In this paper, we propose an efficient isosurfacing technique, which can be applied to unstructured as well as structured volumes and which does not require more than - computation time for its preprocessing. A preprocessing step generates an extrema skeleton, which consists of cells and connects all extremum points, by the volume thinning algorithm. All disjoint parts of every isosurface intersect at least one cell in the extrema skeleton. Our implementation generates isosurfaces by searching for isosurface cells in the extrema skeleton and then recursively visiting their adjacent isosurface cells, while it skips most of the nonisosurface cells. The computation time of the preprocessing is estimated as -. The computation time of the isosurfacing process is estimated as -, where - denotes the number of isosurface cells and - denotes the number of extremum points since the number of cells in an extrema skeleton is estimated as -.", "title": "Fast Isosurface Generation Using the Volume Thinning Algorithm", "normalizedTitle": "Fast Isosurface Generation Using the Volume Thinning Algorithm", "fno": "v0032", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Isosurface", "Extremum Points", "Volume Thinning", "Extrema Skeleton", "Lattice Classification" ], "authors": [ { "givenName": "Takayuki", "surname": "Itoh", "fullName": "Takayuki Itoh", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Yasushi", "surname": "Yamaguchi", "fullName": "Yasushi Yamaguchi", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Koji", "surname": "Koyamada", "fullName": "Koji Koyamada", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "32-46", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0017", "articleId": "13rRUNvgz41", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0047", "articleId": "13rRUxASuhm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASuhm", "doi": "10.1109/2945.910820", "abstract": "Abstract—We introduce a new algorithm for computing the distance from a point to an arbitrary polygonal mesh. Our algorithm uses a multiresolution hierarchy of bounding volumes generated by geometric simplification. Our algorithm is dynamic, exploiting coherence between subsequent queries using a priority process and achieving constant time queries in some cases. It can be applied to meshes that transform rigidly or deform nonrigidly. We illustrate our algorithm with a simulation of particle dynamics and collisions with a deformable mesh, the computation of distance maps and offset surfaces, the computation of an approximation to the expensive Hausdorff distance between two shapes, and the detection of self-intersections. We also report comparison results between our algorithm and an alternative algorithm using an octree, upon which our method permits an order-of-magnitude speed-up.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We introduce a new algorithm for computing the distance from a point to an arbitrary polygonal mesh. Our algorithm uses a multiresolution hierarchy of bounding volumes generated by geometric simplification. Our algorithm is dynamic, exploiting coherence between subsequent queries using a priority process and achieving constant time queries in some cases. It can be applied to meshes that transform rigidly or deform nonrigidly. We illustrate our algorithm with a simulation of particle dynamics and collisions with a deformable mesh, the computation of distance maps and offset surfaces, the computation of an approximation to the expensive Hausdorff distance between two shapes, and the detection of self-intersections. We also report comparison results between our algorithm and an alternative algorithm using an octree, upon which our method permits an order-of-magnitude speed-up.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We introduce a new algorithm for computing the distance from a point to an arbitrary polygonal mesh. Our algorithm uses a multiresolution hierarchy of bounding volumes generated by geometric simplification. Our algorithm is dynamic, exploiting coherence between subsequent queries using a priority process and achieving constant time queries in some cases. It can be applied to meshes that transform rigidly or deform nonrigidly. We illustrate our algorithm with a simulation of particle dynamics and collisions with a deformable mesh, the computation of distance maps and offset surfaces, the computation of an approximation to the expensive Hausdorff distance between two shapes, and the detection of self-intersections. 
We also report comparison results between our algorithm and an alternative algorithm using an octree, over which our method achieves an order-of-magnitude speed-up.", "title": "'Meshsweeper': Dynamic Point-to-Polygonal-Mesh Distance and Applications", "normalizedTitle": "'Meshsweeper': Dynamic Point-to-Polygonal-Mesh Distance and Applications", "fno": "v0047", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Triangular Mesh", "Closest Point", "Multiresolution Hierarchy", "Priority Process", "Dynamic Queries" ], "authors": [ { "givenName": "André", "surname": "Guéziec", "fullName": "André Guéziec", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "47-61", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0032", "articleId": "13rRUwghd4T", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0062", "articleId": "13rRUwwJWFD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwwJWFD", "doi": "10.1109/2945.910822", "abstract": "Abstract—We present a new terrain decimation technique called a Quadtree Morph, or Q-morph. The new approach eliminates the usual popping artifacts associated with polygon reduction, replacing them with less objectionable smooth morphing. We show that Q-morphing is fast enough to create a view-dependent terrain model for each frame in an interactive environment. In contrast to most Geomorph algorithms, Q-morphing does not use a time step to interpolate between geometric configurations. Instead, the geometry motion in a Q-morph is based solely on the position of the viewer.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present a new terrain decimation technique called a Quadtree Morph, or Q-morph. The new approach eliminates the usual popping artifacts associated with polygon reduction, replacing them with less objectionable smooth morphing. We show that Q-morphing is fast enough to create a view-dependent terrain model for each frame in an interactive environment. In contrast to most Geomorph algorithms, Q-morphing does not use a time step to interpolate between geometric configurations. Instead, the geometry motion in a Q-morph is based solely on the position of the viewer.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present a new terrain decimation technique called a Quadtree Morph, or Q-morph. The new approach eliminates the usual popping artifacts associated with polygon reduction, replacing them with less objectionable smooth morphing. We show that Q-morphing is fast enough to create a view-dependent terrain model for each frame in an interactive environment. In contrast to most Geomorph algorithms, Q-morphing does not use a time step to interpolate between geometric configurations. Instead, the geometry motion in a Q-morph is based solely on the position of the viewer.", "title": "Terrain Decimation through Quadtree Morphing", "normalizedTitle": "Terrain Decimation through Quadtree Morphing", "fno": "v0062", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Polygon Decimation", "Terrain Rendering", "Geomorph" ], "authors": [ { "givenName": "David", "surname": "Cline", "fullName": "David Cline", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Parris K.", "surname": "Egbert", "fullName": "Parris K. Egbert", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "62-69", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0047", "articleId": "13rRUxASuhm", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0070", "articleId": "13rRUNvgzit", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvgzit", "doi": "10.1109/2945.910824", "abstract": "Abstract—We present a new graphical representation of the level-of-detail state spaces generated by hierarchical geometric scene descriptions with multiple levels of detail. These level-of-detail graphs permit the analytical investigation of the hierarchical level-of-detail optimization problem that arises for such descriptions. As an example of their use, we prove the equivalence of two hierarchical level-of-detail algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present a new graphical representation of the level-of-detail state spaces generated by hierarchical geometric scene descriptions with multiple levels of detail. These level-of-detail graphs permit the analytical investigation of the hierarchical level-of-detail optimization problem that arises for such descriptions. As an example of their use, we prove the equivalence of two hierarchical level-of-detail algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present a new graphical representation of the level-of-detail state spaces generated by hierarchical geometric scene descriptions with multiple levels of detail. These level-of-detail graphs permit the analytical investigation of the hierarchical level-of-detail optimization problem that arises for such descriptions. As an example of their use, we prove the equivalence of two hierarchical level-of-detail algorithms.", "title": "A Graphical Representation of the State Spaces of Hierarchical Level-of-Detail Scene Descriptions", "normalizedTitle": "A Graphical Representation of the State Spaces of Hierarchical Level-of-Detail Scene Descriptions", "fno": "v0070", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Level Of Detail", "Hierarchical", "Scene Description", "Graph", "State Space", "Optimization", "Approximation" ], "authors": [ { "givenName": "Ashton E.W.", "surname": "Mason", "fullName": "Ashton E.W. Mason", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Edwin H.", "surname": "Blake", "fullName": "Edwin H. Blake", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "70-75", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0062", "articleId": "13rRUwwJWFD", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0076", "articleId": "13rRUxDIth4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxDIth4", "doi": "10.1109/2945.910825", "abstract": "Abstract—This paper develops nonlinear multiresolution techniques for scientific visualization utilizing haptic methods. The visualization of data is critical to many areas of scientific pursuit. Scientific visualization is generally accomplished through computer graphic techniques. Recent advances in haptic technologies allow visual techniques to be augmented with haptic methods. The kinesthetic feedback provided through haptic techniques provides a second modality for visualization and allows for active exploration. Moreover, haptic methods can be utilized by individuals with visual impairments. Haptic representations of large data sets, however, can be confusing to a user, especially if a visual representation is not available or cannot be used. Additionally, most haptic devices utilize point interactions, resulting in a low information bandwidth and further complicating data exploration. Multiresolution techniques can be utilized to address the issues of low information bandwidth and data complexity. Commonly used multiresolution techniques are based on the wavelet decomposition. Such linear techniques, however, tend to smooth important data features, such as discontinuities or edges. In contrast, nonlinear techniques can be utilized that preserve edge structures while removing fine data details. This paper develops a multiresolution data decomposition method based on the affine median filter. This results in a hybrid structure that can be tuned to yield a decomposition that varies from a linear wavelet decomposition to that produced by the median filter. The performance of this hybrid structure is analyzed utilizing deterministic signals and statistically in the frequency domain. This analysis and qualitative and quantitative implementation results show that the affine median decomposition has advantages over previously proposed methods. In addition to multiresolution decomposition development, analysis, and results, haptic implementation methods are presented.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper develops nonlinear multiresolution techniques for scientific visualization utilizing haptic methods. The visualization of data is critical to many areas of scientific pursuit. Scientific visualization is generally accomplished through computer graphic techniques. Recent advances in haptic technologies allow visual techniques to be augmented with haptic methods. The kinesthetic feedback provided through haptic techniques provides a second modality for visualization and allows for active exploration. Moreover, haptic methods can be utilized by individuals with visual impairments. Haptic representations of large data sets, however, can be confusing to a user, especially if a visual representation is not available or cannot be used. Additionally, most haptic devices utilize point interactions, resulting in a low information bandwidth and further complicating data exploration. Multiresolution techniques can be utilized to address the issues of low information bandwidth and data complexity. Commonly used multiresolution techniques are based on the wavelet decomposition. 
Such linear techniques, however, tend to smooth important data features, such as discontinuities or edges. In contrast, nonlinear techniques can be utilized that preserve edge structures while removing fine data details. This paper develops a multiresolution data decomposition method based on the affine median filter. This results in a hybrid structure that can be tuned to yield a decomposition that varies from a linear wavelet decomposition to that produced by the median filter. The performance of this hybrid structure is analyzed utilizing deterministic signals and statistically in the frequency domain. This analysis and qualitative and quantitative implementation results show that the affine median decomposition has advantages over previously proposed methods. In addition to multiresolution decomposition development, analysis, and results, haptic implementation methods are presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper develops nonlinear multiresolution techniques for scientific visualization utilizing haptic methods. The visualization of data is critical to many areas of scientific pursuit. Scientific visualization is generally accomplished through computer graphic techniques. Recent advances in haptic technologies allow visual techniques to be augmented with haptic methods. The kinesthetic feedback provided through haptic techniques provides a second modality for visualization and allows for active exploration. Moreover, haptic methods can be utilized by individuals with visual impairments. Haptic representations of large data sets, however, can be confusing to a user, especially if a visual representation is not available or cannot be used. Additionally, most haptic devices utilize point interactions, resulting in a low information bandwidth and further complicating data exploration. Multiresolution techniques can be utilized to address the issues of low information bandwidth and data complexity. Commonly used multiresolution techniques are based on the wavelet decomposition. Such linear techniques, however, tend to smooth important data features, such as discontinuities or edges. In contrast, nonlinear techniques can be utilized that preserve edge structures while removing fine data details. This paper develops a multiresolution data decomposition method based on the affine median filter. This results in a hybrid structure that can be tuned to yield a decomposition that varies from a linear wavelet decomposition to that produced by the median filter. The performance of this hybrid structure is analyzed utilizing deterministic signals and statistically in the frequency domain. This analysis and qualitative and quantitative implementation results show that the affine median decomposition has advantages over previously proposed methods. 
In addition to multiresolution decomposition development, analysis, and results, haptic implementation methods are presented.", "title": "Nonlinear Multiresolution Techniques with Applications to Scientific Visualization in a Haptic Environment", "normalizedTitle": "Nonlinear Multiresolution Techniques with Applications to Scientific Visualization in a Haptic Environment", "fno": "v0076", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Haptics", "Multiresolution", "Wavelets", "Nonlinear Filtering", "Blindness" ], "authors": [ { "givenName": "Mohammad Waqas", "surname": "Asghar", "fullName": "Mohammad Waqas Asghar", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Kenneth E.", "surname": "Barner", "fullName": "Kenneth E. Barner", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "76-93", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0070", "articleId": "13rRUNvgzit", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0094", "articleId": "13rRUILLkvf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzd7bm3", "title": "January-March", "year": "2001", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "7", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILLkvf", "doi": "10.1109/TVCG.2001.10000", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "2000 Reviewers List", "normalizedTitle": "2000 Reviewers List", "fno": "v0094", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "2001-01-01 00:00:00", "pubType": "trans", "pages": "94-96", "year": "2001", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0076", "articleId": "13rRUxDIth4", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYSWt1", "doi": "10.1109/TVCG.2016.2601607", "abstract": "Many types of virtual reality (VR) systems allow users to use natural, physical head movements to view a 3D environment. In some situations, such as when using systems that lack a fully surrounding display or when opting for convenient low-effort interaction, view control can be enabled through a combination of physical and virtual turns to view the environment, but the reduced realism could potentially interfere with the ability to maintain spatial orientation. One solution to this problem is to amplify head rotations such that smaller physical turns are mapped to larger virtual turns, allowing trainees to view the entire surrounding environment with small head movements. This solution is attractive because it allows semi-natural physical view control rather than requiring complete physical rotations or a fully-surrounding display. However, the effects of amplified head rotations on spatial orientation and many practical tasks are not well understood. In this paper, we present an experiment that evaluates the influence of amplified head rotation on 3D search, spatial orientation, and cybersickness. In the study, we varied the amount of amplification and also varied the type of display used (head-mounted display or surround-screen CAVE) for the VR search task. By evaluating participants first with amplification and then without, we were also able to study training transfer effects. The findings demonstrate the feasibility of using amplified head rotation to view 360 degrees of virtual space, but noticeable problems were identified when using high amplification with a head-mounted display. In addition, participants were able to more easily maintain a sense of spatial orientation when using the CAVE version of the application, which suggests that visibility of the user's body and awareness of the CAVE's physical environment may have contributed to the ability to use the amplification technique while keeping track of orientation.", "abstracts": [ { "abstractType": "Regular", "content": "Many types of virtual reality (VR) systems allow users to use natural, physical head movements to view a 3D environment. In some situations, such as when using systems that lack a fully surrounding display or when opting for convenient low-effort interaction, view control can be enabled through a combination of physical and virtual turns to view the environment, but the reduced realism could potentially interfere with the ability to maintain spatial orientation. One solution to this problem is to amplify head rotations such that smaller physical turns are mapped to larger virtual turns, allowing trainees to view the entire surrounding environment with small head movements. This solution is attractive because it allows semi-natural physical view control rather than requiring complete physical rotations or a fully-surrounding display. However, the effects of amplified head rotations on spatial orientation and many practical tasks are not well understood. In this paper, we present an experiment that evaluates the influence of amplified head rotation on 3D search, spatial orientation, and cybersickness. 
In the study, we varied the amount of amplification and also varied the type of display used (head-mounted display or surround-screen CAVE) for the VR search task. By evaluating participants first with amplification and then without, we were also able to study training transfer effects. The findings demonstrate the feasibility of using amplified head rotation to view 360 degrees of virtual space, but noticeable problems were identified when using high amplification with a head-mounted display. In addition, participants were able to more easily maintain a sense of spatial orientation when using the CAVE version of the application, which suggests that visibility of the user's body and awareness of the CAVE's physical environment may have contributed to the ability to use the amplification technique while keeping track of orientation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many types of virtual reality (VR) systems allow users to use natural, physical head movements to view a 3D environment. In some situations, such as when using systems that lack a fully surrounding display or when opting for convenient low-effort interaction, view control can be enabled through a combination of physical and virtual turns to view the environment, but the reduced realism could potentially interfere with the ability to maintain spatial orientation. One solution to this problem is to amplify head rotations such that smaller physical turns are mapped to larger virtual turns, allowing trainees to view the entire surrounding environment with small head movements. This solution is attractive because it allows semi-natural physical view control rather than requiring complete physical rotations or a fully-surrounding display. However, the effects of amplified head rotations on spatial orientation and many practical tasks are not well understood. In this paper, we present an experiment that evaluates the influence of amplified head rotation on 3D search, spatial orientation, and cybersickness. In the study, we varied the amount of amplification and also varied the type of display used (head-mounted display or surround-screen CAVE) for the VR search task. By evaluating participants first with amplification and then without, we were also able to study training transfer effects. The findings demonstrate the feasibility of using amplified head rotation to view 360 degrees of virtual space, but noticeable problems were identified when using high amplification with a head-mounted display. In addition, participants were able to more easily maintain a sense of spatial orientation when using the CAVE version of the application, which suggests that visibility of the user's body and awareness of the CAVE's physical environment may have contributed to the ability to use the amplification technique while keeping track of orientation.", "title": "Amplified Head Rotation in Virtual Reality and the Effects on 3D Search, Training Transfer, and Spatial Orientation", "normalizedTitle": "Amplified Head Rotation in Virtual Reality and the Effects on 3D Search, Training Transfer, and Spatial Orientation", "fno": "07547900", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Training", "Legged Locomotion", "Three Dimensional Displays", "Games", "Navigation", "Visualization", "Virtual Reality", "Spatial Orientation", "Rotation Amplification", "3 D Interaction", "Search", "Cybersickness" ], "authors": [ { "givenName": "Eric D.", "surname": "Ragan", "fullName": "Eric D. 
Ragan", "affiliation": "Texas A&M University, College Station, TX", "__typename": "ArticleAuthorType" }, { "givenName": "Siroberto", "surname": "Scerbo", "fullName": "Siroberto Scerbo", "affiliation": "Virginia Tech, Blacksburg, VA", "__typename": "ArticleAuthorType" }, { "givenName": "Felipe", "surname": "Bacim", "fullName": "Felipe Bacim", "affiliation": "Virginia Tech, Blacksburg, VA", "__typename": "ArticleAuthorType" }, { "givenName": "Doug A.", "surname": "Bowman", "fullName": "Doug A. Bowman", "affiliation": "Virginia Tech, Blacksburg, VA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1880-1895", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550220", "title": "Poster: Head motion transmission based on center of rotation", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550220/12OmNqJ8tlm", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460041", "title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460041/12OmNwF0BS2", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892227", "title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446495", "title": "Head-to-Body-Pose Classification in No-Pose VR Tracking Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446495/13bd1f3HvEZ", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446300", "title": "Human Compensation Strategies for Orientation Drifts", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446300/13bd1fdV4lD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/08/07501805", "title": "Extended LazyNav: Virtual 3D Ground Navigation for Large Displays and Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2017/08/07501805/13rRUyuegpb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699200", "title": "Effective Free Field of 
View Scene Exploration in VR and AR", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10075482", "title": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/10075482/1LAuCOR3RE4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a504", "title": "Velocity Guided Amplification of View Rotation for Seated VR Scene Exploration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a504/1tnXyTs22BO", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523894", "title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07494624", "articleId": "13rRUxcbnHg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcbnHg", "doi": "10.1109/TVCG.2016.2582174", "abstract": "We present an analysis and visualization prototype using the concept of a flow topology graph (FTG) for characterization of flow in constrained networks, with a focus on discrete fracture networks (DFN), developed collaboratively by geoscientists and visualization scientists. Our method allows users to understand and evaluate flow and transport in DFN simulations by computing statistical distributions, segment paths of interest, and cluster particles based on their paths. The new approach enables domain scientists to evaluate the accuracy of the simulations, visualize features of interest, and compare multiple realizations over a specific domain of interest. Geoscientists can simulate complex transport phenomena modeling large sites for networks consisting of several thousand fractures without compromising the geometry of the network. However, few tools exist for performing higher-level analysis and visualization of simulated DFN data. The prototype system we present addresses this need. We demonstrate its effectiveness for increasingly complex examples of DFNs, covering two distinct use cases – hydrocarbon extraction from unconventional resources and transport of dissolved contaminant from a spent nuclear fuel repository.", "abstracts": [ { "abstractType": "Regular", "content": "We present an analysis and visualization prototype using the concept of a flow topology graph (FTG) for characterization of flow in constrained networks, with a focus on discrete fracture networks (DFN), developed collaboratively by geoscientists and visualization scientists. Our method allows users to understand and evaluate flow and transport in DFN simulations by computing statistical distributions, segment paths of interest, and cluster particles based on their paths. The new approach enables domain scientists to evaluate the accuracy of the simulations, visualize features of interest, and compare multiple realizations over a specific domain of interest. Geoscientists can simulate complex transport phenomena modeling large sites for networks consisting of several thousand fractures without compromising the geometry of the network. However, few tools exist for performing higher-level analysis and visualization of simulated DFN data. The prototype system we present addresses this need. We demonstrate its effectiveness for increasingly complex examples of DFNs, covering two distinct use cases – hydrocarbon extraction from unconventional resources and transport of dissolved contaminant from a spent nuclear fuel repository.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an analysis and visualization prototype using the concept of a flow topology graph (FTG) for characterization of flow in constrained networks, with a focus on discrete fracture networks (DFN), developed collaboratively by geoscientists and visualization scientists. Our method allows users to understand and evaluate flow and transport in DFN simulations by computing statistical distributions, segment paths of interest, and cluster particles based on their paths. 
The new approach enables domain scientists to evaluate the accuracy of the simulations, visualize features of interest, and compare multiple realizations over a specific domain of interest. Geoscientists can simulate complex transport phenomena modeling large sites for networks consisting of several thousand fractures without compromising the geometry of the network. However, few tools exist for performing higher-level analysis and visualization of simulated DFN data. The prototype system we present addresses this need. We demonstrate its effectiveness for increasingly complex examples of DFNs, covering two distinct use cases – hydrocarbon extraction from unconventional resources and transport of dissolved contaminant from a spent nuclear fuel repository.", "title": "Analysis and Visualization of Discrete Fracture Networks Using a Flow Topology Graph", "normalizedTitle": "Analysis and Visualization of Discrete Fracture Networks Using a Flow Topology Graph", "fno": "07494624", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Topology", "Analytical Models", "Network Topology", "Trajectory", "Computational Modeling", "Geometry", "Fracture Network Flow Analysis And Visualization", "Flow Topology Graph", "Topological Path Analysis", "Topological Trace Clustering", "Flow In Fractured Rock", "Discrete Fracture Network" ], "authors": [ { "givenName": "Garrett", "surname": "Aldrich", "fullName": "Garrett Aldrich", "affiliation": "Data Science at Scale Division, (CCS-7), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Jeffrey D.", "surname": "Hyman", "fullName": "Jeffrey D. Hyman", "affiliation": "Earth and Environmental Sciences Division, (EES-16), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Satish", "surname": "Karra", "fullName": "Satish Karra", "affiliation": "Earth and Environmental Sciences Division, (EES-16), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Carl W.", "surname": "Gable", "fullName": "Carl W. 
Gable", "affiliation": "Earth and Environmental Sciences Division, (EES-16), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Nataliia", "surname": "Makedonska", "fullName": "Nataliia Makedonska", "affiliation": "Earth and Environmental Sciences Division, (EES-16), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Hari", "surname": "Viswanathan", "fullName": "Hari Viswanathan", "affiliation": "Earth and Environmental Sciences Division, (EES-16), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Jonathan", "surname": "Woodring", "fullName": "Jonathan Woodring", "affiliation": "Data Science at Scale Division, (CCS-7), Los Alamos National Laboratory, Los Alamos, NM", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hamann", "fullName": "Bernd Hamann", "affiliation": "Department of Computer Science, Institute for Data Analysis and Visualization, University of California, Davis, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1896-1909", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2016/8942/0/8942a009", "title": "Feature Extraction and Visualization for Symbolic People Flow Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a009/12OmNAsk4Fh", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1993/3940/0/00398896", "title": "Visualizing results of transient flow simulations", "doi": null, "abstractUrl": "/proceedings-article/visual/1993/00398896/12OmNBQ2VVI", "parentPublication": { "id": "proceedings/visual/1993/3940/0", "title": "Proceedings Visualization '93", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2009/3859/2/3859b121", "title": "Study on Flow Unit Visualization", "doi": null, "abstractUrl": "/proceedings-article/iita/2009/3859b121/12OmNBRbkuk", "parentPublication": { "id": "proceedings/iita/2009/3859/2", "title": "2009 Third International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1999/5897/0/00809907", "title": "Collapsing flow topology using area metrics", "doi": null, "abstractUrl": "/proceedings-article/visual/1999/00809907/12OmNwvDQt3", "parentPublication": { "id": "proceedings/visual/1999/5897/0", "title": "Proceedings Visualization '99", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346329", "title": "Feature detection from vector quantities in a numerically simulated hypersonic flow field in combination with experimental flow visualization", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346329/12OmNx6PiB1", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ieee-vis/1999/5897/0/58970059", "title": "Collapsing Flow Topology Using Area Metrics", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970059/12OmNzVXNYe", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v0949", "title": "Visualization Tools for Vorticity Transport Analysis in Incompressible Flow", "doi": null, "abstractUrl": "/journal/tg/2006/05/v0949/13rRUwcS1CN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2020/02/09020213", "title": "Visibility, Topology, and Inertia: New Methods in Flow Visualization", "doi": null, "abstractUrl": "/magazine/cg/2020/02/09020213/1hS2P6PIkIU", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222254", "title": "Extraction and Visualization of Poincare Map Topology for Spacecraft Trajectory Design", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222254/1nTqoumGCS4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09325131", "title": "Enabling Crosscutting Visualization for Geoscience", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09325131/1qnR80v9yX6", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07547900", "articleId": "13rRUyYSWt1", "__typename": "AdjacentArticleType" }, "next": { "fno": "07491376", "articleId": "13rRUxBa5nr", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBa5nr", "doi": "10.1109/TVCG.2016.2569084", "abstract": "While ASCII art is a worldwide popular art form, automatic generating structure-based ASCII art from natural photographs remains challenging. The major challenge lies on extracting the perception-sensitive structure from the natural photographs so that a more concise ASCII art reproduction can be produced based on the structure. However, due to excessive amount of texture in natural photos, extracting perception-sensitive structure is not easy, especially when the structure may be weak and within the texture region. Besides, to fit different target text resolutions, the amount of the extracted structure should also be controllable. To tackle these challenges, we introduce a visual perception mechanism of non-classical receptive field modulation (non-CRF modulation) from physiological findings to this ASCII art application, and propose a new model of non-CRF modulation which can better separate the weak structure from the crowded texture, and also better control the scale of texture suppression. Thanks to our non-CRF model, more sensible ASCII art reproduction can be obtained. In addition, to produce more visually appealing ASCII arts, we propose a novel optimization scheme to obtain the optimal placement of proportional-font characters. We apply our method on a rich variety of images, and visually appealing ASCII art can be obtained in all cases.", "abstracts": [ { "abstractType": "Regular", "content": "While ASCII art is a worldwide popular art form, automatic generating structure-based ASCII art from natural photographs remains challenging. The major challenge lies on extracting the perception-sensitive structure from the natural photographs so that a more concise ASCII art reproduction can be produced based on the structure. However, due to excessive amount of texture in natural photos, extracting perception-sensitive structure is not easy, especially when the structure may be weak and within the texture region. Besides, to fit different target text resolutions, the amount of the extracted structure should also be controllable. To tackle these challenges, we introduce a visual perception mechanism of non-classical receptive field modulation (non-CRF modulation) from physiological findings to this ASCII art application, and propose a new model of non-CRF modulation which can better separate the weak structure from the crowded texture, and also better control the scale of texture suppression. Thanks to our non-CRF model, more sensible ASCII art reproduction can be obtained. In addition, to produce more visually appealing ASCII arts, we propose a novel optimization scheme to obtain the optimal placement of proportional-font characters. We apply our method on a rich variety of images, and visually appealing ASCII art can be obtained in all cases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "While ASCII art is a worldwide popular art form, automatic generating structure-based ASCII art from natural photographs remains challenging. 
The major challenge lies in extracting the perception-sensitive structure from the natural photographs so that a more concise ASCII art reproduction can be produced based on the structure. However, due to the excessive amount of texture in natural photos, extracting perception-sensitive structure is not easy, especially when the structure may be weak and within the texture region. Besides, to fit different target text resolutions, the amount of the extracted structure should also be controllable. To tackle these challenges, we introduce a visual perception mechanism of non-classical receptive field modulation (non-CRF modulation) from physiological findings to this ASCII art application, and propose a new model of non-CRF modulation which can better separate the weak structure from the crowded texture, and also better control the scale of texture suppression. Thanks to our non-CRF model, more sensible ASCII art reproduction can be obtained. In addition, to produce more visually appealing ASCII art, we propose a novel optimization scheme to obtain the optimal placement of proportional-font characters. We apply our method to a rich variety of images, and visually appealing ASCII art can be obtained in all cases.", "title": "ASCII Art Synthesis from Natural Photographs", "normalizedTitle": "ASCII Art Synthesis from Natural Photographs", "fno": "07491376", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Art", "Modulation", "Visualization", "Computational Modeling", "Optimization", "Detectors", "Smoothing Methods", "ASCII Art Synthesis", "Non Classical Receptive Field Modulation", "Texture Suppression" ], "authors": [ { "givenName": "Xuemiao", "surname": "Xu", "fullName": "Xuemiao Xu", "affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Linyuan", "surname": "Zhong", "fullName": "Linyuan Zhong", "affiliation": "South China University of Technology, GuangZhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Minshan", "surname": "Xie", "fullName": "Minshan Xie", "affiliation": "South China University of Technology, GuangZhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xueting", "surname": "Liu", "fullName": "Xueting Liu", "affiliation": "Chinese University of Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Jing", "surname": "Qin", "fullName": "Jing Qin", "affiliation": "School of Nursing, the Hong Kong Polytechnic University, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Tien-Tsin", "surname": "Wong", "fullName": "Tien-Tsin Wong", "affiliation": "Chinese University of Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1910-1923", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/malware/2008/3288/0/04690859", "title": "Image spam — ASCII to the rescue!", "doi": null, "abstractUrl": "/proceedings-article/malware/2008/04690859/12OmNAio6Zo", "parentPublication": { "id": "proceedings/malware/2008/3288/0", "title": "2008 3rd International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2013/2796/0/06726897", "title": 
"ASCII Art Generation Using the Local Exhaustive Search on the GPU", "doi": null, "abstractUrl": "/proceedings-article/candar/2013/06726897/12OmNvlg8k1", "parentPublication": { "id": "proceedings/candar/2013/2796/0", "title": "2013 First International Symposium on Computing and Networking - Across Practical Development and Theoretical Research (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/075P1B22", "title": "Learning image-specific parameters for interactive segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/075P1B22/12OmNyvGyjD", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/03/ttg2014030436", "title": "Optimized Synthesis of Art Patterns and Layered Textures", "doi": null, "abstractUrl": "/journal/tg/2014/03/ttg2014030436/13rRUyfKIHO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07938357", "title": "Image Structure Retrieval via <inline-formula><tex-math notation=\"LaTeX\">Z_$L_0$_Z</tex-math></inline-formula> Minimization", "doi": null, "abstractUrl": "/journal/tg/2018/07/07938357/13rRUygT7fi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486465", "title": "Structure-Texture Decomposition via Joint Structure Discovery and Texture Smoothing", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486465/14jQfP4XL27", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08585158", "title": "Saliency-Aware Texture Smoothing", "doi": null, "abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2019/3014/0/301400b338", "title": "Linking Art through Human Poses", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400b338/1h81qkiMPkY", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2021/3263/0/326300a106", "title": "Design of online interactive education system of art course based on B/S architecture", "doi": null, "abstractUrl": "/proceedings-article/icsgea/2021/326300a106/1vb9iSLa9VK", "parentPublication": { "id": "proceedings/icsgea/2021/3263/0", "title": "2021 6th International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a296", "title": "Design and implementation of immersive ink art", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a296/1vg7DnnY38k", 
"parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07494624", "articleId": "13rRUxcbnHg", "__typename": "AdjacentArticleType" }, "next": { "fno": "07536161", "articleId": "13rRUxBJhvC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesWS", "name": "ttg201708-07491376s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07491376s1.zip", "extension": "zip", "size": "11.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBJhvC", "doi": "10.1109/TVCG.2016.2598570", "abstract": "Decorating the surfaces of 3D printed objects with color textures is still not readily available in most consumer-level or even high-end 3D printers. Existing techniques such as hydrographics color transfer suffer from the issues of air pockets in concave regions and discoloration in overly stretched regions. We propose a novel thermoforming-based coloring technique to alleviate these problems as well as to simplify the overall procedure. Thermoforming is a widely used technique in industry for plastic thin shell product manufacturing by pressing heated plastic sheets onto molds using atmospheric pressure. We attach on the transparent plastic sheet a precomputed color pattern decal prior to heating, and adhere it to 3D printed models treated as the molds in thermoforming. The 3D models are thus decorated with the desired color texture, as well as a thin, polished protective cover. The precomputation involves a physical simulation of the thermoforming process to compute the correct color pattern on the plastic sheet, and the vent hole layout on the 3D model for air pocket elimination. We demonstrate the effectiveness and accuracy of our computational model and our prototype thermoforming surface coloring system through physical experiments.", "abstracts": [ { "abstractType": "Regular", "content": "Decorating the surfaces of 3D printed objects with color textures is still not readily available in most consumer-level or even high-end 3D printers. Existing techniques such as hydrographics color transfer suffer from the issues of air pockets in concave regions and discoloration in overly stretched regions. We propose a novel thermoforming-based coloring technique to alleviate these problems as well as to simplify the overall procedure. Thermoforming is a widely used technique in industry for plastic thin shell product manufacturing by pressing heated plastic sheets onto molds using atmospheric pressure. We attach on the transparent plastic sheet a precomputed color pattern decal prior to heating, and adhere it to 3D printed models treated as the molds in thermoforming. The 3D models are thus decorated with the desired color texture, as well as a thin, polished protective cover. The precomputation involves a physical simulation of the thermoforming process to compute the correct color pattern on the plastic sheet, and the vent hole layout on the 3D model for air pocket elimination. We demonstrate the effectiveness and accuracy of our computational model and our prototype thermoforming surface coloring system through physical experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Decorating the surfaces of 3D printed objects with color textures is still not readily available in most consumer-level or even high-end 3D printers. Existing techniques such as hydrographics color transfer suffer from the issues of air pockets in concave regions and discoloration in overly stretched regions. We propose a novel thermoforming-based coloring technique to alleviate these problems as well as to simplify the overall procedure. 
Thermoforming is a widely used technique in industry for thin-shell plastic product manufacturing by pressing heated plastic sheets onto molds using atmospheric pressure. We attach a precomputed color pattern decal to the transparent plastic sheet prior to heating, and adhere it to 3D printed models treated as the molds in thermoforming. The 3D models are thus decorated with the desired color texture, as well as a thin, polished protective cover. The precomputation involves a physical simulation of the thermoforming process to compute the correct color pattern on the plastic sheet, and the vent hole layout on the 3D model for air pocket elimination. We demonstrate the effectiveness and accuracy of our computational model and our prototype thermoforming surface coloring system through physical experiments.", "title": "Coloring 3D Printed Surfaces by Thermoforming", "normalizedTitle": "Coloring 3D Printed Surfaces by Thermoforming", "fno": "07536161", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Solid Modeling", "Atmospheric Modeling", "Computational Modeling", "Three Dimensional Displays", "Thermoforming", "Plastics", "Color", "3 D Printing", "Thermoforming", "Thermoplastic Sheet Simulation", "Texture Mapping" ], "authors": [ { "givenName": "Yizhong", "surname": "Zhang", "fullName": "Yizhong Zhang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Mengminwei Building, Zijingang Campus, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yiying", "surname": "Tong", "fullName": "Yiying Tong", "affiliation": "Computer Science and Engineering Department, Michigan State University, 428 S. Shaw Lane, Room 3115, MI", "__typename": "ArticleAuthorType" }, { "givenName": "Kun", "surname": "Zhou", "fullName": "Kun Zhou", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Mengminwei Building, Zijingang Campus, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1924-1935", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457a791", "title": "SurfNet: Generating 3D Shape Surfaces Using Deep Residual Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a791/12OmNCwlaeO", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671808", "title": "Augmenting markerless complex 3D objects by combining geometrical and color edge information", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671808/12OmNxYbSX3", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2016/0811/0/0811a024", "title": "Finite Element Simulation of 2.5/3D Shaped and Rigid Electronic Circuits", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2016/0811a024/12OmNxw5B86", "parentPublication": { "id": "proceedings/cgiv/2016/0811/0", "title": "2016 13th International Conference on Computer Graphics, Imaging and
Visualization (CGiV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/06/mcg2013060036", "title": "Fabricating 3D Figurines with Personalized Faces", "doi": null, "abstractUrl": "/magazine/cg/2013/06/mcg2013060036/13rRUzp02qx", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600b496", "title": "Photorealistic Monocular 3D Reconstruction of Humans Wearing Clothing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600b496/1H0LbGobg6A", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c252", "title": "3D Scene Painting via Semantic Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c252/1H1lSPqCX04", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09275396", "title": "Deep Exemplar-Based Color Transfer for 3D Model", "doi": null, "abstractUrl": "/journal/tg/2022/08/09275396/1pcOtl2sFuU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455963", "title": "A No-Reference Visual Quality Metric For 3D Color Meshes", "doi": null, "abstractUrl": "/proceedings-article/icmew/2021/09455963/1uCgrVwWrbq", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a216", "title": "The Algorithm and Application of Fast Surface Slice and Color Acquisition for Color 3D Printing", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a216/1xqyJROWDeg", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b062", "title": "Point2color: 3D Point Cloud Colorization Using a Conditional Generative Network and Differentiable Rendering for Airborne LiDAR", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b062/1yJYtFBEASI", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07491376", "articleId": "13rRUxBa5nr", "__typename": "AdjacentArticleType" }, "next": { "fno": "07516722", "articleId": "13rRUwh80uD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesPz", "name": "ttg201708-07536161s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07536161s1.zip", "extension": "zip", 
"size": "38.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwh80uD", "doi": "10.1109/TVCG.2016.2592906", "abstract": "Wall-displays allow multiple users to simultaneously view and analyze large amounts of information, such as the increasingly complex graphs present in domains like biology or social network analysis. We focus on how pairs explore graphs on a touch enabled wall-display using two techniques, both adapted for collaboration: a basic localized selection, and a propagation selection technique that uses the idea of diffusion/transmission from an origin node. We assess in a controlled experiment the impact of selection technique on a shortest path identification task. Pairs consistently divided space even if the task is not spatially divisible, and for the basic selection technique that has a localized visual effect, it led to parallel work that negatively impacted accuracy. The large visual footprint of the propagation technique led to close coordination, improving speed and accuracy for complex graphs only. We then observed the use of propagation on additional graph topology tasks, confirming pair strategies on spatial division and coordination.", "abstracts": [ { "abstractType": "Regular", "content": "Wall-displays allow multiple users to simultaneously view and analyze large amounts of information, such as the increasingly complex graphs present in domains like biology or social network analysis. We focus on how pairs explore graphs on a touch enabled wall-display using two techniques, both adapted for collaboration: a basic localized selection, and a propagation selection technique that uses the idea of diffusion/transmission from an origin node. We assess in a controlled experiment the impact of selection technique on a shortest path identification task. Pairs consistently divided space even if the task is not spatially divisible, and for the basic selection technique that has a localized visual effect, it led to parallel work that negatively impacted accuracy. The large visual footprint of the propagation technique led to close coordination, improving speed and accuracy for complex graphs only. We then observed the use of propagation on additional graph topology tasks, confirming pair strategies on spatial division and coordination.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Wall-displays allow multiple users to simultaneously view and analyze large amounts of information, such as the increasingly complex graphs present in domains like biology or social network analysis. We focus on how pairs explore graphs on a touch enabled wall-display using two techniques, both adapted for collaboration: a basic localized selection, and a propagation selection technique that uses the idea of diffusion/transmission from an origin node. We assess in a controlled experiment the impact of selection technique on a shortest path identification task. Pairs consistently divided space even if the task is not spatially divisible, and for the basic selection technique that has a localized visual effect, it led to parallel work that negatively impacted accuracy. The large visual footprint of the propagation technique led to close coordination, improving speed and accuracy for complex graphs only. 
We then observed the use of propagation on additional graph topology tasks, confirming pair strategies on spatial division and coordination.", "title": "Evaluating Multi-User Selection for Exploring Graph Topology on Wall-Displays", "normalizedTitle": "Evaluating Multi-User Selection for Exploring Graph Topology on Wall-Displays", "fno": "07516722", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Collaboration", "Topology", "Navigation", "Data Visualization", "Keyboards", "Mice", "Wall Displays", "Multi User Interaction", "Graph Visualization", "Selection Techniques", "Co Located Collaboration" ], "authors": [ { "givenName": "Arnaud", "surname": "Prouzeau", "fullName": "Arnaud Prouzeau", "affiliation": "Univ Paris-Sud & CNRS (LRI), Inria", "__typename": "ArticleAuthorType" }, { "givenName": "Anastasia", "surname": "Bezerianos", "fullName": "Anastasia Bezerianos", "affiliation": "Univ Paris-Sud & CNRS (LRI), Inria", "__typename": "ArticleAuthorType" }, { "givenName": "Olivier", "surname": "Chapuis", "fullName": "Olivier Chapuis", "affiliation": "Univ Paris-Sud & CNRS (LRI), Inria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1936-1951", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/nbis/2009/3767/0/3767a612", "title": "Study on Realistic Communication Technology with Tiled Displays Wall", "doi": null, "abstractUrl": "/proceedings-article/nbis/2009/3767a612/12OmNqBKUf2", "parentPublication": { "id": "proceedings/nbis/2009/3767/0", "title": "2009 International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/infvis/2003/8154/0/01249002", "title": "Exploding the frame: designing for wall-size computer displays", "doi": null, "abstractUrl": "/proceedings-article/infvis/2003/01249002/12OmNrYCXId", "parentPublication": { "id": "proceedings/infvis/2003/8154/0", "title": "IEEE Symposium on Information Visualization 2003", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2015/1725/0/07151687", "title": "Interactive display conglomeration on the wall", "doi": null, "abstractUrl": "/proceedings-article/wevr/2015/07151687/12OmNs0C9Ln", "parentPublication": { "id": "proceedings/wevr/2015/1725/0", "title": "2015 IEEE 1st Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/blocks-and-beyond/2015/8367/0/07369005", "title": "Position paper: Towards making block-based programming accessible for blind users", "doi": null, "abstractUrl": "/proceedings-article/blocks-and-beyond/2015/07369005/12OmNx8Ousc", "parentPublication": { "id": "proceedings/blocks-and-beyond/2015/8367/0", "title": "2015 IEEE Blocks and Beyond Workshop (Blocks and Beyond)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2003/2055/0/01249002", "title": "Exploding the frame: designing for wall-size computer displays", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2003/01249002/12OmNy50ggl", "parentPublication": { "id": "proceedings/ieee-infovis/2003/2055/0", "title": "Information Visualization, IEEE Symposium 
on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a366", "title": "A Gesture Control Framework Targeting High-Resolution Video Wall Displays", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a366/12OmNzgwmQF", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122516", "title": "Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122516/13rRUwwJWFM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2017/0443/0/08103479", "title": "Investigating uni-stroke gesture input for diagram editors on large wall-mounted touchscreens", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2017/08103479/17D45XDIXT3", "parentPublication": { "id": "proceedings/vlhcc/2017/0443/0", "title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797980", "title": "Exploring Scalable WorkSpace Based on Virtual and Physical Movable Wall", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797980/1cJ14PgErfi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a011", "title": "Collaborative Visual Analysis with Multi-level Information Sharing Using a Wall-Size Display and See-Through HMDs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a011/1cMF7IJ33Lq", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07536161", "articleId": "13rRUxBJhvC", "__typename": "AdjacentArticleType" }, "next": { "fno": "07501805", "articleId": "13rRUyuegpb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgxq", "name": "ttg201708-07516722s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07516722s1.zip", "extension": "zip", "size": "34.6 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyuegpb", "doi": "10.1109/TVCG.2016.2586071", "abstract": "This paper presents the extended work on LazyNav, a head-free, eyes-free and hands-free mid-air ground navigation control model presented at the IEEE 3D User Interfaces (3DUI) 2015, in particular with a new application to the head-mounted display (HMD). Our mid-air interaction metaphor makes use of only a single pair of the remaining tracked body elements to tailor the navigation. Therefore, the user can navigate in the scene while still being able to perform other interactions with her hands and head, e.g., carrying a bag, grasping a cup of coffee, or observing the content by moving her eyes and locally rotating her head. We design several body motions for navigation by considering the use of non-critical body parts and develop assumptions about ground navigation techniques. Through the user studies, we investigate the motions that are easy to discover, easy to control, socially acceptable, accurate and not tiring. Finally, we evaluate the desired ground navigation features with a prototype application in both a large display (LD) and a HMD navigation scenarios. We highlight several recommendations for designing a particular mid-air ground navigation technique for a LD and a HMD.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents the extended work on LazyNav, a head-free, eyes-free and hands-free mid-air ground navigation control model presented at the IEEE 3D User Interfaces (3DUI) 2015, in particular with a new application to the head-mounted display (HMD). Our mid-air interaction metaphor makes use of only a single pair of the remaining tracked body elements to tailor the navigation. Therefore, the user can navigate in the scene while still being able to perform other interactions with her hands and head, e.g., carrying a bag, grasping a cup of coffee, or observing the content by moving her eyes and locally rotating her head. We design several body motions for navigation by considering the use of non-critical body parts and develop assumptions about ground navigation techniques. Through the user studies, we investigate the motions that are easy to discover, easy to control, socially acceptable, accurate and not tiring. Finally, we evaluate the desired ground navigation features with a prototype application in both a large display (LD) and a HMD navigation scenarios. We highlight several recommendations for designing a particular mid-air ground navigation technique for a LD and a HMD.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents the extended work on LazyNav, a head-free, eyes-free and hands-free mid-air ground navigation control model presented at the IEEE 3D User Interfaces (3DUI) 2015, in particular with a new application to the head-mounted display (HMD). Our mid-air interaction metaphor makes use of only a single pair of the remaining tracked body elements to tailor the navigation. Therefore, the user can navigate in the scene while still being able to perform other interactions with her hands and head, e.g., carrying a bag, grasping a cup of coffee, or observing the content by moving her eyes and locally rotating her head. 
We design several body motions for navigation by considering the use of non-critical body parts and develop assumptions about ground navigation techniques. Through the user studies, we investigate the motions that are easy to discover, easy to control, socially acceptable, accurate and not tiring. Finally, we evaluate the desired ground navigation features with a prototype application in both large display (LD) and HMD navigation scenarios. We highlight several recommendations for designing a particular mid-air ground navigation technique for an LD and an HMD.", "title": "Extended LazyNav: Virtual 3D Ground Navigation for Large Displays and Head-Mounted Displays", "normalizedTitle": "Extended LazyNav: Virtual 3D Ground Navigation for Large Displays and Head-Mounted Displays", "fno": "07501805", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Navigation", "Three Dimensional Displays", "Hip", "Tracking", "Space Vehicles", "Legged Locomotion", "Sensors", "3 D User Interface", "Spatial Interaction", "Virtual Reality", "Navigation" ], "authors": [ { "givenName": "Parinya", "surname": "Punpongsanon", "fullName": "Parinya Punpongsanon", "affiliation": "Graduate School of Engineering Science, Osaka University, Toyonaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Emilie", "surname": "Guy", "fullName": "Emilie Guy", "affiliation": "LTCI, CNRS, Telecom ParisTech, University Paris-Saclay, Paris, France", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Graduate School of Engineering Science, Osaka University, Toyonaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Graduate School of Engineering Science, Osaka University, Toyonaka, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Tamy", "surname": "Boubekeur", "fullName": "Tamy Boubekeur", "affiliation": "LTCI, CNRS, Telecom ParisTech, University Paris-Saclay, Paris, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1952-1963", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643561", "title": "Effects of a retroreflective screen on depth perception in a head-mounted projection display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643561/12OmNB9bvby", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131725", "title": "LazyNav: 3D ground navigation with non-critical body parts", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131725/12OmNBCqbId", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460041", "title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460041/12OmNwF0BS2", "parentPublication": { "id": 
"proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223389", "title": "The effect of head mounted display weight and locomotion method on the perceived naturalness of virtual walking speeds", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223389/12OmNwqft3l", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892227", "title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549395", "title": "Flexible and general redirected walking for head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a649", "title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523894", "title": "Head-Mounted Display with Increased Downward Field of View Improves Presence and Sense of Self-Location", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523894/1wpqkPb7CSY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a413", "title": "Selective Foveated Ray Tracing for Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07516722", "articleId": "13rRUwh80uD", "__typename": "AdjacentArticleType" }, "next": { "fno": "07530932", "articleId": "13rRUxASubC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYet2x", "name": 
"ttg201708-07501805s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07501805s1.zip", "extension": "zip", "size": "1.57 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASubC", "doi": "10.1109/TVCG.2016.2597827", "abstract": "Multi-destination maps are a kind of navigation maps aimed to guide visitors to multiple destinations within a region, which can be of great help to urban visitors. However, they have not been developed in the current online map service. To address this issue, we introduce a novel layout model designed especially for generating multi-destination maps, which considers the global and local layout of a multi-destination map. We model the layout problem as a graph drawing that satisfies a set of hard and soft constraints. In the global layout phase, we balance the scale factor between ROIs. In the local layout phase, we make all edges have good visibility and optimize the map layout to preserve the relative length and angle of roads. We also propose a perturbation-based optimization method to find an optimal layout in the complex solution space. The multi-destination maps generated by our system are potential feasible on the modern mobile devices and our result can show an overview and a detail view of the whole map at the same time. In addition, we perform a user study to evaluate the effectiveness of our method, and the results prove that the multi-destination maps achieve our goals well.", "abstracts": [ { "abstractType": "Regular", "content": "Multi-destination maps are a kind of navigation maps aimed to guide visitors to multiple destinations within a region, which can be of great help to urban visitors. However, they have not been developed in the current online map service. To address this issue, we introduce a novel layout model designed especially for generating multi-destination maps, which considers the global and local layout of a multi-destination map. We model the layout problem as a graph drawing that satisfies a set of hard and soft constraints. In the global layout phase, we balance the scale factor between ROIs. In the local layout phase, we make all edges have good visibility and optimize the map layout to preserve the relative length and angle of roads. We also propose a perturbation-based optimization method to find an optimal layout in the complex solution space. The multi-destination maps generated by our system are potential feasible on the modern mobile devices and our result can show an overview and a detail view of the whole map at the same time. In addition, we perform a user study to evaluate the effectiveness of our method, and the results prove that the multi-destination maps achieve our goals well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multi-destination maps are a kind of navigation maps aimed to guide visitors to multiple destinations within a region, which can be of great help to urban visitors. However, they have not been developed in the current online map service. To address this issue, we introduce a novel layout model designed especially for generating multi-destination maps, which considers the global and local layout of a multi-destination map. We model the layout problem as a graph drawing that satisfies a set of hard and soft constraints. In the global layout phase, we balance the scale factor between ROIs. 
In the local layout phase, we ensure that all edges have good visibility and optimize the map layout to preserve the relative length and angle of roads. We also propose a perturbation-based optimization method to find an optimal layout in the complex solution space. The multi-destination maps generated by our system are feasible on modern mobile devices, and our results can show an overview and a detail view of the whole map at the same time. In addition, we perform a user study to evaluate the effectiveness of our method, and the results show that the multi-destination maps achieve our goals well.", "title": "Generating Multi-Destination Maps", "normalizedTitle": "Generating Multi-Destination Maps", "fno": "07530932", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cartography", "Data Visualisation", "Graph Theory", "Mobile Computing", "Optimisation", "Multidestination Maps", "Online Map Service", "Graph Drawing", "Global Layout Phase", "Layout Optimization", "Mobile Devices", "Visualization", "Roads", "Layout", "Visualization", "Navigation", "Trajectory", "Optimization Methods", "Multi Destination Maps", "Visualization", "Layout Optimization", "Urban Network", "Traffic Visualization", "Geographic Geospatial Visualization" ], "authors": [ { "givenName": "Junsong", "surname": "Zhang", "fullName": "Junsong Zhang", "affiliation": "Cognitive Science Department, Mind, Art and Computation Group, Xiamen University, Xiamen, Fujian, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiepeng", "surname": "Fan", "fullName": "Jiepeng Fan", "affiliation": "Cognitive Science Department, Mind, Art and Computation Group, Xiamen University, Xiamen, Fujian, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhenshan", "surname": "Luo", "fullName": "Zhenshan Luo", "affiliation": "Cognitive Science Department, Mind, Art and Computation Group, Xiamen University, Xiamen, Fujian, P.R. 
China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1964-1976", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hpcc/2016/4297/0/07828526", "title": "Registration of Low Cost Maps within Large Scale MMS Maps", "doi": null, "abstractUrl": "/proceedings-article/hpcc/2016/07828526/12OmNApLGsF", "parentPublication": { "id": "proceedings/hpcc/2016/4297/0", "title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2016/1451/0/07465268", "title": "Visualization of origin-destination matrices using a connection barchart and coordinated maps", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2016/07465268/12OmNCga1Ul", "parentPublication": { "id": "proceedings/pacificvis/2016/1451/0", "title": "2016 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2016/8985/0/8985a219", "title": "A Map Database System for Route Navigation with Multiple Transit Points and Destination Points", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2016/8985a219/12OmNCxL9QL", "parentPublication": { "id": "proceedings/iiai-aai/2016/8985/0", "title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596123", "title": "Constrained optimization for disoccluding geographic landmarks in 3D urban maps", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2013/06596123/12OmNs0kywM", "parentPublication": { "id": "proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/lcn/2016/2054/0/2054a339", "title": "Maximum Likelihood Topology Maps for Wireless Sensor Networks Using an Automated Robot", "doi": null, "abstractUrl": "/proceedings-article/lcn/2016/2054a339/12OmNx6PiyX", "parentPublication": { "id": "proceedings/lcn/2016/2054/0", "title": "2016 IEEE 41st Conference on Local Computer Networks (LCN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1988/0878/0/00028282", "title": "Representing a global map for a mobile robot with relational local maps from sensory data", "doi": null, "abstractUrl": "/proceedings-article/icpr/1988/00028282/12OmNxdm4Ax", "parentPublication": { "id": "proceedings/icpr/1988/0878/0", "title": "9th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2013/4909/0/06544830", "title": "Destination prediction by sub-trajectory synthesis and privacy protection against such prediction", "doi": null, "abstractUrl": "/proceedings-article/icde/2013/06544830/12OmNyKrHkH", "parentPublication": { "id": "proceedings/icde/2013/4909/0", "title": "2013 
29th IEEE International Conference on Data Engineering (ICDE 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/09/06774478", "title": "Drawing Road Networks with Mental Maps", "doi": null, "abstractUrl": "/journal/tg/2014/09/06774478/13rRUwbs2b5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/08320795", "title": "Predominance Tag Maps", "doi": null, "abstractUrl": "/journal/tg/2018/06/08320795/13rRUwhHcJq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122528", "title": "Focus+Context Metro Maps", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122528/13rRUyY294B", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07501805", "articleId": "13rRUyuegpb", "__typename": "AdjacentArticleType" }, "next": { "fno": "07563865", "articleId": "13rRUNvya9q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesVs", "name": "ttg201708-07530932s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07530932s1.zip", "extension": "zip", "size": "14.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
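The multi-destination-map record casts layout as constrained graph drawing solved by perturbation-based optimization. Below is a generic sketch of such an optimization loop, with the soft constraints abstracted into an energy callback; the annealing schedule, step size, and the energy function itself are assumptions, not the paper's exact scheme:

import math, random

def perturb_layout(pos, edges, energy, iters=20000, step=5.0, temp=1.0, cool=0.9995):
    # Generic perturbation search (a stand-in for the paper's optimizer):
    # repeatedly nudge one node position and keep the move if it lowers the
    # layout energy, occasionally accepting worse moves to escape local minima.
    # `energy(pos, edges)` would encode the soft constraints, e.g. penalties
    # for distorted relative road lengths and angles, or poorly visible edges.
    best = dict(pos)                      # node -> (x, y)
    e = energy(best, edges)
    for _ in range(iters):
        node = random.choice(list(best))
        old = best[node]
        best[node] = (old[0] + random.uniform(-step, step),
                      old[1] + random.uniform(-step, step))
        e2 = energy(best, edges)
        if e2 < e or random.random() < math.exp((e - e2) / temp):
            e = e2                        # accept the perturbation
        else:
            best[node] = old              # revert it
        temp *= cool
    return best

Hard constraints (for example, fixed destination nodes) would be enforced by simply never perturbing those nodes, while everything expressible as a penalty goes into the energy.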
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvya9q", "doi": "10.1109/TVCG.2016.2607714", "abstract": "The stated goal for visual data exploration is to operate at a rate that matches the pace of human data analysts, but the ever increasing amount of data has led to a fundamental problem: datasets are often too large to process within interactive time frames. Progressive analytics and visualizations have been proposed as potential solutions to this issue. By processing data incrementally in small chunks, progressive systems provide approximate query answers at interactive speeds that are then refined over time with increasing precision. We study how progressive visualizations affect users in exploratory settings in an experiment where we capture user behavior and knowledge discovery through interaction logs and think-aloud protocols. Our experiment includes three visualization conditions and different simulated dataset sizes. The visualization conditions are: (1) blocking, where results are displayed only after the entire dataset has been processed; (2) instantaneous, a hypothetical condition where results are shown almost immediately; and (3) progressive, where approximate results are displayed quickly and then refined over time. We analyze the data collected in our experiment and observe that users perform equally well with either instantaneous or progressive visualizations in key metrics, such as insight discovery rates and dataset coverage, while blocking visualizations have detrimental effects.", "abstracts": [ { "abstractType": "Regular", "content": "The stated goal for visual data exploration is to operate at a rate that matches the pace of human data analysts, but the ever increasing amount of data has led to a fundamental problem: datasets are often too large to process within interactive time frames. Progressive analytics and visualizations have been proposed as potential solutions to this issue. By processing data incrementally in small chunks, progressive systems provide approximate query answers at interactive speeds that are then refined over time with increasing precision. We study how progressive visualizations affect users in exploratory settings in an experiment where we capture user behavior and knowledge discovery through interaction logs and think-aloud protocols. Our experiment includes three visualization conditions and different simulated dataset sizes. The visualization conditions are: (1) blocking, where results are displayed only after the entire dataset has been processed; (2) instantaneous, a hypothetical condition where results are shown almost immediately; and (3) progressive, where approximate results are displayed quickly and then refined over time. 
We analyze the data collected in our experiment and observe that users perform equally well with either instantaneous or progressive visualizations in key metrics, such as insight discovery rates and dataset coverage, while blocking visualizations have detrimental effects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The stated goal for visual data exploration is to operate at a rate that matches the pace of human data analysts, but the ever increasing amount of data has led to a fundamental problem: datasets are often too large to process within interactive time frames. Progressive analytics and visualizations have been proposed as potential solutions to this issue. By processing data incrementally in small chunks, progressive systems provide approximate query answers at interactive speeds that are then refined over time with increasing precision. We study how progressive visualizations affect users in exploratory settings in an experiment where we capture user behavior and knowledge discovery through interaction logs and think-aloud protocols. Our experiment includes three visualization conditions and different simulated dataset sizes. The visualization conditions are: (1) blocking, where results are displayed only after the entire dataset has been processed; (2) instantaneous, a hypothetical condition where results are shown almost immediately; and (3) progressive, where approximate results are displayed quickly and then refined over time. We analyze the data collected in our experiment and observe that users perform equally well with either instantaneous or progressive visualizations in key metrics, such as insight discovery rates and dataset coverage, while blocking visualizations have detrimental effects.", "title": "How Progressive Visualizations Affect Exploratory Analysis", "normalizedTitle": "How Progressive Visualizations Affect Exploratory Analysis", "fno": "07563865", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Prefetching", "Visualization", "Measurement", "Data Analysis", "Histograms", "Time Factors", "Exploratory Analysis", "Interactive Visualization", "Progressive Visualization", "Scalability", "Insight Based Evaluation" ], "authors": [ { "givenName": "Emanuel", "surname": "Zgraggen", "fullName": "Emanuel Zgraggen", "affiliation": "Computer Science Department, Brown University, Providence, RI", "__typename": "ArticleAuthorType" }, { "givenName": "Alex", "surname": "Galakatos", "fullName": "Alex Galakatos", "affiliation": "Computer Science Department, Brown University, Providence, RI", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Crotty", "fullName": "Andrew Crotty", "affiliation": "Computer Science Department, Brown University, Providence, RI", "__typename": "ArticleAuthorType" }, { "givenName": "Jean-Daniel", "surname": "Fekete", "fullName": "Jean-Daniel Fekete", "affiliation": "INRIA, Orsay, FR", "__typename": "ArticleAuthorType" }, { "givenName": "Tim", "surname": "Kraska", "fullName": "Tim Kraska", "affiliation": "Computer Science Department, Brown University, Providence, RI", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1977-1987", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/edoc/2015/9203/0/9203a068", 
"title": "A Description Framework for Data Visualizations in Enterprise Information Systems", "doi": null, "abstractUrl": "/proceedings-article/edoc/2015/9203a068/12OmNxd4tAO", "parentPublication": { "id": "proceedings/edoc/2015/9203/0", "title": "2015 IEEE 19th International Enterprise Distributed Object Computing Conference (EDOC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010060943", "title": "How Information Visualization Novices Construct Visualizations", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010060943/13rRUwInvAZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440860", "title": "Augmenting Visualizations with Interactive Data Facts to Facilitate Interpretation and Communication", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440860/17D45Vw15v5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440818", "title": "Looks Good To Me: Visualizations As Sanity Checks", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440818/17D45W2WyxG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2018/4235/0/08506578", "title": "Comparative Visualizations through Parameterization and Variability", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2018/08506578/17D45WaTki5", "parentPublication": { "id": "proceedings/vlhcc/2018/4235/0", "title": "2018 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904476", "title": "How Do Viewers Synthesize Conflicting Information from Data Visualizations?", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904476/1H1geNCdZIc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809730", "title": "P5: Portable Progressive Parallel Processing Pipelines for Interactive Data Analysis and Visualization", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809730/1cHE2tYwF7a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/07/08943144", "title": "ProReveal: Progressive Visual Analytics With Safeguards", "doi": null, "abstractUrl": "/journal/tg/2021/07/08943144/1g3bi26D34k", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a161", "title": "Evaluating Animated Transitions between Contiguous Visualizations for Streaming Big Data", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a161/1qRNZKxTd60", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/09359504", "title": "A Progressive Approach to Scalar Field Topology", "doi": null, "abstractUrl": "/journal/tg/2021/06/09359504/1rlAQHG5pao", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07530932", "articleId": "13rRUxASubC", "__typename": "AdjacentArticleType" }, "next": { "fno": "07493610", "articleId": "13rRUwj7cpg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRFG", "name": "ttg201708-07563865s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07563865s1.zip", "extension": "zip", "size": "7.47 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwj7cpg", "doi": "10.1109/TVCG.2016.2582158", "abstract": "This work introduces a tool for interactive exploration and visualization using MetaTracts. MetaTracts is a novel method for extraction and visualization of individual fiber bundles and weaving patterns from X-ray computed tomography (XCT) scans of endless carbon fiber reinforced polymers (CFRPs). It is designed specifically to handle XCT scans of low resolutions where the individual fibers are barely visible, which makes extraction of fiber bundles a challenging problem. The proposed workflow is used to analyze unit cells of CFRP materials integrating a recurring weaving pattern. First, a coarse version of integral curves is used to trace sections of the individual fiber bundles in the woven CFRP materials. We call these sections MetaTracts. In the second step, these extracted fiber bundle sections are clustered using a two-step approach: first by orientation, then by proximity. The tool can generate volumetric representations as well as surface models of the extracted fiber bundles to be exported for further analysis. In addition a custom interactive tool for exploration and visual analysis of MetaTracts is designed. We evaluate the proposed workflow on a number of real world datasets and demonstrate that MetaTracts effectively and robustly identifies and extracts fiber bundles.", "abstracts": [ { "abstractType": "Regular", "content": "This work introduces a tool for interactive exploration and visualization using MetaTracts. MetaTracts is a novel method for extraction and visualization of individual fiber bundles and weaving patterns from X-ray computed tomography (XCT) scans of endless carbon fiber reinforced polymers (CFRPs). It is designed specifically to handle XCT scans of low resolutions where the individual fibers are barely visible, which makes extraction of fiber bundles a challenging problem. The proposed workflow is used to analyze unit cells of CFRP materials integrating a recurring weaving pattern. First, a coarse version of integral curves is used to trace sections of the individual fiber bundles in the woven CFRP materials. We call these sections MetaTracts. In the second step, these extracted fiber bundle sections are clustered using a two-step approach: first by orientation, then by proximity. The tool can generate volumetric representations as well as surface models of the extracted fiber bundles to be exported for further analysis. In addition a custom interactive tool for exploration and visual analysis of MetaTracts is designed. We evaluate the proposed workflow on a number of real world datasets and demonstrate that MetaTracts effectively and robustly identifies and extracts fiber bundles.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work introduces a tool for interactive exploration and visualization using MetaTracts. MetaTracts is a novel method for extraction and visualization of individual fiber bundles and weaving patterns from X-ray computed tomography (XCT) scans of endless carbon fiber reinforced polymers (CFRPs). 
It is designed specifically to handle XCT scans of low resolutions where the individual fibers are barely visible, which makes extraction of fiber bundles a challenging problem. The proposed workflow is used to analyze unit cells of CFRP materials integrating a recurring weaving pattern. First, a coarse version of integral curves is used to trace sections of the individual fiber bundles in the woven CFRP materials. We call these sections MetaTracts. In the second step, these extracted fiber bundle sections are clustered using a two-step approach: first by orientation, then by proximity. The tool can generate volumetric representations as well as surface models of the extracted fiber bundles to be exported for further analysis. In addition a custom interactive tool for exploration and visual analysis of MetaTracts is designed. We evaluate the proposed workflow on a number of real world datasets and demonstrate that MetaTracts effectively and robustly identifies and extracts fiber bundles.", "title": "Interactive Exploration and Visualization Using MetaTracts extracted from Carbon Fiber Reinforced Composites", "normalizedTitle": "Interactive Exploration and Visualization Using MetaTracts extracted from Carbon Fiber Reinforced Composites", "fno": "07493610", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Carbon", "Diffusion Tensor Imaging", "Weaving", "Visualization", "X Ray Imaging", "Fabrics", "Three Dimensional Displays", "Meta Tracts", "Fiber Bundle Extraction", "Analysis And Visualization", "Carbon Fiber Reinforced Polymers", "X Ray Computed Tomography", "Interactive Visual Exploration And Analysis" ], "authors": [ { "givenName": "Arindam", "surname": "Bhattacharya", "fullName": "Arindam Bhattacharya", "affiliation": "Ohio State University, Columbus, OH", "__typename": "ArticleAuthorType" }, { "givenName": "Johannes", "surname": "Weissenböck", "fullName": "Johannes Weissenböck", "affiliation": "University of Applied Sciences Upper Austria, Wels, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Rephael", "surname": "Wenger", "fullName": "Rephael Wenger", "affiliation": "Ohio State University, Columbus, OH", "__typename": "ArticleAuthorType" }, { "givenName": "Artem", "surname": "Amirkhanov", "fullName": "Artem Amirkhanov", "affiliation": "University of Applied Sciences Upper Austria, Wels, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Johann", "surname": "Kastner", "fullName": "Johann Kastner", "affiliation": "University of Applied Sciences Upper Austria, Wels, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Christoph", "surname": "Heinzl", "fullName": "Christoph Heinzl", "affiliation": "University of Applied Sciences Upper Austria, Wels, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "1988-2002", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2012/4905/0/4905b014", "title": "A Similarity Model and Segmentation Algorithm for White Matter Fiber Tracts", "doi": null, "abstractUrl": "/proceedings-article/icdm/2012/4905b014/12OmNBpVPYF", "parentPublication": { "id": "proceedings/icdm/2012/4905/0", "title": "2012 IEEE 12th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icmtma/2010/3962/3/3962e382", "title": "Simulation Study on the Characteristics of Carbon-Fiber-Reinforced Plastics in Electromagnetic Tomography Nondestructive Evaluation Systems", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962e382/12OmNqBtiZG", "parentPublication": { "id": "proceedings/icmtma/2010/3962/3", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660008", "title": "Fast and Reproducible Fiber Bundle Selection in DTI Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660008/12OmNrY3LpD", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156377", "title": "MetaTracts - A method for robust extraction and visualization of carbon fiber bundles in fiber reinforced composites", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156377/12OmNrYCXXM", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbac-pad/2009/3857/0/3857a101", "title": "Design and Implementation of Brain Fiber Tracking for GPUs and PC Clusters", "doi": null, "abstractUrl": "/proceedings-article/sbac-pad/2009/3857a101/12OmNxWLTEB", "parentPublication": { "id": "proceedings/sbac-pad/2009/3857/0", "title": "Computer Architecture and High Performance Computing, Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532778", "title": "Fast and reproducible fiber bundle selection in DTI visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532778/12OmNy4r3RG", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/05/ttg2008051044", "title": "Identifying White-Matter Fiber Bundles in DTI Data Using an Automated Proximity-Based Fiber-Clustering Method", "doi": null, "abstractUrl": "/journal/tg/2008/05/ttg2008051044/13rRUILLkDI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeas/2022/6305/0/630500a178", "title": "Intralaminar Crack Detection for Carbon Fiber Reinforced Polymers Based on Laser-Line Thermography", "doi": null, "abstractUrl": "/proceedings-article/icmeas/2022/630500a178/1I8wEqwX9ks", "parentPublication": { "id": "proceedings/icmeas/2022/6305/0", "title": "2022 8th International Conference on Mechanical Engineering and Automation Science (ICMEAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiea/2020/8288/0/828800a771", "title": "Finite Element Analysis of the Thermal Conductivity and the Specific Heat of Carbon Fiber Reinforced Plastic (CFRP) Composites", "doi": null, "abstractUrl": "/proceedings-article/aiea/2020/828800a771/1nTul2L8zjG", "parentPublication": { "id": "proceedings/aiea/2020/8288/0", "title": "2020 International 
Conference on Artificial Intelligence and Electromechanical Automation (AIEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifeea/2020/9627/0/962700a239", "title": "Tension Fatigue Behavior of Carbon Fiber Reinforced Resin Matrix Composites under Spectrum Load", "doi": null, "abstractUrl": "/proceedings-article/ifeea/2020/962700a239/1rvCGNgKmE8", "parentPublication": { "id": "proceedings/ifeea/2020/9627/0", "title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07563865", "articleId": "13rRUNvya9q", "__typename": "AdjacentArticleType" }, "next": { "fno": "07530838", "articleId": "13rRUxOdD2K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgL9", "name": "ttg201708-07493610s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07493610s1.zip", "extension": "zip", "size": "12.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
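The two-step grouping described in the MetaTracts record above (cluster the extracted fiber-bundle sections first by orientation, then split each orientation group by spatial proximity) can be sketched in a few lines. This is a hedged illustration rather than the authors' implementation: representing each MetaTract by a unit mean direction and a centroid, the two thresholds, and the greedy region growing are all assumptions.

import numpy as np

def cluster_metatracts(directions, centroids, angle_thresh_deg=15.0, dist_thresh=5.0):
    """directions: (N, 3) unit mean directions; centroids: (N, 3) section centers."""
    n = len(directions)
    labels = -np.ones(n, dtype=int)
    next_label = 0
    cos_t = np.cos(np.radians(angle_thresh_deg))
    for i in range(n):
        if labels[i] != -1:
            continue
        # Step 1: orientation grouping; |cos angle| handles sign-ambiguous fiber directions.
        same_dir = np.abs(directions @ directions[i]) >= cos_t
        remaining = set(np.where(same_dir & (labels == -1))[0].tolist())
        # Step 2: split the orientation group by centroid proximity (greedy region growing).
        while remaining:
            stack = [remaining.pop()]
            members = [stack[0]]
            while stack:
                j = stack.pop()
                close = [k for k in list(remaining)
                         if np.linalg.norm(centroids[j] - centroids[k]) < dist_thresh]
                for k in close:
                    remaining.discard(k)
                    stack.append(k)
                members.extend(close)
            labels[members] = next_label
            next_label += 1
    return labels

In the paper's workflow the sections themselves come from coarsely traced integral curves through the XCT volume; here they are taken as given.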
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxOdD2K", "doi": "10.1109/TVCG.2016.2597830", "abstract": "Procedural modeling techniques can produce high quality visual content through complex rule sets. However, controlling the outputs of these techniques for design purposes is often notoriously difficult for users due to the large number of parameters involved in these rule sets and also their non-linear relationship to the resulting content. To circumvent this problem, we present a sketch-based approach to procedural modeling. Given an approximate and abstract hand-drawn 2D sketch provided by a user, our algorithm automatically computes a set of procedural model parameters, which in turn yield multiple, detailed output shapes that resemble the user's input sketch. The user can then select an output shape, or further modify the sketch to explore alternative ones. At the heart of our approach is a deep Convolutional Neural Network (CNN) that is trained to map sketches to procedural model parameters. The network is trained by large amounts of automatically generated synthetic line drawings. By using an intuitive medium, i.e., freehand sketching as input, users are set free from manually adjusting procedural model parameters, yet they are still able to create high quality content. We demonstrate the accuracy and efficacy of our method in a variety of procedural modeling scenarios including design of man-made and organic shapes.", "abstracts": [ { "abstractType": "Regular", "content": "Procedural modeling techniques can produce high quality visual content through complex rule sets. However, controlling the outputs of these techniques for design purposes is often notoriously difficult for users due to the large number of parameters involved in these rule sets and also their non-linear relationship to the resulting content. To circumvent this problem, we present a sketch-based approach to procedural modeling. Given an approximate and abstract hand-drawn 2D sketch provided by a user, our algorithm automatically computes a set of procedural model parameters, which in turn yield multiple, detailed output shapes that resemble the user's input sketch. The user can then select an output shape, or further modify the sketch to explore alternative ones. At the heart of our approach is a deep Convolutional Neural Network (CNN) that is trained to map sketches to procedural model parameters. The network is trained by large amounts of automatically generated synthetic line drawings. By using an intuitive medium, i.e., freehand sketching as input, users are set free from manually adjusting procedural model parameters, yet they are still able to create high quality content. We demonstrate the accuracy and efficacy of our method in a variety of procedural modeling scenarios including design of man-made and organic shapes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Procedural modeling techniques can produce high quality visual content through complex rule sets. However, controlling the outputs of these techniques for design purposes is often notoriously difficult for users due to the large number of parameters involved in these rule sets and also their non-linear relationship to the resulting content. 
To circumvent this problem, we present a sketch-based approach to procedural modeling. Given an approximate and abstract hand-drawn 2D sketch provided by a user, our algorithm automatically computes a set of procedural model parameters, which in turn yield multiple, detailed output shapes that resemble the user's input sketch. The user can then select an output shape, or further modify the sketch to explore alternative ones. At the heart of our approach is a deep Convolutional Neural Network (CNN) that is trained to map sketches to procedural model parameters. The network is trained by large amounts of automatically generated synthetic line drawings. By using an intuitive medium, i.e., freehand sketching as input, users are set free from manually adjusting procedural model parameters, yet they are still able to create high quality content. We demonstrate the accuracy and efficacy of our method in a variety of procedural modeling scenarios including design of man-made and organic shapes.", "title": "Shape Synthesis from Sketches via Procedural Models and Convolutional Networks", "normalizedTitle": "Shape Synthesis from Sketches via Procedural Models and Convolutional Networks", "fno": "07530838", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Shape", "Computational Modeling", "Three Dimensional Displays", "Two Dimensional Displays", "Neural Networks", "Solid Modeling", "Computer Architecture", "Shape Synthesis", "Convolutional Neural Networks", "Procedural Modeling", "Sketch Based Modeling" ], "authors": [ { "givenName": "Haibin", "surname": "Huang", "fullName": "Haibin Huang", "affiliation": "University of Massachusetts Amherst, Amherst, MA", "__typename": "ArticleAuthorType" }, { "givenName": "Evangelos", "surname": "Kalogerakis", "fullName": "Evangelos Kalogerakis", "affiliation": "University of Massachusetts Amherst, Amherst, MA", "__typename": "ArticleAuthorType" }, { "givenName": "Ersin", "surname": "Yumer", "fullName": "Ersin Yumer", "affiliation": "Adobe Research, San Jose, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Radomir", "surname": "Mech", "fullName": "Radomir Mech", "affiliation": "Adobe Research, San Jose, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "2003-2013", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2017/2610/0/261001a067", "title": "3D Shape Reconstruction from Sketches via Multi-view Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a067/12OmNCu4nbZ", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2016/4400/0/4400a261", "title": "3D Model Retrieval Based on Hand Drawn Sketches Using LDA Model", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a261/12OmNqyUUvj", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2017/2219/0/2219a031", "title": "CAD Shape Grammar: Procedural Generation for Massive CAD Model", "doi": null, "abstractUrl": 
"/proceedings-article/sibgrapi/2017/2219a031/12OmNy6qfJ2", "parentPublication": { "id": "proceedings/sibgrapi/2017/2219/0", "title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a311", "title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a066", "title": "Enhancing Sketching and Sculpting for Shape Modeling", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a066/17D45WWzW7i", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a033", "title": "Facial Expression Editing in Face Sketch Using Shape Space Theory", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a033/17D45XacGkk", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2019/4637/0/463700a029", "title": "A Survey of Procedural Dungeon Generation", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2019/463700a029/1fHHpYoajCg", "parentPublication": { "id": "proceedings/sbgames/2019/4637/0", "title": "2019 18th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2016/4847/0/07900083", "title": "3D sketch-based 3D model retrieval with convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/icpr/2016/07900083/1gysq8EnfHi", "parentPublication": { "id": "proceedings/icpr/2016/4847/0", "title": "2016 23rd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09007505", "title": "Sketch Augmentation-Driven Shape Retrieval Learning Framework Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2021/08/09007505/1hJKlMJzueI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/10/09095367", "title": "PICO: Procedural Iterative Constrained Optimizer for Geometric Modeling", "doi": null, "abstractUrl": "/journal/tg/2021/10/09095367/1jVMiYPPf0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07493610", "articleId": "13rRUwj7cpg", "__typename": "AdjacentArticleType" }, "next": { "fno": "07544591", "articleId": "13rRUyfbwqP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyfbwqP", "doi": "10.1109/TVCG.2016.2600594", "abstract": "In this paper, we propose a framework for automatically producing thumbnails from stereo image pairs. It has two components focusing respectively on stereo saliency detection and stereo thumbnail generation. The first component analyzes stereo saliency through various saliency stimuli, stereoscopic perception and the relevance between two stereo views. The second component uses stereo saliency to guide stereo thumbnail generation. We develop two types of thumbnail generation methods, both changing image size automatically. The first method is called content-persistent cropping (CPC), which aims at cropping stereo images for display devices with different aspect ratios while preserving as much content as possible. The second method is an object-aware cropping method (OAC) for generating the smallest possible thumbnail pair that retains the most important content only and facilitates quick visual exploration of a stereo image database. Quantitative and qualitative experimental evaluations demonstrate promising performance of our thumbnail generation methods in comparison to state-of-the-art algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a framework for automatically producing thumbnails from stereo image pairs. It has two components focusing respectively on stereo saliency detection and stereo thumbnail generation. The first component analyzes stereo saliency through various saliency stimuli, stereoscopic perception and the relevance between two stereo views. The second component uses stereo saliency to guide stereo thumbnail generation. We develop two types of thumbnail generation methods, both changing image size automatically. The first method is called content-persistent cropping (CPC), which aims at cropping stereo images for display devices with different aspect ratios while preserving as much content as possible. The second method is an object-aware cropping method (OAC) for generating the smallest possible thumbnail pair that retains the most important content only and facilitates quick visual exploration of a stereo image database. Quantitative and qualitative experimental evaluations demonstrate promising performance of our thumbnail generation methods in comparison to state-of-the-art algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a framework for automatically producing thumbnails from stereo image pairs. It has two components focusing respectively on stereo saliency detection and stereo thumbnail generation. The first component analyzes stereo saliency through various saliency stimuli, stereoscopic perception and the relevance between two stereo views. The second component uses stereo saliency to guide stereo thumbnail generation. We develop two types of thumbnail generation methods, both changing image size automatically. The first method is called content-persistent cropping (CPC), which aims at cropping stereo images for display devices with different aspect ratios while preserving as much content as possible. 
The second method is an object-aware cropping method (OAC) for generating the smallest possible thumbnail pair that retains the most important content only and facilitates quick visual exploration of a stereo image database. Quantitative and qualitative experimental evaluations demonstrate promising performance of our thumbnail generation methods in comparison to state-of-the-art algorithms.", "title": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection", "normalizedTitle": "Stereoscopic Thumbnail Creation via Efficient Stereo Saliency Detection", "fno": "07544591", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Stereo Image Processing", "Detection Algorithms", "Image Edge Detection", "Estimation", "Visualization", "Distortion", "Computer Science", "Stereoscopic Thumbnails", "Stereo Saliency", "Image Cropping" ], "authors": [ { "givenName": "Wenguan", "surname": "Wang", "fullName": "Wenguan Wang", "affiliation": "Beijing Laboratory of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Jianbing", "surname": "Shen", "fullName": "Jianbing Shen", "affiliation": "Beijing Laboratory of Intelligent Information Technology, School of Computer Science, Beijing Institute of Technology, Beijing, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Yizhou", "surname": "Yu", "fullName": "Yizhou Yu", "affiliation": "College of Computer Science and Technology, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": "Department of Computer Science, University of California at Davis, Davis, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "2014-2027", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aipr/2010/8833/0/05759692", "title": "Pre-attentive detection of depth saliency using stereo vision", "doi": null, "abstractUrl": "/proceedings-article/aipr/2010/05759692/12OmNB836JG", "parentPublication": { "id": "proceedings/aipr/2010/8833/0", "title": "2010 IEEE 39th Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890709", "title": "Learning visual saliency for stereoscopic images", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890709/12OmNqIhFMD", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a253", "title": "Automatic Thumbnail Generation Based on Visual Representativeness and Foreground Recognizability", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a253/12OmNvAiSNK", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890321", "title": "Learning to detect stereo saliency", "doi": 
null, "abstractUrl": "/proceedings-article/icme/2014/06890321/12OmNvjgWv5", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607587", "title": "Making stereo photo cropping easy", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607587/12OmNvk7K65", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/312P3A01", "title": "Edge-preserving photometric stereo via depth fusion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/312P3A01/12OmNwwd2RX", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/08099928", "title": "Fast-At: Fast Automatic Thumbnail Generation Using Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/08099928/12OmNxR5UGZ", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2013/3022/0/3022a022", "title": "Saliency Cut in Stereo Images", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a022/12OmNxwWoGA", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/058P1B05", "title": "Leveraging stereopsis for saliency analysis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/058P1B05/12OmNy50g5B", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0/08726759", "title": "An Improved Automatic Thumbnail Generation Algorithm Based on Interpolation Technique", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2018/08726759/1axfscz4YoM", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2018/7975/0", "title": "2018 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07530838", "articleId": "13rRUxOdD2K", "__typename": "AdjacentArticleType" }, "next": { "fno": "07552504", "articleId": "13rRUxd2aZ6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwoxSj5", "title": "Aug.", "year": "2017", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxd2aZ6", "doi": "10.1109/TVCG.2016.2603178", "abstract": "The rising quantity and complexity of data creates a need to design and optimize data processing pipelines—the set of data processing steps, parameters and algorithms that perform operations on the data. Visualization can support this process but, although there are many examples of systems for visual parameter analysis, there remains a need to systematically assess users’ requirements and match those requirements to exemplar visualization methods. This article presents a new characterization of the requirements for pipeline design and optimization. This characterization is based on both a review of the literature and first-hand assessment of eight application case studies. We also match these requirements with exemplar functionality provided by existing visualization tools. Thus, we provide end-users and visualization developers with a way of identifying functionality that addresses data processing problems in an application. We also identify seven future challenges for visualization research that are not met by the capabilities of today's systems.", "abstracts": [ { "abstractType": "Regular", "content": "The rising quantity and complexity of data creates a need to design and optimize data processing pipelines—the set of data processing steps, parameters and algorithms that perform operations on the data. Visualization can support this process but, although there are many examples of systems for visual parameter analysis, there remains a need to systematically assess users’ requirements and match those requirements to exemplar visualization methods. This article presents a new characterization of the requirements for pipeline design and optimization. This characterization is based on both a review of the literature and first-hand assessment of eight application case studies. We also match these requirements with exemplar functionality provided by existing visualization tools. Thus, we provide end-users and visualization developers with a way of identifying functionality that addresses data processing problems in an application. We also identify seven future challenges for visualization research that are not met by the capabilities of today's systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The rising quantity and complexity of data creates a need to design and optimize data processing pipelines—the set of data processing steps, parameters and algorithms that perform operations on the data. Visualization can support this process but, although there are many examples of systems for visual parameter analysis, there remains a need to systematically assess users’ requirements and match those requirements to exemplar visualization methods. This article presents a new characterization of the requirements for pipeline design and optimization. This characterization is based on both a review of the literature and first-hand assessment of eight application case studies. We also match these requirements with exemplar functionality provided by existing visualization tools. 
Thus, we provide end-users and visualization developers with a way of identifying functionality that addresses data processing problems in an application. We also identify seven future challenges for visualization research that are not met by the capabilities of today's systems.", "title": "Visualization System Requirements for Data Processing Pipeline Design and Optimization", "normalizedTitle": "Visualization System Requirements for Data Processing Pipeline Design and Optimization", "fno": "07552504", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Pipelines", "Data Visualization", "Optimization", "Data Processing", "Visualization", "Computational Modeling", "Algorithm Design And Analysis", "Visualization Systems", "Requirement Analysis", "Data Processing Pipelines" ], "authors": [ { "givenName": "Tatiana", "surname": "von Landesberger", "fullName": "Tatiana von Landesberger", "affiliation": "TU Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Dieter W.", "surname": "Fellner", "fullName": "Dieter W. Fellner", "affiliation": "TU Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Roy A.", "surname": "Ruddle", "fullName": "Roy A. Ruddle", "affiliation": "University of Leeds, Leeds, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2017-08-01 00:00:00", "pubType": "trans", "pages": "2028-2041", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/rev/2007/3248/0/32480011", "title": "On Requirements Visualization", "doi": null, "abstractUrl": "/proceedings-article/rev/2007/32480011/12OmNAXxX6v", "parentPublication": { "id": "proceedings/rev/2007/3248/0", "title": "Requirements Engineering Visualization, First International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isav/2016/3872/0/07836401", "title": "Visualization and Analysis Requirements for In Situ Processing for a Large-Scale Fusion Simulation Code", "doi": null, "abstractUrl": "/proceedings-article/isav/2016/07836401/12OmNAjO6EE", "parentPublication": { "id": "proceedings/isav/2016/3872/0", "title": "2016 Second Workshop on In-Situ Infrastructures for Enabling Extreme-Scale Analysis and Visualization (ISAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2016/2846/0/07752429", "title": "ARC: A pipeline approach enabling large-scale graph visualization", "doi": null, "abstractUrl": "/proceedings-article/asonam/2016/07752429/12OmNB0FxhW", "parentPublication": { "id": "proceedings/asonam/2016/2846/0", "title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a228", "title": "A Concurrent Architecture Proposal for Information Visualization Pipeline", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a228/12OmNxZ2Glk", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/re/2016/4121/0/4121a006", "title": "Requirements Engineering 
Visualization: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/re/2016/4121a006/12OmNyRxFzB", "parentPublication": { "id": "proceedings/re/2016/4121/0", "title": "2016 IEEE 24th International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017610", "title": "Activity-Centered Domain Characterization for Problem-Driven Scientific Visualization", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017610/13rRUwhHcQX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2016/3593/0/07982324", "title": "Using OpenDX to Teach the Concept of Visualization Pipeline", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2016/07982324/17D45Wda7h4", "parentPublication": { "id": "proceedings/cse-euc/2016/3593/0", "title": "2016 19th IEEE Intl Conference on Computational Science and Engineering (CSE), IEEE 14th Intl Conference on Embedded and Ubiquitous Computing (EUC), and 15th Intl Symposium on Distributed Computing and Applications for Business Engineering (DCABES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669719", "title": "A Fast-Processing Pipeline for Three-dimensional Visualization of Acute Ischemic Stroke lesion topography", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669719/1A9VVWEHKeY", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2022/9156/0/09966395", "title": "A Prototype for Pipeline-Composable Task-Based Visualization Algorithms", "doi": null, "abstractUrl": "/proceedings-article/ldav/2022/09966395/1IT0CzlpPHy", "parentPublication": { "id": "proceedings/ldav/2022/9156/0", "title": "2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/5555/01/10081451", "title": "Application of Mathematical Optimization in Data Visualization and Visual Analytics: A Survey", "doi": null, "abstractUrl": "/journal/bd/5555/01/10081451/1LR5GZAvPiM", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07544591", "articleId": "13rRUyfbwqP", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesOz", "name": "ttg201708-07552504s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201708-07552504s1.zip", "extension": "zip", "size": "571 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
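As a concrete reading of the definition in the record above (a pipeline is a set of data processing steps, parameters and algorithms), a minimal sketch helps fix terms: designing the pipeline means choosing the steps, and optimizing it means tuning their parameter values, which is what visual parameter analysis tools then explore. The Step and run_pipeline names are invented for illustration and do not come from the article.

from dataclasses import dataclass, field
from typing import Any, Callable

@dataclass
class Step:
    name: str
    run: Callable[..., Any]                    # the algorithm for this step
    params: dict = field(default_factory=dict) # its tunable parameters

def run_pipeline(steps, data):
    for step in steps:
        data = step.run(data, **step.params)   # each step consumes the previous output
    return data

# A two-step numeric pipeline whose parameters could be swept and the outputs
# compared visually across parameter settings.
steps = [
    Step("scale", lambda xs, factor: [x * factor for x in xs], {"factor": 2.0}),
    Step("clip", lambda xs, lo, hi: [min(max(x, lo), hi) for x in xs], {"lo": 0.0, "hi": 3.0}),
]
print(run_pipeline(steps, [0.5, 1.0, 2.5]))    # [1.0, 2.0, 3.0]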
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwd9CFY", "doi": "10.1109/TVCG.1998.10000", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Special Issue on Visualization", "normalizedTitle": "Special Issue on Visualization", "fno": "v0097", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Hans", "surname": "Hagen", "fullName": "Hans Hagen", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "97", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "v0098", "articleId": "13rRUxBa55T", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBa55T", "doi": "10.1109/2945.694952", "abstract": "Abstract—New challenges on vector field visualization emerge as time-dependent numerical simulations become ubiquitous in the field of computational fluid dynamics (CFD). To visualize data generated from these simulations, traditional techniques, such as displaying particle traces, can only reveal flow phenomena in preselected local regions and, thus, are unable to track the evolution of global flow features over time. This paper presents a new algorithm, called UFLIC (Unsteady Flow LIC), to visualize vector data in unsteady flow fields. Our algorithm extends a texture synthesis technique, called Line Integral Convolution (LIC), by devising a new convolution algorithm that uses a time-accurate value scattering scheme to model the texture advection. In addition, our algorithm maintains the coherence of the flow animation by successively updating the convolution results over time. Furthermore, we propose a parallel UFLIC algorithm that can achieve high load-balancing for multiprocessor computers with shared memory architecture. We demonstrate the effectiveness of our new algorithm by presenting image snapshots from several CFD case studies.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—New challenges on vector field visualization emerge as time-dependent numerical simulations become ubiquitous in the field of computational fluid dynamics (CFD). To visualize data generated from these simulations, traditional techniques, such as displaying particle traces, can only reveal flow phenomena in preselected local regions and, thus, are unable to track the evolution of global flow features over time. This paper presents a new algorithm, called UFLIC (Unsteady Flow LIC), to visualize vector data in unsteady flow fields. Our algorithm extends a texture synthesis technique, called Line Integral Convolution (LIC), by devising a new convolution algorithm that uses a time-accurate value scattering scheme to model the texture advection. In addition, our algorithm maintains the coherence of the flow animation by successively updating the convolution results over time. Furthermore, we propose a parallel UFLIC algorithm that can achieve high load-balancing for multiprocessor computers with shared memory architecture. We demonstrate the effectiveness of our new algorithm by presenting image snapshots from several CFD case studies.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—New challenges on vector field visualization emerge as time-dependent numerical simulations become ubiquitous in the field of computational fluid dynamics (CFD). To visualize data generated from these simulations, traditional techniques, such as displaying particle traces, can only reveal flow phenomena in preselected local regions and, thus, are unable to track the evolution of global flow features over time. This paper presents a new algorithm, called UFLIC (Unsteady Flow LIC), to visualize vector data in unsteady flow fields. 
Our algorithm extends a texture synthesis technique, called Line Integral Convolution (LIC), by devising a new convolution algorithm that uses a time-accurate value scattering scheme to model the texture advection. In addition, our algorithm maintains the coherence of the flow animation by successively updating the convolution results over time. Furthermore, we propose a parallel UFLIC algorithm that can achieve high load-balancing for multiprocessor computers with shared memory architecture. We demonstrate the effectiveness of our new algorithm by presenting image snapshots from several CFD case studies.", "title": "A New Line Integral Convolution Algorithm for Visualizing Time-Varying Flow Fields", "normalizedTitle": "A New Line Integral Convolution Algorithm for Visualizing Time-Varying Flow Fields", "fno": "v0098", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Flow Visualization", "Vector Field Visualization", "Image Convolution", "Line Integral Convolution", "Flow Animation", "Unsteady Flows", "Texture Synthesis", "Parallel Algorithm" ], "authors": [ { "givenName": "Han-Wei", "surname": "Shen", "fullName": "Han-Wei Shen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "David L.", "surname": "Kao", "fullName": "David L. Kao", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "98-108", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0097", "articleId": "13rRUwd9CFY", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0109", "articleId": "13rRUxNEqPA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
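UFLIC extends Line Integral Convolution, so a minimal steady-flow LIC sketch shows the base operation the record above builds on: each output pixel averages an input noise texture along the streamline through that pixel. The Euler tracer, the box filter, and the fixed streamline length are simplifying assumptions, and UFLIC's time-accurate value scattering and successive updating for unsteady fields are deliberately omitted.

import numpy as np

def lic(vx, vy, noise, length=20, h=0.5):
    """vx, vy: (H, W) vector field components; noise: (H, W) input texture."""
    H, W = noise.shape
    out = np.zeros_like(noise, dtype=float)
    for y in range(H):
        for x in range(W):
            total, count = 0.0, 0
            for sign in (1.0, -1.0):               # integrate both directions
                px, py = float(x), float(y)
                for _ in range(length):
                    i, j = int(round(py)), int(round(px))
                    if not (0 <= i < H and 0 <= j < W):
                        break
                    total += noise[i, j]
                    count += 1
                    u, v = vx[i, j], vy[i, j]
                    norm = np.hypot(u, v)
                    if norm < 1e-9:                # stop at critical points
                        break
                    px += sign * h * u / norm      # unit-speed Euler step
                    py += sign * h * v / norm
            out[y, x] = total / max(count, 1)
    return out

Animating this for unsteady flow by recomputing each frame independently loses temporal coherence, which is precisely the problem UFLIC's scattering scheme addresses.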
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxNEqPA", "doi": "10.1109/2945.694953", "abstract": "Abstract—We present our results on the visualization of nonlinear vector field topology. The underlying mathematics is done in Clifford algebra, a system describing geometry by extending the usual vector space by a multiplication of vectors. We started with the observation that all known algorithms for vector field topology are based on piecewise linear or bilinear approximation, and that these methods destroy the local topology if nonlinear behavior is present. Our algorithm looks for such situations, chooses an appropriate polynomial approximation in these areas, and, finally, visualizes the topology. This overcomes the problem, and the algorithm is still very fast because we are using linear approximation outside these small but important areas. The paper contains a detailed description of the algorithm and a basic introduction to Clifford algebra.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present our results on the visualization of nonlinear vector field topology. The underlying mathematics is done in Clifford algebra, a system describing geometry by extending the usual vector space by a multiplication of vectors. We started with the observation that all known algorithms for vector field topology are based on piecewise linear or bilinear approximation, and that these methods destroy the local topology if nonlinear behavior is present. Our algorithm looks for such situations, chooses an appropriate polynomial approximation in these areas, and, finally, visualizes the topology. This overcomes the problem, and the algorithm is still very fast because we are using linear approximation outside these small but important areas. The paper contains a detailed description of the algorithm and a basic introduction to Clifford algebra.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present our results on the visualization of nonlinear vector field topology. The underlying mathematics is done in Clifford algebra, a system describing geometry by extending the usual vector space by a multiplication of vectors. We started with the observation that all known algorithms for vector field topology are based on piecewise linear or bilinear approximation, and that these methods destroy the local topology if nonlinear behavior is present. Our algorithm looks for such situations, chooses an appropriate polynomial approximation in these areas, and, finally, visualizes the topology. This overcomes the problem, and the algorithm is still very fast because we are using linear approximation outside these small but important areas. 
The paper contains a detailed description of the algorithm and a basic introduction to Clifford algebra.", "title": "Visualizing Nonlinear Vector Field Topology", "normalizedTitle": "Visualizing Nonlinear Vector Field Topology", "fno": "v0109", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Vector Field Topology", "Clifford Algebra", "Visualization" ], "authors": [ { "givenName": "Gerik", "surname": "Scheuermann", "fullName": "Gerik Scheuermann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Heinz", "surname": "Krüger", "fullName": "Heinz Krüger", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Martin", "surname": "Menzel", "fullName": "Martin Menzel", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Alyn P.", "surname": "Rockwood", "fullName": "Alyn P. Rockwood", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "109-116", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0098", "articleId": "13rRUxBa55T", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0117", "articleId": "13rRUxYIN3X", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
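The record above targets what piecewise-linear vector field topology cannot capture. The linear building block it improves on classifies a first-order critical point from the eigenvalues of the field's Jacobian; the degenerate case in this hedged sketch is exactly where the paper switches to a polynomial, Clifford-algebra-based approximation.

import numpy as np

def classify_critical_point(J, eps=1e-9):
    """J: 2x2 Jacobian of the vector field, evaluated at a zero of the field."""
    ev = np.linalg.eigvals(J)
    re, im = ev.real, ev.imag
    if np.any(np.abs(im) > eps):                   # complex conjugate pair
        if np.all(np.abs(re) < eps):
            return "center"
        return "spiral source" if re[0] > 0 else "spiral sink"
    if np.any(np.abs(re) < eps):                   # zero eigenvalue: linear theory fails
        return "degenerate (higher order; needs nonlinear analysis)"
    if re[0] * re[1] < 0:
        return "saddle"
    return "source" if re[0] > 0 else "sink"

print(classify_critical_point(np.array([[0.0, -1.0], [1.0, 0.0]])))  # center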
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYIN3X", "doi": "10.1109/2945.694954", "abstract": "Abstract—Numerical simulation of physical phenomena is now an accepted way of scientific inquiry. However, the field is still evolving, with a profusion of new solution and grid-generation techniques being continuously proposed. Concurrent and retrospective visualization are being used to validate the results, compare them among themselves and with experimental data, and browse through large scientific databases. There exists a need for representation schemes which allow access of structures in an increasing order of smoothness (or decreasing order of significance). We describe our methods on datasets obtained from curvilinear grids. Our target application required visualization of a computational simulation performed on a very remote supercomputer. Since no grid adaptation was performed, it was not deemed necessary to simplify or compress the grid. In essence, we treat the solution as if it were in the computational domain. Inherent to the identification of significant structures is determining the location of the scale coherent structures and assigning saliency values to them [22], [23]. Scale coherent structures are obtained as a result of combining the coefficients of a wavelet transform across scales. The result of this operation is a correlation mask that delineates regions containing significant structures. A spatial subdivision (e.g., octree) is used to delineate regions of interest. The mask values in these subdivided regions are used as a measure of information content. Later, another wavelet transform is conducted within each subdivided region and the coefficients are sorted based on a perceptual function with bandpass characteristics. This allows for ranking of structures based on the order of significance, giving rise to an adaptive and embedded representation scheme. We demonstrate our methods on two datasets from computational field simulations. Essentially, we show how our methods allow the ranked access of significant structures. We also compare our adaptive representation scheme with a fixed blocksize scheme.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Numerical simulation of physical phenomena is now an accepted way of scientific inquiry. However, the field is still evolving, with a profusion of new solution and grid-generation techniques being continuously proposed. Concurrent and retrospective visualization are being used to validate the results, compare them among themselves and with experimental data, and browse through large scientific databases. There exists a need for representation schemes which allow access of structures in an increasing order of smoothness (or decreasing order of significance). We describe our methods on datasets obtained from curvilinear grids. Our target application required visualization of a computational simulation performed on a very remote supercomputer. Since no grid adaptation was performed, it was not deemed necessary to simplify or compress the grid. In essence, we treat the solution as if it were in the computational domain. 
Inherent to the identification of significant structures is determining the location of the scale coherent structures and assigning saliency values to them [22], [23]. Scale coherent structures are obtained as a result of combining the coefficients of a wavelet transform across scales. The result of this operation is a correlation mask that delineates regions containing significant structures. A spatial subdivision (e.g., octree) is used to delineate regions of interest. The mask values in these subdivided regions are used as a measure of information content. Later, another wavelet transform is conducted within each subdivided region and the coefficients are sorted based on a perceptual function with bandpass characteristics. This allows for ranking of structures based on the order of significance, giving rise to an adaptive and embedded representation scheme. We demonstrate our methods on two datasets from computational field simulations. Essentially, we show how our methods allow the ranked access of significant structures. We also compare our adaptive representation scheme with a fixed blocksize scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Numerical simulation of physical phenomena is now an accepted way of scientific inquiry. However, the field is still evolving, with a profusion of new solution and grid-generation techniques being continuously proposed. Concurrent and retrospective visualization are being used to validate the results, compare them among themselves and with experimental data, and browse through large scientific databases. There exists a need for representation schemes which allow access of structures in an increasing order of smoothness (or decreasing order of significance). We describe our methods on datasets obtained from curvilinear grids. Our target application required visualization of a computational simulation performed on a very remote supercomputer. Since no grid adaptation was performed, it was not deemed necessary to simplify or compress the grid. In essence, we treat the solution as if it were in the computational domain. Inherent to the identification of significant structures is determining the location of the scale coherent structures and assigning saliency values to them [22], [23]. Scale coherent structures are obtained as a result of combining the coefficients of a wavelet transform across scales. The result of this operation is a correlation mask that delineates regions containing significant structures. A spatial subdivision (e.g., octree) is used to delineate regions of interest. The mask values in these subdivided regions are used as a measure of information content. Later, another wavelet transform is conducted within each subdivided region and the coefficients are sorted based on a perceptual function with bandpass characteristics. This allows for ranking of structures based on the order of significance, giving rise to an adaptive and embedded representation scheme. We demonstrate our methods on two datasets from computational field simulations. Essentially, we show how our methods allow the ranked access of significant structures. 
We also compare our adaptive representation scheme with a fixed blocksize scheme.", "title": "Structure-Significant Representation of Structured Datasets", "normalizedTitle": "Structure-Significant Representation of Structured Datasets", "fno": "v0117", "hasPdf": true, "idPrefix": "tg", "keywords": [ "CR Categories And Subject Descriptors I 3 2 Computer Graphics Graphics Systems I 3 8 Computer Graphics Applications I 4 2 Image Processing Compression Coding", "Wavelet Transform", "Structure Detection", "Human Visual System", "Progressive Transmission" ], "authors": [ { "givenName": "Raghu", "surname": "Machiraju", "fullName": "Raghu Machiraju", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Zhifan", "surname": "Zhu", "fullName": "Zhifan Zhu", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Bryan", "surname": "Fry", "fullName": "Bryan Fry", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Robert", "surname": "Moorhead", "fullName": "Robert Moorhead", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "117-132", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0109", "articleId": "13rRUxNEqPA", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0133", "articleId": "13rRUxlgxO8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
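The pipeline in the record above is concrete enough to sketch: wavelet detail coefficients are combined across scales into a correlation mask that flags scale coherent structures. The short Python sketch below is my own illustration of that combination step on a regular 2D field (the paper works on curvilinear-grid solutions treated in the computational domain); `scale_coherence_mask`, the per-scale normalization, and the PyWavelets dependency are assumptions, not the authors' code.

```python
# Illustrative only: multiply normalized wavelet detail magnitudes across
# scales so the mask is large only where every scale responds, i.e., at
# "scale coherent" structures. Assumes a 2D field with power-of-two sides.
import numpy as np
import pywt  # PyWavelets

def scale_coherence_mask(field, wavelet="haar", levels=3):
    coeffs = pywt.wavedec2(field, wavelet, level=levels)
    mask = np.ones(field.shape)
    for i, (cH, cV, cD) in enumerate(coeffs[1:]):      # coarsest detail band first
        detail = np.sqrt(cH ** 2 + cV ** 2 + cD ** 2)  # detail magnitude at this scale
        factor = 2 ** (levels - i)                     # upsample back to full resolution
        up = np.kron(detail, np.ones((factor, factor)))[: field.shape[0], : field.shape[1]]
        mask *= up / (up.max() + 1e-12)                # correlate across scales
    return mask

field = np.zeros((64, 64))
field[30:34, 30:34] = 1.0                              # one sharp structure
print(scale_coherence_mask(field).argmax())            # peak lands near the structure
```

A threshold such as `mask > 0.1 * mask.max()` would then delineate the regions of interest handed to the spatial (e.g., octree) subdivision the abstract describes.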
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxlgxO8", "doi": "10.1109/2945.694955", "abstract": "Abstract—We present a topology simplifying approach that can be used for genus reductions, removal of protuberances, and repair of cracks in polygonal models in a unified framework. Our work is complementary to the existing work on geometry simplification of polygonal datasets and we demonstrate that using topology and geometry simplifications together yields superior multiresolution hierarchies than is possible by using either of them alone. Our approach can also address the important issue of repair of cracks in polygonal models, as well as for rapid identification and removal of protuberances based on internal accessibility in polygonal models. Our approach is based on identifying holes and cracks by extending the concept of α-shapes to polygonal meshes under the L∞ distance metric. We then generate valid triangulations to fill them using the intuitive notion of sweeping an L∞ cube over the identified regions.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present a topology simplifying approach that can be used for genus reductions, removal of protuberances, and repair of cracks in polygonal models in a unified framework. Our work is complementary to the existing work on geometry simplification of polygonal datasets and we demonstrate that using topology and geometry simplifications together yields superior multiresolution hierarchies than is possible by using either of them alone. Our approach can also address the important issue of repair of cracks in polygonal models, as well as for rapid identification and removal of protuberances based on internal accessibility in polygonal models. Our approach is based on identifying holes and cracks by extending the concept of α-shapes to polygonal meshes under the L∞ distance metric. We then generate valid triangulations to fill them using the intuitive notion of sweeping an L∞ cube over the identified regions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present a topology simplifying approach that can be used for genus reductions, removal of protuberances, and repair of cracks in polygonal models in a unified framework. Our work is complementary to the existing work on geometry simplification of polygonal datasets and we demonstrate that using topology and geometry simplifications together yields superior multiresolution hierarchies than is possible by using either of them alone. Our approach can also address the important issue of repair of cracks in polygonal models, as well as for rapid identification and removal of protuberances based on internal accessibility in polygonal models. Our approach is based on identifying holes and cracks by extending the concept of α-shapes to polygonal meshes under the L∞ distance metric. 
We then generate valid triangulations to fill them using the intuitive notion of sweeping an L∞ cube over the identified regions.", "title": "Topology Simplification for Polygonal Virtual Environments", "normalizedTitle": "Topology Simplification for Polygonal Virtual Environments", "fno": "v0133", "hasPdf": true, "idPrefix": "tg", "keywords": [ "CR Categories And Subject Descriptors I 3 3 Computer Graphics Picture Image Generation Display Algorithms I 3 5 Computer Graphics Computational Geometry And Object Modeling Curve", "Surface", "Solid", "And Object Representations", "Hierarchical Approximation", "Model Simplification", "Levels Of Detail Generation", "Shape Approximation", "Geometric Modeling", "Topology Simplification", "CAD Model Repair" ], "authors": [ { "givenName": "Jihad", "surname": "El-Sana", "fullName": "Jihad El-Sana", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Amitabh", "surname": "Varshney", "fullName": "Amitabh Varshney", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "133-144", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0117", "articleId": "13rRUxYIN3X", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0145", "articleId": "13rRUy0qnGc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
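The crack and hole repair described in the record above starts from ordinary mesh bookkeeping: a crack or hole shows up as edges referenced by only one triangle. The sketch below shows that elementary boundary-edge test; the paper's actual identification uses α-shapes under the L∞ metric, which this deliberately does not reproduce.

```python
# Minimal sketch: boundary edges of a triangle mesh are the edges that
# occur in exactly one triangle; interior edges occur in exactly two.
from collections import defaultdict

def boundary_edges(triangles):
    """triangles: iterable of (i, j, k) vertex-index triples."""
    count = defaultdict(int)
    for a, b, c in triangles:
        for u, v in ((a, b), (b, c), (c, a)):
            count[(min(u, v), max(u, v))] += 1  # undirected edge key
    return [e for e, n in count.items() if n == 1]

# Two triangles sharing one edge: the four outer edges are the boundary.
print(boundary_edges([(0, 1, 2), (1, 3, 2)]))  # [(0, 1), (0, 2), (1, 3), (2, 3)]
```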
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy0qnGc", "doi": "10.1109/2945.694956", "abstract": "Abstract—We present a method to produce a hierarchy of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes Z_${\\cal M}_0, {\\cal M}_1, {\\cal M}_2, ..., {\\cal M}_n,$_Z where each mesh Z_${\\cal M}_i$_Z can be transformed to mesh Z_${\\cal M}_{i+1}$_Z through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation and for supplying the points to which the triangles are collapsed. The algorithm produces a limited number of intermediate meshes by selecting, at each step, a number of triangles that can be collapsed simultaneously. This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present a method to produce a hierarchy of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes ${\\cal M}_0, {\\cal M}_1, {\\cal M}_2, ..., {\\cal M}_n,$ where each mesh ${\\cal M}_i$ can be transformed to mesh ${\\cal M}_{i+1}$ through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation and for supplying the points to which the triangles are collapsed. The algorithm produces a limited number of intermediate meshes by selecting, at each step, a number of triangles that can be collapsed simultaneously. This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present a method to produce a hierarchy of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes - where each mesh - can be transformed to mesh - through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation and for supplying the points to which the triangles are collapsed. The algorithm produces a limited number of intermediate meshes by selecting, at each step, a number of triangles that can be collapsed simultaneously. 
This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.", "title": "Constructing Hierarchies for Triangle Meshes", "normalizedTitle": "Constructing Hierarchies for Triangle Meshes", "fno": "v0145", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Mesh Simplification", "Triangle Meshes", "Level Of Detail Representation", "Shape Approximation", "Multiresolution" ], "authors": [ { "givenName": "Tran S.", "surname": "Gieng", "fullName": "Tran S. Gieng", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hamann", "fullName": "Bernd Hamann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Kenneth I.", "surname": "Joy", "fullName": "Kenneth I. Joy", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Gregory L.", "surname": "Schussman", "fullName": "Gregory L. Schussman", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Issac J.", "surname": "Trotts", "fullName": "Issac J. Trotts", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "145-161", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0133", "articleId": "13rRUxlgxO8", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0162", "articleId": "13rRUyfbwqs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
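The greedy ordering described in the record above is easy to outline: weight every triangle, then collapse cheapest-first. In the sketch below the weight is plain triangle area and the collapse target is the centroid, both stand-ins for the paper's locally fitted approximating functions; a real implementation would also re-weight neighboring triangles after each collapse.

```python
# Hedged sketch of a triangle-collapse ordering (not the authors' method):
# rank triangles in a min-heap by a weight and emit collapses cheapest-first.
import heapq
import numpy as np

def tri_area(p, q, r):
    return 0.5 * np.linalg.norm(np.cross(q - p, r - p))

def collapse_order(verts, tris):
    """verts: (n, 3) float array; tris: iterable of (i, j, k) index triples.
    Yields (weight, triangle, collapse_point); neighbor re-weighting after
    each collapse is omitted for brevity."""
    heap = [(tri_area(verts[i], verts[j], verts[k]), (i, j, k)) for i, j, k in tris]
    heapq.heapify(heap)
    while heap:
        w, t = heapq.heappop(heap)
        i, j, k = t
        yield w, t, verts[[i, j, k]].mean(axis=0)  # collapse triangle to its centroid

verts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [2, 0, 0]], float)
for w, t, p in collapse_order(verts, [(0, 1, 2), (1, 3, 2), (1, 4, 3)]):
    print(round(w, 3), t, p)
```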
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyfbwqs", "doi": "10.1109/2945.694983", "abstract": "Abstract—This paper presents a system and the associated algorithms for repairing the boundary representation of CAD models. Two types of errors are considered: topological errors, i.e., aggregate errors, like zero-volume parts, duplicate or missing parts, inconsistent surface orientation, etc., and geometric errors, i.e., numerical imprecision errors, like cracks or overlaps of geometry. The output of our system describes a set of clean and consistent two-manifolds (possibly with boundaries) with derived adjacencies. Such solid representation enables the application of a variety of rendering and analysis algorithms, e.g., finite-element analysis, radiosity computation, model simplification, and solid free-form fabrication. The algorithms described here were originally designed to correct errors in polygonal B-Reps. We also present an extension for spline surfaces.Central to our system is a procedure for inferring local adjacencies of edges. The geometric representation of topologically-adjacent edges are merged to evolve a set of two-manifolds. Aggregate errors are discovered during the merging step. Unfortunately, there are many ambiguous situations where errors admit more than one valid solution. Our system proposes an object-repairing process based on a set of user-tunable heuristics. The system also allows the user to override the algorithm's decisions in a repair-visualization step. In essence, this visualization step presents an organized and intuitive way for the user to explore the space of valid solutions and to select the correct one.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper presents a system and the associated algorithms for repairing the boundary representation of CAD models. Two types of errors are considered: topological errors, i.e., aggregate errors, like zero-volume parts, duplicate or missing parts, inconsistent surface orientation, etc., and geometric errors, i.e., numerical imprecision errors, like cracks or overlaps of geometry. The output of our system describes a set of clean and consistent two-manifolds (possibly with boundaries) with derived adjacencies. Such solid representation enables the application of a variety of rendering and analysis algorithms, e.g., finite-element analysis, radiosity computation, model simplification, and solid free-form fabrication. The algorithms described here were originally designed to correct errors in polygonal B-Reps. We also present an extension for spline surfaces.Central to our system is a procedure for inferring local adjacencies of edges. The geometric representation of topologically-adjacent edges are merged to evolve a set of two-manifolds. Aggregate errors are discovered during the merging step. Unfortunately, there are many ambiguous situations where errors admit more than one valid solution. Our system proposes an object-repairing process based on a set of user-tunable heuristics. The system also allows the user to override the algorithm's decisions in a repair-visualization step. 
In essence, this visualization step presents an organized and intuitive way for the user to explore the space of valid solutions and to select the correct one.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper presents a system and the associated algorithms for repairing the boundary representation of CAD models. Two types of errors are considered: topological errors, i.e., aggregate errors, like zero-volume parts, duplicate or missing parts, inconsistent surface orientation, etc., and geometric errors, i.e., numerical imprecision errors, like cracks or overlaps of geometry. The output of our system describes a set of clean and consistent two-manifolds (possibly with boundaries) with derived adjacencies. Such solid representation enables the application of a variety of rendering and analysis algorithms, e.g., finite-element analysis, radiosity computation, model simplification, and solid free-form fabrication. The algorithms described here were originally designed to correct errors in polygonal B-Reps. We also present an extension for spline surfaces. Central to our system is a procedure for inferring local adjacencies of edges. The geometric representations of topologically-adjacent edges are merged to evolve a set of two-manifolds. Aggregate errors are discovered during the merging step. Unfortunately, there are many ambiguous situations where errors admit more than one valid solution. Our system proposes an object-repairing process based on a set of user-tunable heuristics. The system also allows the user to override the algorithm's decisions in a repair-visualization step. In essence, this visualization step presents an organized and intuitive way for the user to explore the space of valid solutions and to select the correct one.", "title": "RSVP: A Geometric Toolkit for Controlled Repair of Solid Models", "normalizedTitle": "RSVP: A Geometric Toolkit for Controlled Repair of Solid Models", "fno": "v0162", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Model Repair", "Edge Matching" ], "authors": [ { "givenName": "Gill", "surname": "Barequet", "fullName": "Gill Barequet", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Christian A.", "surname": "Duncan", "fullName": "Christian A. Duncan", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Subodh", "surname": "Kumar", "fullName": "Subodh Kumar", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "162-177", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0145", "articleId": "13rRUy0qnGc", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0178", "articleId": "13rRUwdIOUz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
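The "Edge Matching" keyword in the record above suggests the core operation: two edges are deemed topologically adjacent when their endpoints coincide within a tolerance, which is how cracks from numerical imprecision get stitched. A minimal sketch under that assumption (names and the snapping scheme are mine, not the RSVP code):

```python
# Hypothetical edge matcher: snap endpoints to an eps-grid and pair up
# edges whose snapped endpoint sets coincide, in either orientation.
import numpy as np

def snap(p, eps):
    return tuple(np.round(np.asarray(p, float) / eps).astype(int))

def match_edges(edges, eps=1e-5):
    """edges: list of (p, q) endpoint pairs (3-tuples). Returns index pairs
    of edges that coincide within eps."""
    table, pairs = {}, []
    for i, (p, q) in enumerate(edges):
        key = frozenset((snap(p, eps), snap(q, eps)))  # orientation-independent
        if key in table:
            pairs.append((table[key], i))
        else:
            table[key] = i
    return pairs

edges = [((0, 0, 0), (1, 0, 0)), ((1.000004, 0, 0), (0, 0, 0))]
print(match_edges(edges))  # [(0, 1)]: the same edge up to numerical slack
```

Grid snapping can split two points that straddle a cell boundary; a production version would search neighboring cells or use a spatial tree, and would surface the ambiguous multi-candidate cases to the user, as the system described above does.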
{ "issue": { "id": "12OmNBuL16y", "title": "April-June", "year": "1998", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "4", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwdIOUz", "doi": "10.1109/2945.694987", "abstract": "Abstract—This paper describes three new results for volume rendering algorithms utilizing splatting. First, an antialiasing extension to the basic splatting algorithm is introduced that mitigates the spatial aliasing for high-resolution volumes. Aliasing can be severe for high-resolution volumes or volumes where a high depth of field leads to converging samples along the perspective axis. Next, an analysis of the common approximation errors in the splatting process for perspective viewing is presented. In this context, we give different implementations, distinguished by efficiency and accuracy, for adding the splat contributions to the image plane. We then present new results in controlling the splatting errors and also show their behavior in the framework of our new antialiasing technique. Finally, current work in progress on extensions to splatting for temporal antialiasing is demonstrated. Here, we present a simple but highly effective scheme for adding motion blur to fast moving volumes.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper describes three new results for volume rendering algorithms utilizing splatting. First, an antialiasing extension to the basic splatting algorithm is introduced that mitigates the spatial aliasing for high-resolution volumes. Aliasing can be severe for high-resolution volumes or volumes where a high depth of field leads to converging samples along the perspective axis. Next, an analysis of the common approximation errors in the splatting process for perspective viewing is presented. In this context, we give different implementations, distinguished by efficiency and accuracy, for adding the splat contributions to the image plane. We then present new results in controlling the splatting errors and also show their behavior in the framework of our new antialiasing technique. Finally, current work in progress on extensions to splatting for temporal antialiasing is demonstrated. Here, we present a simple but highly effective scheme for adding motion blur to fast moving volumes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper describes three new results for volume rendering algorithms utilizing splatting. First, an antialiasing extension to the basic splatting algorithm is introduced that mitigates the spatial aliasing for high-resolution volumes. Aliasing can be severe for high-resolution volumes or volumes where a high depth of field leads to converging samples along the perspective axis. Next, an analysis of the common approximation errors in the splatting process for perspective viewing is presented. In this context, we give different implementations, distinguished by efficiency and accuracy, for adding the splat contributions to the image plane. We then present new results in controlling the splatting errors and also show their behavior in the framework of our new antialiasing technique. Finally, current work in progress on extensions to splatting for temporal antialiasing is demonstrated. 
Here, we present a simple but highly effective scheme for adding motion blur to fast moving volumes.", "title": "Splatting Errors and Antialiasing", "normalizedTitle": "Splatting Errors and Antialiasing", "fno": "v0178", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Volume Rendering", "Splatting", "Direct Volume Rendering", "Resampling", "Reconstruction", "Antialiasing", "Perspective Projection", "Motion Blur" ], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Torsten", "surname": "Möller", "fullName": "Torsten Möller", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "J. Edward", "surname": "Swan II", "fullName": "J. Edward Swan II", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Roger", "surname": "Crawfis", "fullName": "Roger Crawfis", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Naeem", "surname": "Shareef", "fullName": "Naeem Shareef", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Roni", "surname": "Yagel", "fullName": "Roni Yagel", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "02", "pubDate": "1998-04-01 00:00:00", "pubType": "trans", "pages": "178-191", "year": "1998", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0162", "articleId": "13rRUyfbwqs", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
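Everything in the record above builds on the basic splatting operation: each volume sample deposits a weighted footprint onto the image plane. The sketch below is my minimal illustration of only that core accumulate, with a truncated Gaussian kernel and parameter choices that are assumptions; the paper's antialiasing, perspective error control, and motion blur are not reproduced here.

```python
# Minimal splatting sketch: deposit a truncated Gaussian footprint for one
# sample at continuous image position (x, y) into a 2D accumulation buffer.
import numpy as np

def splat(image, x, y, value, sigma=1.0, radius=3):
    h, w = image.shape
    x0, y0 = int(round(x)), int(round(y))
    for j in range(max(0, y0 - radius), min(h, y0 + radius + 1)):
        for i in range(max(0, x0 - radius), min(w, x0 + radius + 1)):
            r2 = (i - x) ** 2 + (j - y) ** 2
            image[j, i] += value * np.exp(-r2 / (2.0 * sigma ** 2))

img = np.zeros((64, 64))
splat(img, 31.7, 32.2, 1.0)
print(round(img.sum(), 2))  # deposited energy, close to 2*pi*sigma**2 up to truncation
```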
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6POrvEWs", "doi": "10.1109/TVCG.2020.2978985", "abstract": "Presents a listing of the editorial board, board of governors, current staff, committee members, and/or society editors for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents a listing of the editorial board, board of governors, current staff, committee members, and/or society editors for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents a listing of the editorial board, board of governors, current staff, committee members, and/or society editors for this issue of the publication.", "title": "IEEE Transactions on Visualization and Computer Graphics", "normalizedTitle": "IEEE Transactions on Visualization and Computer Graphics", "fno": "09052074", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "i-i", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "09052077", "articleId": "1iE6Nu1neNi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6Nu1neNi", "doi": "10.1109/TVCG.2020.2978986", "abstract": "Presents the table of contents for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the table of contents for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the table of contents for this issue of the publication.", "title": "Contents", "normalizedTitle": "Contents", "fno": "09052077", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "ii-iii", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052074", "articleId": "1iE6POrvEWs", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052628", "articleId": "1iFLKo4ODvO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iFLKo4ODvO", "doi": "10.1109/TVCG.2020.2978971", "abstract": "Welcome to the 9th <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG</italic>) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22&#x2013;26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG VR</italic> special issue mark a major highlight of the year.", "abstracts": [ { "abstractType": "Regular", "content": "Welcome to the 9th <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG</italic>) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22&#x2013;26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG VR</italic> special issue mark a major highlight of the year.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Welcome to the 9th IEEE Transactions on Visualization and Computer Graphics (TVCG) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22–26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. 
With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the IEEE TVCG VR special issue mark a major highlight of the year.", "title": "Introducing the IEEE Virtual Reality 2020 Special Issue", "normalizedTitle": "Introducing the IEEE Virtual Reality 2020 Special Issue", "fno": "09052628", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Special Issues And Sections", "Virtual Reality", "Meetings" ], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": "Stony Brook University", "__typename": "ArticleAuthorType" }, { "givenName": "Doug", "surname": "Bowman", "fullName": "Doug Bowman", "affiliation": "Virginia Tech, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "iv-v", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2014/04/ttg2014040vi", "title": "Message from the Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010001", "title": "Guest Editor's Introduction Special Section on the Virtual Reality Conference (VR)", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010001/13rRUwIF6l4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08053887", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676185", "title": "Introducing the IEEE Virtual Reality 2019 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676185/18NkgxdV8sM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754285", "title": "IEEE VR 2022 Introducing the Special Issue", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754285/1CpcIar9LS8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754286", "title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08855105", "title": "Message from the ISMAR 2019 
Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09254194", "title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09405571", "title": "Introducing the IEEE Virtual Reality 2021 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09591492", "title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09052077", "articleId": "1iE6Nu1neNi", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052630", "articleId": "1iFLLHpsBfW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iFLLHpsBfW", "doi": "10.1109/TVCG.2020.2978987", "abstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present a subset of papers from the 27th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020), held March 22–26, 2020, in Atlanta, Georgia. ", "abstracts": [ { "abstractType": "Regular", "content": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present a subset of papers from the 27th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020), held March 22–26, 2020, in Atlanta, Georgia. ", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present a subset of papers from the 27th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020), held March 22–26, 2020, in Atlanta, Georgia. ", "title": "Preface", "normalizedTitle": "Preface", "fno": "09052630", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Special Issues And Sections", "Meetings", "Visualization" ], "authors": [ { "givenName": "Maud", "surname": "Marchal", "fullName": "Maud Marchal", "affiliation": "University Rennes, INSA/IRISA, France", "__typename": "ArticleAuthorType" }, { "givenName": "Joseph L.", "surname": "Gabbard", "fullName": "Joseph L. Gabbard", "affiliation": "Virginia Tech, United States", "__typename": "ArticleAuthorType" }, { "givenName": "Joaquim", "surname": "Jorge", "fullName": "Joaquim Jorge", "affiliation": "Instituto Superior Técnico, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Torsten W.", "surname": "Kuhlen", "fullName": "Torsten W. 
Kuhlen", "affiliation": "RWTH Aachen University, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Anthony", "surname": "Steed", "fullName": "Anthony Steed", "affiliation": "University College London, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "vi-vi", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2014/04/ttg2014040vi", "title": "Message from the Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08053887", "title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754286", "title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09766260", "title": "Guest Editors&#x0027; Introduction: Special Section on IEEE PacificVis 2022", "doi": null, "abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08855105", "title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09052628", "title": "Introducing the IEEE Virtual Reality 2020 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/06/09082802", "title": "Guest Editors&#x2019; Introduction: Special Section on IEEE PacificVis 2020", "doi": null, "abstractUrl": "/journal/tg/2020/06/09082802/1jrTVLo1tpC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09254194", "title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09405571", "title": "Introducing the IEEE Virtual Reality 2021 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/09430173", "title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021", "doi": null, "abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09052628", "articleId": "1iFLKo4ODvO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052079", "articleId": "1iE6OiVOHcI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6OiVOHcI", "doi": "10.1109/TVCG.2020.2978988", "abstract": "Presents the Technical Committee for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the Technical Committee for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the Technical Committee for this issue of the publication.", "title": "IEEE Visualization and Graphics Technical Committee (VGTC): http://vgtc.org/", "normalizedTitle": "IEEE Visualization and Graphics Technical Committee (VGTC): http://vgtc.org/", "fno": "09052079", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "vii-vii", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052630", "articleId": "1iFLLHpsBfW", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052076", "articleId": "1iE6Rr6A3QY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6Rr6A3QY", "doi": "10.1109/TVCG.2020.2978990", "abstract": "Provides a listing of current committee members and society officers.", "abstracts": [ { "abstractType": "Regular", "content": "Provides a listing of current committee members and society officers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provides a listing of current committee members and society officers.", "title": "Conference Committee", "normalizedTitle": "Conference Committee", "fno": "09052076", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "viii-viii", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052079", "articleId": "1iE6OiVOHcI", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052075", "articleId": "1iE6RKhDJJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6RKhDJJe", "doi": "10.1109/TVCG.2020.2978995", "abstract": "Presents a listing of the CS International program committee for journal papers.", "abstracts": [ { "abstractType": "Regular", "content": "Presents a listing of the CS International program committee for journal papers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents a listing of the CS International program committee for journal papers.", "title": "International Program Committee for Journal Papers", "normalizedTitle": "International Program Committee for Journal Papers", "fno": "09052075", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "ix-ix", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052076", "articleId": "1iE6Rr6A3QY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052078", "articleId": "1iE6QAEcbtK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iE6QAEcbtK", "doi": "10.1109/TVCG.2020.2978992", "abstract": "Presents a listing of reviewers who contributed to this publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents a listing of reviewers who contributed to this publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents a listing of reviewers who contributed to this publication.", "title": "Paper Reviewers for Journal Papers", "normalizedTitle": "Paper Reviewers for Journal Papers", "fno": "09052078", "hasPdf": true, "idPrefix": "tg", "keywords": [ "IEEE Publishing" ], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "x-xi", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052075", "articleId": "1iE6RKhDJJe", "__typename": "AdjacentArticleType" }, "next": { "fno": "09052629", "articleId": "1iFLK7FLSsE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iFLK7FLSsE", "doi": "10.1109/TVCG.2020.2978997", "abstract": "The 2019 VGTC Virtual Reality Best Dissertation Award goes to Qi Sun, a 2018 graduate from Stony Brook University, for his dissertation entitled, &#x201c;Computational Methods for Immersive Perception.&#x201d;", "abstracts": [ { "abstractType": "Regular", "content": "The 2019 VGTC Virtual Reality Best Dissertation Award goes to Qi Sun, a 2018 graduate from Stony Brook University, for his dissertation entitled, &#x201c;Computational Methods for Immersive Perception.&#x201d;", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The 2019 VGTC Virtual Reality Best Dissertation Award goes to Qi Sun, a 2018 graduate from Stony Brook University, for his dissertation entitled, “Computational Methods for Immersive Perception.”", "title": "The 2019 VGTC Virtual Reality Best Dissertation Award", "normalizedTitle": "The 2019 VGTC Virtual Reality Best Dissertation Award", "fno": "09052629", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Awards" ], "authors": [ { "givenName": "Qi", "surname": "Sun", "fullName": "Qi Sun", "affiliation": "Adobe Research, San Jose", "__typename": "ArticleAuthorType" }, { "givenName": "Mahdi", "surname": "Azmandian", "fullName": "Mahdi Azmandian", "affiliation": "The University of Southern California", "__typename": "ArticleAuthorType" }, { "givenName": "James", "surname": "Baumeister", "fullName": "James Baumeister", "affiliation": "University of South Australia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "xii-xiii", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09052078", "articleId": "1iE6QAEcbtK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998297", "articleId": "1hrXhk9mu9W", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXhk9mu9W", "doi": "10.1109/TVCG.2020.2973051", "abstract": "Virtual reality systems typically allow users to physically walk and turn, but virtual environments (VEs) often exceed the available walking space. Teleporting has become a common user interface, whereby the user aims a laser pointer to indicate the desired location, and sometimes orientation, in the VE before being transported without self-motion cues. This study evaluated the influence of rotational self-motion cues on spatial updating performance when teleporting, and whether the importance of rotational cues varies across movement scale and environment scale. Participants performed a triangle completion task by teleporting along two outbound path legs before pointing to the unmarked path origin. Rotational self-motion reduced overall errors across all levels of movement scale and environment scale, though it also introduced a slight bias toward under-rotation. The importance of rotational self-motion was exaggerated when navigating large triangles and when the surrounding environment was large. Navigating a large triangle within a small VE brought participants closer to surrounding landmarks and boundaries, which led to greater reliance on piloting (landmark-based navigation) and therefore reduced-but did not eliminate-the impact of rotational self-motion cues. These results indicate that rotational self-motion cues are important when teleporting, and that navigation can be improved by enabling piloting.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual reality systems typically allow users to physically walk and turn, but virtual environments (VEs) often exceed the available walking space. Teleporting has become a common user interface, whereby the user aims a laser pointer to indicate the desired location, and sometimes orientation, in the VE before being transported without self-motion cues. This study evaluated the influence of rotational self-motion cues on spatial updating performance when teleporting, and whether the importance of rotational cues varies across movement scale and environment scale. Participants performed a triangle completion task by teleporting along two outbound path legs before pointing to the unmarked path origin. Rotational self-motion reduced overall errors across all levels of movement scale and environment scale, though it also introduced a slight bias toward under-rotation. The importance of rotational self-motion was exaggerated when navigating large triangles and when the surrounding environment was large. Navigating a large triangle within a small VE brought participants closer to surrounding landmarks and boundaries, which led to greater reliance on piloting (landmark-based navigation) and therefore reduced-but did not eliminate-the impact of rotational self-motion cues. These results indicate that rotational self-motion cues are important when teleporting, and that navigation can be improved by enabling piloting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual reality systems typically allow users to physically walk and turn, but virtual environments (VEs) often exceed the available walking space. 
Teleporting has become a common user interface, whereby the user aims a laser pointer to indicate the desired location, and sometimes orientation, in the VE before being transported without self-motion cues. This study evaluated the influence of rotational self-motion cues on spatial updating performance when teleporting, and whether the importance of rotational cues varies across movement scale and environment scale. Participants performed a triangle completion task by teleporting along two outbound path legs before pointing to the unmarked path origin. Rotational self-motion reduced overall errors across all levels of movement scale and environment scale, though it also introduced a slight bias toward under-rotation. The importance of rotational self-motion was exaggerated when navigating large triangles and when the surrounding environment was large. Navigating a large triangle within a small VE brought participants closer to surrounding landmarks and boundaries, which led to greater reliance on piloting (landmark-based navigation) and therefore reduced-but did not eliminate-the impact of rotational self-motion cues. These results indicate that rotational self-motion cues are important when teleporting, and that navigation can be improved by enabling piloting.", "title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating", "normalizedTitle": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating", "fno": "08998297", "hasPdf": true, "idPrefix": "tg", "keywords": [ "User Interfaces", "Virtual Reality", "Virtual Environments", "Path Scale", "Environment Scale", "Virtual Reality Systems", "Common User Interface", "Rotational Self Motion Cues", "Spatial Updating Performance", "Rotational Self Motion Reduced Overall Errors", "Rotational Cues", "Legged Locomotion", "Visualization", "Task Analysis", "Navigation", "Space Exploration", "Virtual Environments", "Cognition", "Navigation", "Spatial Cognition", "Virtual Reality", "Teleporting" ], "authors": [ { "givenName": "Jonathan W.", "surname": "Kelly", "fullName": "Jonathan W. Kelly", "affiliation": "Iowa State University, Ames, IA", "__typename": "ArticleAuthorType" }, { "givenName": "Alec G.", "surname": "Ostrander", "fullName": "Alec G. Ostrander", "affiliation": "Iowa State University, Ames, IA", "__typename": "ArticleAuthorType" }, { "givenName": "Alex F.", "surname": "Lim", "fullName": "Alex F. Lim", "affiliation": "Iowa State University, Ames, IA", "__typename": "ArticleAuthorType" }, { "givenName": "Lucia A.", "surname": "Cherep", "fullName": "Lucia A. Cherep", "affiliation": "Iowa State University, Ames, IA", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen B.", "surname": "Gilbert", "fullName": "Stephen B. 
Gilbert", "affiliation": "Iowa State University, Ames, IA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1841-1850", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549382", "title": "Poster: Do walking motions enhance visually induced self-motion illusions in virtual reality?", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549382/12OmNBr4eym", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892303", "title": "Object location memory error in virtual and real environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892303/12OmNx7ouWn", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504752", "title": "Disguising rotational gain for redirected walking in virtual reality: Effect of visual density", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504752/12OmNyr8YkS", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/07/07955099", "title": "Collision Avoidance Behavior between Walkers: Global and Local Motion Cues", "doi": null, "abstractUrl": "/journal/tg/2018/07/07955099/13rRUxcbnHk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/06/08554159", "title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness", "doi": null, "abstractUrl": "/journal/tg/2020/06/08554159/17D45WB0qbp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714054", "title": "Remote research on locomotion interfaces for virtual reality: Replication of a lab-based study on teleporting interfaces", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714054/1B0XZAXWaIg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797756", "title": "Field of View and Forward Motion Discrimination in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797756/1cJ0UegDTgY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798017", "title": "Simulated Reference Frame Effects on Steering, Jumping and Sliding", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798017/1cJ0YUTkHao", "parentPublication": { "id": 
"proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a391", "title": "The Effectiveness of Locomotion Interfaces Depends on Self-Motion Cues, Environmental Cues, and the Individual", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a391/1tnXFgLAfSw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a201", "title": "Manipulating Rotational Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a201/1yfxMXu7XhK", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09052629", "articleId": "1iFLK7FLSsE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998337", "articleId": "1hrXgdu8Bkk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXgdu8Bkk", "doi": "10.1109/TVCG.2020.2973076", "abstract": "Point clouds-based 3D human pose estimation that aims to recover the 3D locations of human skeleton joints plays an important role in many AR/VR applications. The success of existing methods is generally built upon large scale data annotated with 3D human joints. However, it is a labor-intensive and error-prone process to annotate 3D human joints from input depth images or point clouds, due to the self-occlusion between body parts as well as the tedious annotation process on 3D point clouds. Meanwhile, it is easier to construct human pose datasets with 2D human joint annotations on depth images. To address this problem, we present a weakly supervised adversarial learning framework for 3D human pose estimation from point clouds. Compared to existing 3D human pose estimation methods from depth images or point clouds, we exploit both the weakly supervised data with only annotations of 2D human joints and fully supervised data with annotations of 3D human joints. In order to relieve the human pose ambiguity due to weak supervision, we adopt adversarial learning to ensure the recovered human pose is valid. Instead of using either 2D or 3D representations of depth images in previous methods, we exploit both point clouds and the input depth image. We adopt 2D CNN to extract 2D human joints from the input depth image, 2D human joints aid us in obtaining the initial 3D human joints and selecting effective sampling points that could reduce the computation cost of 3D human pose regression using point clouds network. The used point clouds network can narrow down the domain gap between the network input i.e. point clouds and 3D joints. Thanks to weakly supervised adversarial learning framework, our method can achieve accurate 3D human pose from point clouds. Experiments on the ITOP dataset and EVAL dataset demonstrate that our method can achieve state-of-the-art performance efficiently.", "abstracts": [ { "abstractType": "Regular", "content": "Point clouds-based 3D human pose estimation that aims to recover the 3D locations of human skeleton joints plays an important role in many AR/VR applications. The success of existing methods is generally built upon large scale data annotated with 3D human joints. However, it is a labor-intensive and error-prone process to annotate 3D human joints from input depth images or point clouds, due to the self-occlusion between body parts as well as the tedious annotation process on 3D point clouds. Meanwhile, it is easier to construct human pose datasets with 2D human joint annotations on depth images. To address this problem, we present a weakly supervised adversarial learning framework for 3D human pose estimation from point clouds. Compared to existing 3D human pose estimation methods from depth images or point clouds, we exploit both the weakly supervised data with only annotations of 2D human joints and fully supervised data with annotations of 3D human joints. In order to relieve the human pose ambiguity due to weak supervision, we adopt adversarial learning to ensure the recovered human pose is valid. 
Instead of using either 2D or 3D representations of depth images in previous methods, we exploit both point clouds and the input depth image. We adopt 2D CNN to extract 2D human joints from the input depth image, 2D human joints aid us in obtaining the initial 3D human joints and selecting effective sampling points that could reduce the computation cost of 3D human pose regression using point clouds network. The used point clouds network can narrow down the domain gap between the network input i.e. point clouds and 3D joints. Thanks to weakly supervised adversarial learning framework, our method can achieve accurate 3D human pose from point clouds. Experiments on the ITOP dataset and EVAL dataset demonstrate that our method can achieve state-of-the-art performance efficiently.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Point clouds-based 3D human pose estimation that aims to recover the 3D locations of human skeleton joints plays an important role in many AR/VR applications. The success of existing methods is generally built upon large scale data annotated with 3D human joints. However, it is a labor-intensive and error-prone process to annotate 3D human joints from input depth images or point clouds, due to the self-occlusion between body parts as well as the tedious annotation process on 3D point clouds. Meanwhile, it is easier to construct human pose datasets with 2D human joint annotations on depth images. To address this problem, we present a weakly supervised adversarial learning framework for 3D human pose estimation from point clouds. Compared to existing 3D human pose estimation methods from depth images or point clouds, we exploit both the weakly supervised data with only annotations of 2D human joints and fully supervised data with annotations of 3D human joints. In order to relieve the human pose ambiguity due to weak supervision, we adopt adversarial learning to ensure the recovered human pose is valid. Instead of using either 2D or 3D representations of depth images in previous methods, we exploit both point clouds and the input depth image. We adopt 2D CNN to extract 2D human joints from the input depth image, 2D human joints aid us in obtaining the initial 3D human joints and selecting effective sampling points that could reduce the computation cost of 3D human pose regression using point clouds network. The used point clouds network can narrow down the domain gap between the network input i.e. point clouds and 3D joints. Thanks to weakly supervised adversarial learning framework, our method can achieve accurate 3D human pose from point clouds. 
Experiments on the ITOP dataset and EVAL dataset demonstrate that our method can achieve state-of-the-art performance efficiently.", "title": "Weakly Supervised Adversarial Learning for 3D Human Pose Estimation from Point Clouds", "normalizedTitle": "Weakly Supervised Adversarial Learning for 3D Human Pose Estimation from Point Clouds", "fno": "08998337", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Convolutional Neural Nets", "Image Representation", "Pose Estimation", "Stereo Image Processing", "Supervised Learning", "Human Skeleton Joints", "Depth Images", "Weakly Supervised Adversarial Learning Framework", "Point Clouds Based 3 D Human Pose Estimation", "2 D CNN", "2 D Human Joints", "2 D Representations", "3 D Representations", "Three Dimensional Displays", "Two Dimensional Displays", "Pose Estimation", "Heating Systems", "Proposals", "Training Data", "Computers", "Human Pose Estimation", "Point Clouds", "Depth Map" ], "authors": [ { "givenName": "Zihao", "surname": "Zhang", "fullName": "Zihao Zhang", "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, University of Chinese Academy of Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Lei", "surname": "Hu", "fullName": "Lei Hu", "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, University of Chinese Academy of Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoming", "surname": "Deng", "fullName": "Xiaoming Deng", "affiliation": "Beijing Key Laboratory of Human Computer Interactions, Institute of Software, Chinese Academy of Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Shihong", "surname": "Xia", "fullName": "Shihong Xia", "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences, University of Chinese Academy of Sciences", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1851-1859", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032c621", "title": "Compositional Human Pose Regression", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c621/12OmNqBtiU5", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034a805", "title": "Generating Multiple Diverse Hypotheses for Human 3D Pose Consistent with 2D Joint Detections", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034a805/12OmNxFsmrY", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a398", "title": "Towards 3D Human Pose Estimation in the Wild: A Weakly-Supervised Approach", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a398/12OmNy3iFgU", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f255", 
"title": "3D Human Pose Estimation in the Wild by Adversarial Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f255/17D45WHONlv", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c344", "title": "HEMlets Pose: Learning Part-Centric Heatmap Triplets for Accurate 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c344/1hQqygVk4TK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c197", "title": "Fusing Wearable IMUs With Multi-View Images for Human Pose Estimation: A Geometric Approach", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c197/1m3nJvdaX2U", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800g855", "title": "PandaNet: Anchor-Based Single-Shot Multi-Person 3D Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800g855/1m3nlxOySyY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212996", "title": "3D Human Pose Estimation with Adversarial Learning", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212996/1nHRTVvYYVi", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09320561", "title": "HEMlets PoSh: Learning Part-Centric Heatmap Triplets for 3D Human Pose and Shape Estimation", "doi": null, "abstractUrl": "/journal/tp/2022/06/09320561/1qkwANyEXq8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a898", "title": "Error Bounds of Projection Models in Weakly Supervised 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a898/1qyxlY5L8jK", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998297", "articleId": "1hrXhk9mu9W", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998307", "articleId": "1hpPBi8EjJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPBi8EjJe", "doi": "10.1109/TVCG.2020.2973474", "abstract": "We analyzed the design space of group navigation tasks in distributed virtual environments and present a framework consisting of techniques to form groups, distribute responsibilities, navigate together, and eventually split up again. To improve joint navigation, our work focused on an extension of the Multi-Ray Jumping technique that allows adjusting the spatial formation of two distributed users as part of the target specification process. The results of a quantitative user study showed that these adjustments lead to significant improvements in joint two-user travel, which is evidenced by more efficient travel sequences and lower task loads imposed on the navigator and the passenger. In a qualitative expert review involving all four stages of group navigation, we confirmed the effective and efficient use of our technique in a more realistic use-case scenario and concluded that remote collaboration benefits from fluent transitions between individual and group navigation.", "abstracts": [ { "abstractType": "Regular", "content": "We analyzed the design space of group navigation tasks in distributed virtual environments and present a framework consisting of techniques to form groups, distribute responsibilities, navigate together, and eventually split up again. To improve joint navigation, our work focused on an extension of the Multi-Ray Jumping technique that allows adjusting the spatial formation of two distributed users as part of the target specification process. The results of a quantitative user study showed that these adjustments lead to significant improvements in joint two-user travel, which is evidenced by more efficient travel sequences and lower task loads imposed on the navigator and the passenger. In a qualitative expert review involving all four stages of group navigation, we confirmed the effective and efficient use of our technique in a more realistic use-case scenario and concluded that remote collaboration benefits from fluent transitions between individual and group navigation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We analyzed the design space of group navigation tasks in distributed virtual environments and present a framework consisting of techniques to form groups, distribute responsibilities, navigate together, and eventually split up again. To improve joint navigation, our work focused on an extension of the Multi-Ray Jumping technique that allows adjusting the spatial formation of two distributed users as part of the target specification process. The results of a quantitative user study showed that these adjustments lead to significant improvements in joint two-user travel, which is evidenced by more efficient travel sequences and lower task loads imposed on the navigator and the passenger. 
In a qualitative expert review involving all four stages of group navigation, we confirmed the effective and efficient use of our technique in a more realistic use-case scenario and concluded that remote collaboration benefits from fluent transitions between individual and group navigation.", "title": "Getting There Together: Group Navigation in Distributed Virtual Environments", "normalizedTitle": "Getting There Together: Group Navigation in Distributed Virtual Environments", "fno": "08998307", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Groupware", "User Interfaces", "Virtual Reality", "Individual Navigation", "Group Navigation", "Remote Collaboration", "Realistic Use Case Scenario", "Travel Sequences", "Joint Two User Travel", "Navigator", "Two User Travel", "Quantitative User Study", "Distributed Users", "Multiray Jumping Technique", "Joint Navigation", "Group Navigation Tasks", "Distributed Virtual Environments", "Navigation", "Virtual Environments", "Collaboration", "Task Analysis", "Teleportation", "Avatars", "Virtual Reality", "Collaborative Virtual Environments", "Remote Collaboration", "Group Navigation", "Teleportation", "Jumping" ], "authors": [ { "givenName": "Tim", "surname": "Weissker", "fullName": "Tim Weissker", "affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" }, { "givenName": "Pauline", "surname": "Bimberg", "fullName": "Pauline Bimberg", "affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Froehlich", "fullName": "Bernd Froehlich", "affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1860-1870", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/sive/2014/5781/0/07006288", "title": "Reproducible sonification for virtual navigation", "doi": null, "abstractUrl": "/proceedings-article/sive/2014/07006288/12OmNAtaS0G", "parentPublication": { "id": "proceedings/sive/2014/5781/0", "title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184223", "title": "A gaming interface using body gestures for collaborative navigation", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184223/12OmNwwMf5A", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a556", "title": "Group WiM: A Group Navigation Technique for Collaborative Virtual Reality Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a556/1CJdXqzjctO", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049698", "title": "Gaining the High Ground: Teleportation to Mid-Air Targets 
in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a082", "title": "WiM-Based Group Navigation for Collaborative Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a082/1KmFfzv6fWo", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797807", "title": "Multi-Ray Jumping: Comprehensible Group Navigation for Collocated Users in Immersive Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797807/1cJ0MXFzine", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2021/04/09351621", "title": "Improved Speaker and Navigator for Vision-and-Language Navigation", "doi": null, "abstractUrl": "/magazine/mu/2021/04/09351621/1r50rE3jRZe", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382870", "title": "Group Navigation for Guided Tours in Distributed Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382870/1saZCxsOG9q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a363", "title": "An Overview of Group Navigation in Multi-User Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a363/1tnXytVyks8", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700d742", "title": "Auto-Navigator: Decoupled Neural Architecture Search for Visual Navigation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700d742/1uqGAyRc3Go", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998337", "articleId": "1hrXgdu8Bkk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998139", "articleId": "1hrXe0Hbv0I", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEguSwO6rK", "name": "ttg202005-08998307s1-supp1-2973474.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998307s1-supp1-2973474.mp4", "extension": "mp4", "size": "55 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
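The formation adjustment that the record above credits for improved joint travel is, at its core, a small piece of planar geometry: pick a jump target, then resolve both users' destinations and facing from a formation parameter chosen during target specification. The sketch below illustrates that idea under assumptions of my own (a two-user, side-by-side formation on a flat floor); it is not the paper's Multi-Ray Jumping implementation.

```python
import math

def two_user_jump_targets(target, facing_deg, separation_m, formation_deg=0.0):
    """Resolve two users' teleport destinations around one jump target.

    target:        (x, y) floor point picked by the navigator's ray.
    facing_deg:    direction the pair should face after the jump.
    separation_m:  interpersonal distance to restore at the target.
    formation_deg: adjustment of the side-by-side formation chosen
                   during target specification (180 swaps sides).
    Returns (x, y, facing_deg) for the navigator and the passenger.
    """
    lateral = math.radians(facing_deg + 90.0 + formation_deg)
    dx = 0.5 * separation_m * math.cos(lateral)
    dy = 0.5 * separation_m * math.sin(lateral)
    navigator = (target[0] + dx, target[1] + dy, facing_deg)
    passenger = (target[0] - dx, target[1] - dy, facing_deg)
    return navigator, passenger
```

Setting `formation_deg` to 180 swaps which side each user lands on, which is the kind of spatial-formation adjustment the abstract describes exposing as part of the target specification process.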
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXe0Hbv0I", "doi": "10.1109/TVCG.2020.2973443", "abstract": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.", "abstracts": [ { "abstractType": "Regular", "content": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Occlusion is a powerful visual cue that is crucial for depth perception and realism in optical see-through augmented reality (OST-AR). However, existing OST-AR systems additively overlay physical and digital content with beam combiners - an approach that does not easily support mutual occlusion, resulting in virtual objects that appear semi-transparent and unrealistic. In this work, we propose a new type of occlusion-capable OST-AR system. 
Rather than additively combining the real and virtual worlds, we employ a single digital micromirror device (DMD) to merge the respective light paths in a multiplicative manner. This unique approach allows us to simultaneously block light incident from the physical scene on a pixel-by-pixel basis while also modulating the light emitted by a light-emitting diode (LED) to display digital content. Our technique builds on mixed binary/continuous factorization algorithms to optimize time-multiplexed binary DMD patterns and their corresponding LED colors to approximate a target augmented reality (AR) scene. In simulations and with a prototype benchtop display, we demonstrate hard-edge occlusions, plausible shadows, and also gaze-contingent optimization of this novel display mode, which only requires a single spatial light modulator.", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "normalizedTitle": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "fno": "08998139", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "LED Displays", "Micromirrors", "Spatial Light Modulators", "Gaze Contingent Optimization", "LED Colors", "Mixed Binary Continuous Factorization Algorithms", "Optical See Through Augmented Reality Display", "Single Spatial Light Modulator Occlusion", "Factored Occlusion", "Display Mode", "Hard Edge Occlusions", "Prototype Benchtop Display", "Augmented Reality Scene", "Time Multiplexed Binary DMD Patterns", "Digital Content", "Light Emitting Diode", "Pixel By Pixel Basis", "Physical Scene", "Light Incident", "Light Paths", "Single Digital Micromirror Device", "Virtual Worlds", "Real Worlds", "Occlusion Capable OST AR System", "Virtual Objects", "Mutual Occlusion", "Beam Combiners", "Depth Perception", "Image Color Analysis", "Optical Diffraction", "Mirrors", "Light Emitting Diodes", "Optical Imaging", "Augmented Reality", "Modulation", "Augmented Reality", "Computational Displays", "Mutual Occlusion" ], "authors": [ { "givenName": "Brooke", "surname": "Krajancich", "fullName": "Brooke Krajancich", "affiliation": "Stanford University", "__typename": "ArticleAuthorType" }, { "givenName": "Nitish", "surname": "Padmanaban", "fullName": "Nitish Padmanaban", "affiliation": "Stanford University", "__typename": "ArticleAuthorType" }, { "givenName": "Gordon", "surname": "Wetzstein", "fullName": "Gordon Wetzstein", "affiliation": "Stanford University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1871-1879", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402574", "title": "Occlusion capable optical see-through head-mounted display using freeform optics", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007218", "title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator", 
"doi": null, "abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a800", "title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a389", "title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050791", "title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable", "doi": null, "abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a237", "title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08827571", "title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics", "doi": null, "abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09416829", "title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display", "doi": null, "abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a422", "title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998307", "articleId": "1hpPBi8EjJe", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998133", "articleId": "1hrXcnyAOzu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXcnyAOzu", "doi": "10.1109/TVCG.2020.2973052", "abstract": "The gaze behavior of virtual avatars is critical to social presence and perceived eye contact during social interactions in Virtual Reality. Virtual Reality headsets are being designed with integrated eye tracking to enable compelling virtual social interactions. This paper shows that the near infra-red cameras used in eye tracking capture eye images that contain iris patterns of the user. Because iris patterns are a gold standard biometric, the current technology places the user's biometric identity at risk. Our first contribution is an optical defocus based hardware solution to remove the iris biometric from the stream of eye tracking images. We characterize the performance of this solution with different internal parameters. Our second contribution is a psychophysical experiment with a same-different task that investigates the sensitivity of users to a virtual avatar's eye movements when this solution is applied. By deriving detection threshold values, our findings provide a range of defocus parameters where the change in eye movements would go unnoticed in a conversational setting. Our third contribution is a perceptual study to determine the impact of defocus parameters on the perceived eye contact, attentiveness, naturalness, and truthfulness of the avatar. Thus, if a user wishes to protect their iris biometric, our approach provides a solution that balances biometric protection while preventing their conversation partner from perceiving a difference in the user's virtual avatar. This work is the first to develop secure eye tracking configurations for VR/AR/XR applications and motivates future work in the area.", "abstracts": [ { "abstractType": "Regular", "content": "The gaze behavior of virtual avatars is critical to social presence and perceived eye contact during social interactions in Virtual Reality. Virtual Reality headsets are being designed with integrated eye tracking to enable compelling virtual social interactions. This paper shows that the near infra-red cameras used in eye tracking capture eye images that contain iris patterns of the user. Because iris patterns are a gold standard biometric, the current technology places the user's biometric identity at risk. Our first contribution is an optical defocus based hardware solution to remove the iris biometric from the stream of eye tracking images. We characterize the performance of this solution with different internal parameters. Our second contribution is a psychophysical experiment with a same-different task that investigates the sensitivity of users to a virtual avatar's eye movements when this solution is applied. By deriving detection threshold values, our findings provide a range of defocus parameters where the change in eye movements would go unnoticed in a conversational setting. Our third contribution is a perceptual study to determine the impact of defocus parameters on the perceived eye contact, attentiveness, naturalness, and truthfulness of the avatar. 
Thus, if a user wishes to protect their iris biometric, our approach provides a solution that balances biometric protection while preventing their conversation partner from perceiving a difference in the user's virtual avatar. This work is the first to develop secure eye tracking configurations for VR/AR/XR applications and motivates future work in the area.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The gaze behavior of virtual avatars is critical to social presence and perceived eye contact during social interactions in Virtual Reality. Virtual Reality headsets are being designed with integrated eye tracking to enable compelling virtual social interactions. This paper shows that the near infra-red cameras used in eye tracking capture eye images that contain iris patterns of the user. Because iris patterns are a gold standard biometric, the current technology places the user's biometric identity at risk. Our first contribution is an optical defocus based hardware solution to remove the iris biometric from the stream of eye tracking images. We characterize the performance of this solution with different internal parameters. Our second contribution is a psychophysical experiment with a same-different task that investigates the sensitivity of users to a virtual avatar's eye movements when this solution is applied. By deriving detection threshold values, our findings provide a range of defocus parameters where the change in eye movements would go unnoticed in a conversational setting. Our third contribution is a perceptual study to determine the impact of defocus parameters on the perceived eye contact, attentiveness, naturalness, and truthfulness of the avatar. Thus, if a user wishes to protect their iris biometric, our approach provides a solution that balances biometric protection while preventing their conversation partner from perceiving a difference in the user's virtual avatar. 
This work is the first to develop secure eye tracking configurations for VR/AR/XR applications and motivates future work in the area.", "title": "The Security-Utility Trade-off for Iris Authentication and Eye Animation for Social Virtual Avatars", "normalizedTitle": "The Security-Utility Trade-off for Iris Authentication and Eye Animation for Social Virtual Avatars", "fno": "08998133", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Computer Animation", "Gaze Tracking", "Iris Recognition", "Security Of Data", "Virtual Reality", "Security Utility Trade Off", "Iris Authentication", "Eye Animation", "Gaze Behavior", "Virtual Avatar", "Social Presence", "Virtual Reality Headsets", "Integrated Eye Tracking", "Virtual Social Interactions", "Infrared Cameras", "Eye Tracking", "Iris Patterns", "Gold Standard Biometric", "Biometric Identity", "Optical Defocus Based Hardware Solution", "Iris Biometric", "Eye Tracking Images", "Defocus Parameters", "Eye Movements", "Biometric Protection", "Secure Eye Tracking Configurations", "Social Virtual Avatars", "Iris Recognition", "Avatars", "Gaze Tracking", "Cameras", "Privacy", "Animation", "Security", "Eye Tracking", "Iris Recognition", "Animated Avatars", "Eye Movements" ], "authors": [ { "givenName": "Brendan", "surname": "John", "fullName": "Brendan John", "affiliation": "University of Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Sophie", "surname": "Jörg", "fullName": "Sophie Jörg", "affiliation": "Clemson University", "__typename": "ArticleAuthorType" }, { "givenName": "Sanjeev", "surname": "Koppal", "fullName": "Sanjeev Koppal", "affiliation": "University of Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Eakta", "surname": "Jain", "fullName": "Eakta Jain", "affiliation": "University of Florida", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1880-1890", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccce/2014/7635/0/7635a009", "title": "Template Matching Techniques for Iris Recognition System", "doi": null, "abstractUrl": "/proceedings-article/iccce/2014/7635a009/12OmNA0MZ04", "parentPublication": { "id": "proceedings/iccce/2014/7635/0", "title": "2014 International Conference on Computer & Communication Engineering (ICCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a201", "title": "On the Fusion of Periocular and Iris Biometrics in Non-ideal Imagery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a201/12OmNB0Fxi6", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paap/2011/4575/0/4575a110", "title": "Utilizing Dark Features for Iris Recognition in Less Constrained Environments", "doi": null, "abstractUrl": "/proceedings-article/paap/2011/4575a110/12OmNynJMXu", "parentPublication": { "id": "proceedings/paap/2011/4575/0", "title": "Parallel Architectures, Algorithms and Programming, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2018/01/07350227", "title": "Negative Iris 
Recognition", "doi": null, "abstractUrl": "/journal/tq/2018/01/07350227/13rRUxly96T", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499061", "title": "Iris Liveness Detection: A Survey", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499061/17D45XeKgyz", "parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/i-span/2018/8534/0/853400a067", "title": "Biological Features De-identification in Iris Images", "doi": null, "abstractUrl": "/proceedings-article/i-span/2018/853400a067/17D45Xtvpdu", "parentPublication": { "id": "proceedings/i-span/2018/8534/0", "title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08818661", "title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation", "doi": null, "abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a655", "title": "A Multi-Modal Gaze Tracking Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a655/1ehBL8sk06I", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090461", "title": "Front Camera Eye Tracking For Mobile VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998139", "articleId": "1hrXe0Hbv0I", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998145", "articleId": "1hpPCGSeWXu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPCGSeWXu", "doi": "10.1109/TVCG.2020.2973057", "abstract": "We present a sensor-fusion method that exploits a depth camera and a gyroscope to track the articulation of a hand in the presence of excessive motion blur. In case of slow and smooth hand motions, the existing methods estimate the hand pose fairly accurately and robustly, despite challenges due to the high dimensionality of the problem, self-occlusions, uniform appearance of hand parts, etc. However, the accuracy of hand pose estimation drops considerably for fast-moving hands because the depth image is severely distorted due to motion blur. Moreover, when hands move fast, the actual hand pose is far from the one estimated in the previous frame, therefore the assumption of temporal continuity on which tracking methods rely, is not valid. In this paper, we track fast-moving hands with the combination of a gyroscope and a depth camera. As a first step, we calibrate a depth camera and a gyroscope attached to a hand so as to identify their time and pose offsets. Following that, we fuse the rotation information of the calibrated gyroscope with model-based hierarchical particle filter tracking. A series of quantitative and qualitative experiments demonstrate that the proposed method performs more accurately and robustly in the presence of motion blur, when compared to state of the art algorithms, especially in the case of very fast hand rotations.", "abstracts": [ { "abstractType": "Regular", "content": "We present a sensor-fusion method that exploits a depth camera and a gyroscope to track the articulation of a hand in the presence of excessive motion blur. In case of slow and smooth hand motions, the existing methods estimate the hand pose fairly accurately and robustly, despite challenges due to the high dimensionality of the problem, self-occlusions, uniform appearance of hand parts, etc. However, the accuracy of hand pose estimation drops considerably for fast-moving hands because the depth image is severely distorted due to motion blur. Moreover, when hands move fast, the actual hand pose is far from the one estimated in the previous frame, therefore the assumption of temporal continuity on which tracking methods rely, is not valid. In this paper, we track fast-moving hands with the combination of a gyroscope and a depth camera. As a first step, we calibrate a depth camera and a gyroscope attached to a hand so as to identify their time and pose offsets. Following that, we fuse the rotation information of the calibrated gyroscope with model-based hierarchical particle filter tracking. A series of quantitative and qualitative experiments demonstrate that the proposed method performs more accurately and robustly in the presence of motion blur, when compared to state of the art algorithms, especially in the case of very fast hand rotations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a sensor-fusion method that exploits a depth camera and a gyroscope to track the articulation of a hand in the presence of excessive motion blur. 
In case of slow and smooth hand motions, the existing methods estimate the hand pose fairly accurately and robustly, despite challenges due to the high dimensionality of the problem, self-occlusions, uniform appearance of hand parts, etc. However, the accuracy of hand pose estimation drops considerably for fast-moving hands because the depth image is severely distorted due to motion blur. Moreover, when hands move fast, the actual hand pose is far from the one estimated in the previous frame, therefore the assumption of temporal continuity on which tracking methods rely, is not valid. In this paper, we track fast-moving hands with the combination of a gyroscope and a depth camera. As a first step, we calibrate a depth camera and a gyroscope attached to a hand so as to identify their time and pose offsets. Following that, we fuse the rotation information of the calibrated gyroscope with model-based hierarchical particle filter tracking. A series of quantitative and qualitative experiments demonstrate that the proposed method performs more accurately and robustly in the presence of motion blur, when compared to state of the art algorithms, especially in the case of very fast hand rotations.", "title": "3D Hand Tracking in the Presence of Excessive Motion Blur", "normalizedTitle": "3D Hand Tracking in the Presence of Excessive Motion Blur", "fno": "08998145", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Gyroscopes", "Image Motion Analysis", "Image Sensors", "Object Tracking", "Particle Filtering Numerical Methods", "Pose Estimation", "Sensor Fusion", "Stereo Image Processing", "Excessive Motion Blur", "Sensor Fusion Method", "Depth Camera", "Model Based Hierarchical Particle Filter Tracking", "Fast Hand Rotations", "3 D Hand Tracking", "Gyroscope", "Hand Pose Estimation", "Motion Blur", "Tracking", "Cameras", "Gyroscopes", "Three Dimensional Displays", "Pose Estimation", "Robustness", "Solid Modeling", "3 D Hand Tracking", "3 D Hand Pose Estimation", "Sensor Fusion", "Depth Camera", "Gyroscope", "Motion Blur" ], "authors": [ { "givenName": "Gabyong", "surname": "Park", "fullName": "Gabyong Park", "affiliation": "KAIST, Daejeon, S. Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Antonis", "surname": "Argyros", "fullName": "Antonis Argyros", "affiliation": "Institute of Computer Science, FORTH, Heraklion, Greece", "__typename": "ArticleAuthorType" }, { "givenName": "Juyoung", "surname": "Lee", "fullName": "Juyoung Lee", "affiliation": "KAIST, Daejeon, S. Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Woontack", "surname": "Woo", "fullName": "Woontack Woo", "affiliation": "KAIST, Daejeon, S. 
Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1891-1901", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851d593", "title": "Robust 3D Hand Pose Estimation in Single Depth Images: From Single-View CNN to Multi-View CNNs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851d593/12OmNASraXC", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f679", "title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d123", "title": "Learning Hand Articulations by Hallucinating Heat Distribution", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d123/12OmNCcKQwN", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d142", "title": "Robust Hand Pose Estimation during the Interaction with an Unknown Object", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d142/12OmNvHoQq3", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscv/2017/4062/0/08054904", "title": "Hand pose estimation based on deep learning depth map for hand gesture recognition", "doi": null, "abstractUrl": "/proceedings-article/iscv/2017/08054904/12OmNwFid2e", "parentPublication": { "id": "proceedings/iscv/2017/4062/0", "title": "2017 Intelligent Systems and Computer Vision (ISCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890556", "title": "A robust tracking algorithm for 3D hand gesture with rapid hand motion through deep learning", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890556/12OmNwGIcxU", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c636", "title": "Depth-Based 3D Hand Pose Estimation: From Current Achievements to Future Goals", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c636/17D45W2Wyyl", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2019/1975/0/197500b914", "title": "Gyroscope-Aided Motion Deblurring with Deep Networks", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b914/18j8KfzbuFy", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3253", "title": "GOAL: Generating 4D Whole-Body Motion for Hand-Object Grasping", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3253/1H1jAb3RmsU", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c866", "title": "3D Hand Pose Estimation from RGB Using Privileged Learning with Depth Data", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c866/1i5mvFudr68", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998133", "articleId": "1hrXcnyAOzu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998375", "articleId": "1hpPBdSWXTi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgziqRPjy", "name": "ttg202005-08998145s1-supp1-2973057.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998145s1-supp1-2973057.mp4", "extension": "mp4", "size": "33.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPBdSWXTi", "doi": "10.1109/TVCG.2020.2973473", "abstract": "We conduct novel analyses of users' gaze behaviors in dynamic virtual scenes and, based on our analyses, we present a novel CNN-based model called DGaze for gaze prediction in HMD-based applications. We first collect 43 users' eye tracking data in 5 dynamic scenes under free-viewing conditions. Next, we perform statistical analysis of our data and observe that dynamic object positions, head rotation velocities, and salient regions are correlated with users' gaze positions. Based on our analysis, we present a CNN-based model (DGaze) that combines object position sequence, head velocity sequence, and saliency features to predict users' gaze positions. Our model can be applied to predict not only realtime gaze positions but also gaze positions in the near future and can achieve better performance than prior method. In terms of realtime prediction, DGaze achieves a 22.0% improvement over prior method in dynamic scenes and obtains an improvement of 9.5% in static scenes, based on using the angular distance as the evaluation metric. We also propose a variant of our model called DGaze_ET that can be used to predict future gaze positions with higher precision by combining accurate past gaze data gathered using an eye tracker. We further analyze our CNN architecture and verify the effectiveness of each component in our model. We apply DGaze to gaze-contingent rendering and a game, and also present the evaluation results from a user study.", "abstracts": [ { "abstractType": "Regular", "content": "We conduct novel analyses of users' gaze behaviors in dynamic virtual scenes and, based on our analyses, we present a novel CNN-based model called DGaze for gaze prediction in HMD-based applications. We first collect 43 users' eye tracking data in 5 dynamic scenes under free-viewing conditions. Next, we perform statistical analysis of our data and observe that dynamic object positions, head rotation velocities, and salient regions are correlated with users' gaze positions. Based on our analysis, we present a CNN-based model (DGaze) that combines object position sequence, head velocity sequence, and saliency features to predict users' gaze positions. Our model can be applied to predict not only realtime gaze positions but also gaze positions in the near future and can achieve better performance than prior method. In terms of realtime prediction, DGaze achieves a 22.0% improvement over prior method in dynamic scenes and obtains an improvement of 9.5% in static scenes, based on using the angular distance as the evaluation metric. We also propose a variant of our model called DGaze_ET that can be used to predict future gaze positions with higher precision by combining accurate past gaze data gathered using an eye tracker. We further analyze our CNN architecture and verify the effectiveness of each component in our model. 
We apply DGaze to gaze-contingent rendering and a game, and also present the evaluation results from a user study.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We conduct novel analyses of users' gaze behaviors in dynamic virtual scenes and, based on our analyses, we present a novel CNN-based model called DGaze for gaze prediction in HMD-based applications. We first collect 43 users' eye tracking data in 5 dynamic scenes under free-viewing conditions. Next, we perform statistical analysis of our data and observe that dynamic object positions, head rotation velocities, and salient regions are correlated with users' gaze positions. Based on our analysis, we present a CNN-based model (DGaze) that combines object position sequence, head velocity sequence, and saliency features to predict users' gaze positions. Our model can be applied to predict not only realtime gaze positions but also gaze positions in the near future and can achieve better performance than the prior method. In terms of realtime prediction, DGaze achieves a 22.0% improvement over the prior method in dynamic scenes and obtains an improvement of 9.5% in static scenes, based on using the angular distance as the evaluation metric. We also propose a variant of our model called DGaze_ET that can be used to predict future gaze positions with higher precision by combining accurate past gaze data gathered using an eye tracker. We further analyze our CNN architecture and verify the effectiveness of each component in our model. We apply DGaze to gaze-contingent rendering and a game, and also present the evaluation results from a user study.", "title": "DGaze: CNN-Based Gaze Prediction in Dynamic Scenes", "normalizedTitle": "DGaze: CNN-Based Gaze Prediction in Dynamic Scenes", "fno": "08998375", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Convolutional Neural Nets", "Feature Extraction", "Gaze Tracking", "Helmet Mounted Displays", "Human Computer Interaction", "Rendering Computer Graphics", "Statistical Analysis", "Virtual Reality", "Dynamic Virtual Scenes", "HMD Based Applications", "Dynamic Object Positions", "Head Rotation Velocities", "Object Position Sequence", "Head Velocity Sequence", "Realtime Gaze Positions", "D Gaze ET", "CNN Architecture", "Gaze Contingent Rendering", "CNN Based Gaze Prediction Model", "Users Eye Tracking Data", "Statistical Analysis", "Helmet Mounted Displays", "Predictive Models", "Gaze Tracking", "Solid Modeling", "Head", "Analytical Models", "Data Models", "Rendering Computer Graphics", "Gaze Prediction", "Convolutional Neural Network", "Eye Tracking", "Dynamic Scene", "Gaze Contingent Rendering", "Virtual Reality" ], "authors": [ { "givenName": "Zhiming", "surname": "Hu", "fullName": "Zhiming Hu", "affiliation": "Peking University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Sheng", "surname": "Li", "fullName": "Sheng Li", "affiliation": "Peking University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Congyi", "surname": "Zhang", "fullName": "Congyi Zhang", "affiliation": "Peking University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Kangrui", "surname": "Yi", "fullName": "Kangrui Yi", "affiliation": "Peking University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guoping", "surname": "Wang", "fullName": "Guoping Wang", "affiliation": "Peking University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Dinesh", "surname": "Manocha", "fullName": "Dinesh Manocha", "affiliation": "University of Maryland, USA",
"__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1902-1911", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vs-games/2017/5812/0/08056614", "title": "Serious gaze", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056614/12OmNwDACge", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a131", "title": "A Simplified 3D Gaze Tracking Technology with Stereo Vision", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a131/12OmNwqft0F", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d162", "title": "Monocular Free-Head 3D Gaze Tracking with Deep Learning and Geometry Constraints", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d162/12OmNxbmSBT", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2017/1235/0/08457962", "title": "Gaze Tracking in 3D Space with a Convolution Neural Network “See What I See”", "doi": null, "abstractUrl": "/proceedings-article/aipr/2017/08457962/13xI8AAc3rr", "parentPublication": { "id": "proceedings/aipr/2017/1235/0", "title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f333", "title": "Gaze Prediction in Dynamic 360° Immersive Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f333/17D45VW8brT", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000c237", "title": "Light-Weight Head Pose Invariant Gaze Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643434", "title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090417", "title": "Gaze Analysis and Prediction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090417/1jIxqNN9Xqw", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom/2021/0418/0/09439113", "title": "GAZEL: Runtime Gaze Tracking for Smartphones", "doi": null, "abstractUrl": "/proceedings-article/percom/2021/09439113/1tTtABGgVJS", "parentPublication": { "id": "proceedings/percom/2021/0418/0", "title": "2021 IEEE International Conference on Pervasive Computing and Communications (PerCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998145", "articleId": "1hpPCGSeWXu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998401", "articleId": "1hrXgAAK6NW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgC4MvmUg", "name": "ttg202005-08998375s1-supp1-2973473.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998375s1-supp1-2973473.mp4", "extension": "mp4", "size": "107 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXgAAK6NW", "doi": "10.1109/TVCG.2020.2973059", "abstract": "Directivity and gain in microphone array systems for hearing aids or hearable devices allow users to acoustically enhance the information of a source of interest. This source is usually positioned directly in front. This feature is called acoustic beamforming. The current study aimed to improve users' interactions with beamforming via a virtual prototyping approach in immersive virtual environments (VEs). Eighteen participants took part in experimental sessions composed of a calibration procedure and a selective auditory attention voice-pairing task. Eight concurrent speakers were placed in an anechoic environment in two virtual reality (VR) scenarios. The scenarios were a purely virtual scenario and a realistic 360&#x00B0; audio-visual recording. Participants were asked to find an individual optimal parameterization for three different virtual beamformers: (i) head-guided, (ii) eye gaze-guided, and (iii) a novel interaction technique called dual beamformer, where head-guided is combined with an additional hand-guided beamformer. None of the participants were able to complete the task without a virtual beamformer (i.e., in normal hearing condition) due to the high complexity introduced by the experimental design. However, participants were able to correctly pair all speakers using all three proposed interaction metaphors. Providing superhuman hearing abilities in the form of a dual acoustic beamformer guided by head and hand movements resulted in statistically significant improvements in terms of pairing time, suggesting the task-relevance of interacting with multiple points of interests.", "abstracts": [ { "abstractType": "Regular", "content": "Directivity and gain in microphone array systems for hearing aids or hearable devices allow users to acoustically enhance the information of a source of interest. This source is usually positioned directly in front. This feature is called acoustic beamforming. The current study aimed to improve users' interactions with beamforming via a virtual prototyping approach in immersive virtual environments (VEs). Eighteen participants took part in experimental sessions composed of a calibration procedure and a selective auditory attention voice-pairing task. Eight concurrent speakers were placed in an anechoic environment in two virtual reality (VR) scenarios. The scenarios were a purely virtual scenario and a realistic 360&#x00B0; audio-visual recording. Participants were asked to find an individual optimal parameterization for three different virtual beamformers: (i) head-guided, (ii) eye gaze-guided, and (iii) a novel interaction technique called dual beamformer, where head-guided is combined with an additional hand-guided beamformer. None of the participants were able to complete the task without a virtual beamformer (i.e., in normal hearing condition) due to the high complexity introduced by the experimental design. However, participants were able to correctly pair all speakers using all three proposed interaction metaphors. 
Providing superhuman hearing abilities in the form of a dual acoustic beamformer guided by head and hand movements resulted in statistically significant improvements in terms of pairing time, suggesting the task-relevance of interacting with multiple points of interest.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Directivity and gain in microphone array systems for hearing aids or hearable devices allow users to acoustically enhance the information of a source of interest. This source is usually positioned directly in front. This feature is called acoustic beamforming. The current study aimed to improve users' interactions with beamforming via a virtual prototyping approach in immersive virtual environments (VEs). Eighteen participants took part in experimental sessions composed of a calibration procedure and a selective auditory attention voice-pairing task. Eight concurrent speakers were placed in an anechoic environment in two virtual reality (VR) scenarios. The scenarios were a purely virtual scenario and a realistic 360° audio-visual recording. Participants were asked to find an individual optimal parameterization for three different virtual beamformers: (i) head-guided, (ii) eye gaze-guided, and (iii) a novel interaction technique called dual beamformer, where head-guided is combined with an additional hand-guided beamformer. None of the participants were able to complete the task without a virtual beamformer (i.e., in normal hearing condition) due to the high complexity introduced by the experimental design. However, participants were able to correctly pair all speakers using all three proposed interaction metaphors. Providing superhuman hearing abilities in the form of a dual acoustic beamformer guided by head and hand movements resulted in statistically significant improvements in terms of pairing time, suggesting the task-relevance of interacting with multiple points of interest.", "title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming", "normalizedTitle": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming", "fno": "08998401", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Acoustic Signal Processing", "Array Signal Processing", "Calibration", "Gaze Tracking", "Handicapped Aids", "Hearing Aids", "Microphone Arrays", "Speech Processing", "Statistical Analysis", "Virtual Reality", "Statistical Significance", "Calibration Procedure", "Superhuman Hearing Abilities", "Eye Gaze Guided Beamformer", "Hand Guided Beamformer", "Interaction Metaphors", "Virtual Beamformer", "Realistic 360° Audio Visual Recording", "Virtual Reality Scenarios", "Anechoic Environment", "Concurrent Speakers", "Selective Auditory Attention Voice Pairing Task", "Immersive Virtual Environments", "Virtual Prototyping Approach", "Hearable Devices", "Hearing Aids", "Microphone Array Systems", "Acoustic Beamforming", "Artificial Hearing", "Auditory System", "Array Signal Processing", "Hearing Aids", "Acoustics", "Task Analysis", "Microphones", "Ear", "Virtual Prototyping", "Sonic Interactions", "Acoustic Beamforming", "Artificial Hearing", "Virtual Reality", "Multi Speaker Scenario" ], "authors": [ { "givenName": "Michele", "surname": "Geronazzo", "fullName": "Michele Geronazzo", "affiliation": "Department of Architecture, Design, and Media Technology, Aalborg University Copenhagen, Copenhagen, Denmark", "__typename": "ArticleAuthorType" }, {
"givenName": "Luis S.", "surname": "Vieira", "fullName": "Luis S. Vieira", "affiliation": "Khora Virtual Reality, Copenhagen, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Niels Christian", "surname": "Nilsson", "fullName": "Niels Christian Nilsson", "affiliation": "Department of Architecture, Design, and Media Technology, Aalborg University Copenhagen, Copenhagen, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Jesper", "surname": "Udesen", "fullName": "Jesper Udesen", "affiliation": "GN Audio A/S, Ballerup, Denmark", "__typename": "ArticleAuthorType" }, { "givenName": "Stefania", "surname": "Serafin", "fullName": "Stefania Serafin", "affiliation": "Department of Architecture, Design, and Media Technology, Aalborg University Copenhagen, Copenhagen, Denmark", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1912-1922", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/csci/2016/5510/0/07881327", "title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip", "doi": null, "abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM", "parentPublication": { "id": "proceedings/csci/2016/5510/0", "title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/4/01326750", "title": "A real time implementation and an evaluation of an optimal filtering technique for noise reduction in dual microphone hearing aids", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326750/12OmNzUPpsJ", "parentPublication": { "id": "proceedings/icassp/2004/8484/4", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446298", "title": "3D Tune-In: 3D-Games for Tuning and Learning About Hearing Aids", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446298/13bd1ftOBDg", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sive/2018/5713/0/08577076", "title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations", "doi": null, "abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f", "parentPublication": { "id": "proceedings/sive/2018/5713/0", "title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2018/9385/0/938500a296", "title": "Big Data Analytics in Healthcare: Design and Implementation for a Hearing Aid Case Study", "doi": null, "abstractUrl": "/proceedings-article/sitis/2018/938500a296/19RSs2R4Q5a", "parentPublication": { "id": "proceedings/sitis/2018/9385/0", "title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cncit/2022/5296/0/529600a014", "title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation", "doi": null, "abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76", "parentPublication": { "id": "proceedings/cncit/2022/5296/0", "title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a308", "title": "Predicting and Explaining Hearing Aid Usage Using Encoder-Decoder with Attention Mechanism and SHAP", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a308/1MeoG3BDZ7y", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2019/3851/0/385100a307", "title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data", "doi": null, "abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi", "parentPublication": { "id": "proceedings/services/2019/3851/2642-939X", "title": "2019 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sera/2019/0798/0/08886796", "title": "Identification of Difficult English Words for Assisting Hearing Impaired Children in Learning Language", "doi": null, "abstractUrl": "/proceedings-article/sera/2019/08886796/1ezRz8Cf3MI", "parentPublication": { "id": "proceedings/sera/2019/0798/0", "title": "2019 IEEE 17th International Conference on Software Engineering Research, Management and Applications (SERA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2020/2261/0/226100a241", "title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal", "doi": null, "abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu", "parentPublication": { "id": "proceedings/icise/2020/2261/0", "title": "2020 International Conference on Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998375", "articleId": "1hpPBdSWXTi", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998353", "articleId": "1hpPDKs9c7C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPDKs9c7C", "doi": "10.1109/TVCG.2020.2973065", "abstract": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360&#x00B0; video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. Possible applications of AVT are identified and proposed to guide future usage.", "abstracts": [ { "abstractType": "Regular", "content": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360&#x00B0; video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. 
Possible applications of AVT are identified and proposed to guide future usage.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360° video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. Possible applications of AVT are identified and proposed to guide future usage.", "title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration", "normalizedTitle": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration", "fno": "08998353", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Avatars", "Groupware", "Rendering Computer Graphics", "High Fidelity Telecollaboration", "Spatial References", "Remote Collaboration", "AVT", "High Fidelity Telepresence", "Remote VR User", "Real World Collaboration Space", "Local AR User", "Omni Directional Scenes", "Remote VR Traveler", "VR Headset Experiences", "Real Time Rendering", "3 D Avatar", "Mixed Reality Use Case Scenarios", "Mixed Reality Collaboration Space", "Augmented Virtual Teleportation", "Spatial Presence", "Virtual Objects", "MRC Space", "Collaboration", "Visualization", "Avatars", "Telepresence", "Three Dimensional Displays", "Teleportation", "Telepresence", "Collaboration", "Real Time", "Mixed Reality", "360 Panoramic Video" ], "authors": [ { "givenName": "Taehyun", "surname": "Rhee", "fullName": "Taehyun Rhee", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Thompson", "fullName": "Stephen Thompson", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Medeiros", "fullName": "Daniel Medeiros", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Rafael", "surname": "dos Anjos", "fullName": "Rafael dos Anjos", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Chalmers", "fullName": "Andrew Chalmers", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1923-1933", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892290", "title": "Asymetric telecollaboration in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892290/12OmNwDACwE", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446561", "title": "Augmented VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446561/13bd1eSlysy", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a165", "title": "Effects of Sharing Real-Time Multi-Sensory Heart Rate Feedback in Different Immersive Collaborative Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a165/17D45VTRov4", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a956", "title": "[DC]Using Multimodal Input in Augmented Virtual Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a956/1CJcYgs1MY0", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a238", "title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a940", "title": "[DC] Improving Multi-User Interaction for Mixed Reality Telecollaboration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a940/1CJelpv0Txm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797719", "title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a022", "title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a377", "title": "Multisensory Teleportation in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a377/1tnXGQKSUPm", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a278", "title": "In Touch with Everyday Objects: Teleportation Techniques in Virtual Environments Supporting Tangibility", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a278/1tnXjaZXiw0", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998401", "articleId": "1hrXgAAK6NW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998348", "articleId": "1hrXedrZXos", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfOM15BqU", "name": "ttg202005-08998353s1-supp1-2973065.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998353s1-supp1-2973065.mp4", "extension": "mp4", "size": "207 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXedrZXos", "doi": "10.1109/TVCG.2020.2973054", "abstract": "Human gaze awareness is important for social and collaborative interactions. Recent technological advances in augmented reality (AR) displays and sensors provide us with the means to extend collaborative spaces with real-time dynamic AR indicators of one's gaze, for example via three-dimensional cursors or rays emanating from a partner's head. However, such gaze cues are only as useful as the quality of the underlying gaze estimation and the accuracy of the display mechanism. Depending on the type of the visualization, and the characteristics of the errors, AR gaze cues could either enhance or interfere with collaborations. In this paper, we present two human-subject studies in which we investigate the influence of angular and depth errors, target distance, and the type of gaze visualization on participants' performance and subjective evaluation during a collaborative task with a virtual human partner, where participants identified targets within a dynamically walking crowd. First, our results show that there is a significant difference in performance for the two gaze visualizations ray and cursor in conditions with simulated angular and depth errors: the ray visualization provided significantly faster response times and fewer errors compared to the cursor visualization. Second, our results show that under optimal conditions, among four different gaze visualization methods, a ray without depth information provides the worst performance and is rated lowest, while a combination of a ray and cursor with depth information is rated highest. We discuss the subjective and objective performance thresholds and provide guidelines for practitioners in this field.", "abstracts": [ { "abstractType": "Regular", "content": "Human gaze awareness is important for social and collaborative interactions. Recent technological advances in augmented reality (AR) displays and sensors provide us with the means to extend collaborative spaces with real-time dynamic AR indicators of one's gaze, for example via three-dimensional cursors or rays emanating from a partner's head. However, such gaze cues are only as useful as the quality of the underlying gaze estimation and the accuracy of the display mechanism. Depending on the type of the visualization, and the characteristics of the errors, AR gaze cues could either enhance or interfere with collaborations. In this paper, we present two human-subject studies in which we investigate the influence of angular and depth errors, target distance, and the type of gaze visualization on participants' performance and subjective evaluation during a collaborative task with a virtual human partner, where participants identified targets within a dynamically walking crowd. First, our results show that there is a significant difference in performance for the two gaze visualizations ray and cursor in conditions with simulated angular and depth errors: the ray visualization provided significantly faster response times and fewer errors compared to the cursor visualization. 
Second, our results show that under optimal conditions, among four different gaze visualization methods, a ray without depth information provides the worst performance and is rated lowest, while a combination of a ray and cursor with depth information is rated highest. We discuss the subjective and objective performance thresholds and provide guidelines for practitioners in this field.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human gaze awareness is important for social and collaborative interactions. Recent technological advances in augmented reality (AR) displays and sensors provide us with the means to extend collaborative spaces with real-time dynamic AR indicators of one's gaze, for example via three-dimensional cursors or rays emanating from a partner's head. However, such gaze cues are only as useful as the quality of the underlying gaze estimation and the accuracy of the display mechanism. Depending on the type of the visualization, and the characteristics of the errors, AR gaze cues could either enhance or interfere with collaborations. In this paper, we present two human-subject studies in which we investigate the influence of angular and depth errors, target distance, and the type of gaze visualization on participants' performance and subjective evaluation during a collaborative task with a virtual human partner, where participants identified targets within a dynamically walking crowd. First, our results show that there is a significant difference in performance for the two gaze visualizations ray and cursor in conditions with simulated angular and depth errors: the ray visualization provided significantly faster response times and fewer errors compared to the cursor visualization. Second, our results show that under optimal conditions, among four different gaze visualization methods, a ray without depth information provides the worst performance and is rated lowest, while a combination of a ray and cursor with depth information is rated highest. 
We discuss the subjective and objective performance thresholds and provide guidelines for practitioners in this field.", "title": "Effects of Depth Information on Visual Target Identification Task Performance in Shared Gaze Environments", "normalizedTitle": "Effects of Depth Information on Visual Target Identification Task Performance in Shared Gaze Environments", "fno": "08998348", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Cognition", "Data Visualisation", "Groupware", "Human Computer Interaction", "Human Factors", "Neurophysiology", "Visual Perception", "Fewer Errors", "Cursor Visualization", "Subjective Performance Thresholds", "Objective Performance Thresholds", "Gaze Visualization Methods", "Faster Response Times", "Ray Visualization", "Simulated Angular Depth Errors", "Gaze Visualizations Ray", "Dynamically Walking Crowd", "Virtual Human Partner", "Collaborative Task", "Subjective Evaluation", "Target Distance", "Human Subject Studies", "Collaborations", "AR Gaze Cues", "Display Mechanism", "Underlying Gaze Estimation", "Three Dimensional Cursors", "Real Time Dynamic AR Indicators", "Collaborative Spaces", "Sensors", "Collaborative Interactions", "Social Interactions", "Human Gaze Awareness", "Shared Gaze Environments", "Visual Target Identification Task Performance", "Depth Information", "Visualization", "Task Analysis", "Collaboration", "Three Dimensional Displays", "Real Time Systems", "Gaze Tracking", "Augmented Reality", "Shared Gaze", "Augmented Reality", "Depth Error", "Gaze Visualization", "Performance Measures" ], "authors": [ { "givenName": "Austin", "surname": "Erickson", "fullName": "Austin Erickson", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Nahal", "surname": "Norouzi", "fullName": "Nahal Norouzi", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Kangsoo", "surname": "Kim", "fullName": "Kangsoo Kim", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Joseph J.", "surname": "LaViola", "fullName": "Joseph J. LaViola", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Gerd", "surname": "Bruder", "fullName": "Gerd Bruder", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Gregory F.", "surname": "Welch", "fullName": "Gregory F. 
Welch", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1934-1944", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icat/2007/3056/0/30560280", "title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System", "doi": null, "abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv", "parentPublication": { "id": "proceedings/icat/2007/3056/0", "title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a788", "title": "Depth Compensation Model for Gaze Estimation in Sport Analysis", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a788/12OmNz2C1or", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wkdd/2009/3543/0/3543a594", "title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn", "parentPublication": { "id": "proceedings/wkdd/2009/3543/0", "title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/02/07414495", "title": "Fauxvea: Crowdsourcing Gaze Location Estimates for Visualization Analysis Tasks", "doi": null, "abstractUrl": "/journal/tg/2017/02/07414495/13rRUwInvyE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539334", "title": "GazeDx: Interactive Visual Analytics Framework for Comparative Gaze Analysis with Volumetric Medical Images", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539334/13rRUxjQyvp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a310", "title": "My Eyes Hurt: Effects of Jitter in 3D Gaze Tracking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a310/1CJdbzCNHUc", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a787", "title": "VRDoc: Gaze-based Interactions for VR Reading Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798273", "title": "Required Accuracy of Gaze Tracking for Varifocal 
Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798273/1cJ0T4CUJTq", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a117", "title": "Optical Gaze Tracking with Spatially-Sparse Single-Pixel Detectors", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a117/1pyswxBB73y", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a127", "title": "Gaze-Adaptive Subtitles Considering the Balance among Vertical/Horizontal and Depth of Eye Movement", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a127/1yeQQvE3OQo", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998353", "articleId": "1hpPDKs9c7C", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998141", "articleId": "1hrXeCqZ0cw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXeCqZ0cw", "doi": "10.1109/TVCG.2020.2973498", "abstract": "A common goal of human-subject experiments in virtual reality (VR) research is evaluating VR hardware and software for use by the general public. A core principle of human-subject research is that the sample included in a given study should be representative of the target population; otherwise, the conclusions drawn from the findings may be biased and may not generalize to the population of interest. In order to assess whether characteristics of participants in VR research are representative of the general public, we investigated participant demographic characteristics from human-subject experiments in the Proceedings of the IEEE Virtual Reality Conferences from 2015-2019. We also assessed the representation of female authors. In the 325 eligible manuscripts, which presented results from 365 human-subject experiments, we found evidence of significant underrepresentation of women as both participants and authors. To investigate whether this underrepresentation may bias researchers' findings, we then conducted a meta-analysis and meta-regression to assess whether demographic characteristics of study participants were associated with a common outcome evaluated in VR research: the change in simulator sickness following head-mounted display VR exposure. As expected, participants in VR studies using HMDs experienced small but significant increases in simulator sickness. However, across the included studies, the change in simulator sickness was systematically associated with the proportion of female participants. We discuss the negative implications of conducting experiments on non-representative samples and provide methodological recommendations for mitigating bias in future VR research.", "abstracts": [ { "abstractType": "Regular", "content": "A common goal of human-subject experiments in virtual reality (VR) research is evaluating VR hardware and software for use by the general public. A core principle of human-subject research is that the sample included in a given study should be representative of the target population; otherwise, the conclusions drawn from the findings may be biased and may not generalize to the population of interest. In order to assess whether characteristics of participants in VR research are representative of the general public, we investigated participant demographic characteristics from human-subject experiments in the Proceedings of the IEEE Virtual Reality Conferences from 2015-2019. We also assessed the representation of female authors. In the 325 eligible manuscripts, which presented results from 365 human-subject experiments, we found evidence of significant underrepresentation of women as both participants and authors. To investigate whether this underrepresentation may bias researchers' findings, we then conducted a meta-analysis and meta-regression to assess whether demographic characteristics of study participants were associated with a common outcome evaluated in VR research: the change in simulator sickness following head-mounted display VR exposure. As expected, participants in VR studies using HMDs experienced small but significant increases in simulator sickness. 
However, across the included studies, the change in simulator sickness was systematically associated with the proportion of female participants. We discuss the negative implications of conducting experiments on non-representative samples and provide methodological recommendations for mitigating bias in future VR research.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A common goal of human-subject experiments in virtual reality (VR) research is evaluating VR hardware and software for use by the general public. A core principle of human-subject research is that the sample included in a given study should be representative of the target population; otherwise, the conclusions drawn from the findings may be biased and may not generalize to the population of interest. In order to assess whether characteristics of participants in VR research are representative of the general public, we investigated participant demographic characteristics from human-subject experiments in the Proceedings of the IEEE Virtual Reality Conferences from 2015-2019. We also assessed the representation of female authors. In the 325 eligible manuscripts, which presented results from 365 human-subject experiments, we found evidence of significant underrepresentation of women as both participants and authors. To investigate whether this underrepresentation may bias researchers' findings, we then conducted a meta-analysis and meta-regression to assess whether demographic characteristics of study participants were associated with a common outcome evaluated in VR research: the change in simulator sickness following head-mounted display VR exposure. As expected, participants in VR studies using HMDs experienced small but significant increases in simulator sickness. However, across the included studies, the change in simulator sickness was systematically associated with the proportion of female participants. We discuss the negative implications of conducting experiments on non-representative samples and provide methodological recommendations for mitigating bias in future VR research.", "title": "Mind the Gap: The Underrepresentation of Female Participants and Authors in Virtual Reality Research", "normalizedTitle": "Mind the Gap: The Underrepresentation of Female Participants and Authors in Virtual Reality Research", "fno": "08998141", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Gender Issues", "Helmet Mounted Displays", "Virtual Reality", "IEEE Virtual Reality Conferences", "Simulator Sickness", "Head Mounted Display VR Exposure", "Female Participants", "Virtual Reality Research", "Sociology", "Statistics", "Virtual Reality", "Conferences", "Hardware", "Software", "Task Analysis", "Virtual Reality", "Gender", "Research Methods", "Meta Analysis", "Simulator Sickness", "Bias" ], "authors": [ { "givenName": "Tabitha C.", "surname": "Peck", "fullName": "Tabitha C. Peck", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Laura E.", "surname": "Sockol", "fullName": "Laura E. Sockol", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Sarah M.", "surname": "Hancock", "fullName": "Sarah M. 
Hancock", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1945-1954", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fie/2011/468/0/06143035", "title": "Problematizations of women's underrepresentation: Comparing educator interviews with the literature", "doi": null, "abstractUrl": "/proceedings-article/fie/2011/06143035/12OmNCfjeoS", "parentPublication": { "id": "proceedings/fie/2011/468/0", "title": "2011 Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2022/02/09779506", "title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset", "doi": null, "abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798158", "title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090580", "title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090490", "title": "Evaluation of Simulator Sickness for 360&#x00B0; Videos on an HMD Subject to Participants&#x2019; Experience with Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09124686", "title": "Stimulus Sampling With 360-Videos: Examining Head Movements, Arousal, Presence, Simulator Sickness, and Preference on a Large Sample of Participants and Videos", "doi": null, "abstractUrl": "/journal/ta/2022/03/09124686/1kVbwGkgqYg", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a735", "title": "[DC] Towards Universal VR Sickness Mitigation Strategies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a687", "title": "Who Are Virtual Reality Headset Owners? A Survey and Comparison of Headset Owners and Non-Owners", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a687/1tuB6Ibu8j6", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a426", "title": "Evaluation of Height and Speed Effects on the Comfort of VR Motion Picture Display", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a426/1ziPjdcz2Mg", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998348", "articleId": "1hrXedrZXos", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998570", "articleId": "1hx2DxYanDy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hx2DxYanDy", "doi": "10.1109/TVCG.2020.2973060", "abstract": "Redirected Walking (RDW) steering algorithms have traditionally relied on human-engineered logic. However, recent advances in reinforcement learning (RL) have produced systems that surpass human performance on a variety of control tasks. This paper investigates the potential of using RL to develop a novel reactive steering algorithm for RDW. Our approach uses RL to train a deep neural network that directly prescribes the rotation, translation, and curvature gains to transform a virtual environment given a user's position and orientation in the tracked space. We compare our learned algorithm to steer-to-center using simulated and real paths. We found that our algorithm outperforms steer-to-center on simulated paths, and found no significant difference on distance traveled on real paths. We demonstrate that when modeled as a continuous control problem, RDW is a suitable domain for RL, and moving forward, our general framework provides a promising path towards an optimal RDW steering algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "Redirected Walking (RDW) steering algorithms have traditionally relied on human-engineered logic. However, recent advances in reinforcement learning (RL) have produced systems that surpass human performance on a variety of control tasks. This paper investigates the potential of using RL to develop a novel reactive steering algorithm for RDW. Our approach uses RL to train a deep neural network that directly prescribes the rotation, translation, and curvature gains to transform a virtual environment given a user's position and orientation in the tracked space. We compare our learned algorithm to steer-to-center using simulated and real paths. We found that our algorithm outperforms steer-to-center on simulated paths, and found no significant difference on distance traveled on real paths. We demonstrate that when modeled as a continuous control problem, RDW is a suitable domain for RL, and moving forward, our general framework provides a promising path towards an optimal RDW steering algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Redirected Walking (RDW) steering algorithms have traditionally relied on human-engineered logic. However, recent advances in reinforcement learning (RL) have produced systems that surpass human performance on a variety of control tasks. This paper investigates the potential of using RL to develop a novel reactive steering algorithm for RDW. Our approach uses RL to train a deep neural network that directly prescribes the rotation, translation, and curvature gains to transform a virtual environment given a user's position and orientation in the tracked space. We compare our learned algorithm to steer-to-center using simulated and real paths. We found that our algorithm outperforms steer-to-center on simulated paths, and found no significant difference on distance traveled on real paths. 
We demonstrate that when modeled as a continuous control problem, RDW is a suitable domain for RL, and moving forward, our general framework provides a promising path towards an optimal RDW steering algorithm.", "title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning", "normalizedTitle": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning", "fno": "08998570", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Learning Artificial Intelligence", "Neural Nets", "Virtual Reality", "RL", "Reactive Steering Algorithm", "Deep Neural Network", "Learned Algorithm", "Steer To Center", "Simulated Paths", "Optimal RDW Steering Algorithm", "Reinforcement Learning", "Human Engineered Logic", "Human Performance", "Control Tasks", "Redirected Walking Steering Algorithms", "Legged Locomotion", "Learning Artificial Intelligence", "Prediction Algorithms", "Meters", "Tracking", "Heuristic Algorithms", "Space Exploration", "Virtual Reality", "Locomotion", "Redirected Walking", "Steering Algorithms", "Reinforcement Learning" ], "authors": [ { "givenName": "Ryan R.", "surname": "Strauss", "fullName": "Ryan R. Strauss", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Raghuram", "surname": "Ramanujan", "fullName": "Raghuram Ramanujan", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Becker", "fullName": "Andrew Becker", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Tabitha C.", "surname": "Peck", "fullName": "Tabitha C. Peck", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1955-1963", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892279", "title": "Curvature gains in redirected walking: A closer look", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892373", "title": "Application of redirected walking in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/11/ttg2013111872", "title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques", "doi": null, "abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a524", "title": "The Chaotic Behavior of Redirection – Revisiting Simulations in Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a922", "title": "Robust Redirected Walking in the Wild", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10049511", "title": "Redirected Walking On Omnidirectional Treadmill", "doi": null, "abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798121", "title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797818", "title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089532", "title": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089532/1jIx7m6wYKc", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a201", "title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998141", "articleId": "1hrXeCqZ0cw", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998371", "articleId": "1hrXiia6v9C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXiia6v9C", "doi": "10.1109/TVCG.2020.2973061", "abstract": "Understanding the effects of hand proximity to objects and tasks is critical for hand-held and near-hand objects. Even though self-avatars have been shown to be beneficial for various tasks in virtual environments, little research has investigated the effect of avatar hand proximity on working memory. This paper presents a between-participants user study investigating the effects of self-avatars and physical hand proximity on a common working memory task, the Stroop interference task. Results show that participants felt embodied when a self-avatar was in the scene, and that the subjective level of embodiment decreased when a participant's hands were not collocated with the avatar's hands. Furthermore, a participant's physical hand placement was significantly related to Stroop interference: proximal hands produced a significant increase in accuracy compared to non-proximal hands. Surprisingly, Stroop interference was not mediated by the existence of a self-avatar or level of embodiment.", "abstracts": [ { "abstractType": "Regular", "content": "Understanding the effects of hand proximity to objects and tasks is critical for hand-held and near-hand objects. Even though self-avatars have been shown to be beneficial for various tasks in virtual environments, little research has investigated the effect of avatar hand proximity on working memory. This paper presents a between-participants user study investigating the effects of self-avatars and physical hand proximity on a common working memory task, the Stroop interference task. Results show that participants felt embodied when a self-avatar was in the scene, and that the subjective level of embodiment decreased when a participant's hands were not collocated with the avatar's hands. Furthermore, a participant's physical hand placement was significantly related to Stroop interference: proximal hands produced a significant increase in accuracy compared to non-proximal hands. Surprisingly, Stroop interference was not mediated by the existence of a self-avatar or level of embodiment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Understanding the effects of hand proximity to objects and tasks is critical for hand-held and near-hand objects. Even though self-avatars have been shown to be beneficial for various tasks in virtual environments, little research has investigated the effect of avatar hand proximity on working memory. This paper presents a between-participants user study investigating the effects of self-avatars and physical hand proximity on a common working memory task, the Stroop interference task. Results show that participants felt embodied when a self-avatar was in the scene, and that the subjective level of embodiment decreased when a participant's hands were not collocated with the avatar's hands. Furthermore, a participant's physical hand placement was significantly related to Stroop interference: proximal hands produced a significant increase in accuracy compared to non-proximal hands. 
Surprisingly, Stroop interference was not mediated by the existence of a self-avatar or level of embodiment.", "title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference", "normalizedTitle": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference", "fno": "08998371", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Cognition", "Neurophysiology", "Object Detection", "Hand Collocation", "Hand Held Objects", "Near Hand Objects", "Physical Hand Proximity", "Working Memory Task", "Stroop Interference Task", "Nonproximal Hands", "Virtual Environments", "Participant Physical Hand Placement", "Self Avatar Hand Proximity", "Task Analysis", "Avatars", "Interference", "Visualization", "Cognition", "Sensors", "Resists", "Virtual Reality", "Avatars", "Embodiment", "User Studies", "Cognition", "Stroop Interference Test" ], "authors": [ { "givenName": "Tabitha C.", "surname": "Peck", "fullName": "Tabitha C. Peck", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" }, { "givenName": "Altan", "surname": "Tutar", "fullName": "Altan Tutar", "affiliation": "Davidson College", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1964-1971", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223377", "title": "Avatar embodiment realism and virtual fitness training", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504689", "title": "The impact of a self-avatar on cognitive load in immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504689/12OmNviHKla", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892278", "title": "Bodiless embodiment: A descriptive survey of avatar bodily coherence in first-wave consumer VR applications", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892278/12OmNvnwVj4", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010106", "title": "Using Postural Synergies to Animate a Low-Dimensional Hand Avatar in Haptic Simulation", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010106/13rRUwInv4z", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": "Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798044", "title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797797", "title": "Working Memory Load Performance Based on Collocation of Virtual and Physical Hands", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797797/1cJ1gejsg2Q", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/07/08952604", "title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements", "doi": null, "abstractUrl": "/journal/tg/2021/07/08952604/1gqqhpSZ19S", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a054", "title": "The Effects of Body Tracking Fidelity on Embodiment of an Inverse-Kinematic Avatar for Male Participants", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a054/1pyswgi4b7y", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a455", "title": "Correction of Avatar Hand Movements Supports Learning of a Motor Skill", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a455/1tuBfJZ11HG", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998570", "articleId": "1hx2DxYanDy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09005240", "articleId": "1hzNcOce8OQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hzNcOce8OQ", "doi": "10.1109/TVCG.2020.2973442", "abstract": "Optimizing rendering performance is critical for a wide variety of virtual reality (VR) applications. Foveated rendering is emerging as an indispensable technique for reconciling interactive frame rates with ever-higher head-mounted display resolutions. Here, we present a simple yet effective technique for further reducing the cost of foveated rendering by leveraging ocular dominance - the tendency of the human visual system to prefer scene perception from one eye over the other. Our new approach, eye-dominance-guided foveated rendering (EFR), renders the scene at a lower foveation level (with higher detail) for the dominant eye than the non-dominant eye. Compared with traditional foveated rendering, EFR can be expected to provide superior rendering performance while preserving the same level of perceived visual quality.", "abstracts": [ { "abstractType": "Regular", "content": "Optimizing rendering performance is critical for a wide variety of virtual reality (VR) applications. Foveated rendering is emerging as an indispensable technique for reconciling interactive frame rates with ever-higher head-mounted display resolutions. Here, we present a simple yet effective technique for further reducing the cost of foveated rendering by leveraging ocular dominance - the tendency of the human visual system to prefer scene perception from one eye over the other. Our new approach, eye-dominance-guided foveated rendering (EFR), renders the scene at a lower foveation level (with higher detail) for the dominant eye than the non-dominant eye. Compared with traditional foveated rendering, EFR can be expected to provide superior rendering performance while preserving the same level of perceived visual quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Optimizing rendering performance is critical for a wide variety of virtual reality (VR) applications. Foveated rendering is emerging as an indispensable technique for reconciling interactive frame rates with ever-higher head-mounted display resolutions. Here, we present a simple yet effective technique for further reducing the cost of foveated rendering by leveraging ocular dominance - the tendency of the human visual system to prefer scene perception from one eye over the other. Our new approach, eye-dominance-guided foveated rendering (EFR), renders the scene at a lower foveation level (with higher detail) for the dominant eye than the non-dominant eye. 
Compared with traditional foveated rendering, EFR can be expected to provide superior rendering performance while preserving the same level of perceived visual quality.", "title": "Eye-dominance-guided Foveated Rendering", "normalizedTitle": "Eye-dominance-guided Foveated Rendering", "fno": "09005240", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Gaze Tracking", "Helmet Mounted Displays", "Rendering Computer Graphics", "Virtual Reality", "Visual Perception", "Eye Dominance Guided Foveated Rendering", "Virtual Reality Applications", "Ever Higher Head Mounted Display Resolutions", "Ocular Dominance", "Nondominant Eye", "Human Visual System", "Scene Perception", "EFR", "Perceived Visual Quality", "Rendering Computer Graphics", "Visualization", "Kernel", "Visual Systems", "Pipelines", "Sensitivity", "Solid Modeling", "Virtual Reality", "Foveated Rendering", "Perception", "Gaze Contingent Rendering", "Ocular Dominance", "Eye Tracking" ], "authors": [ { "givenName": "Xiaoxu", "surname": "Meng", "fullName": "Xiaoxu Meng", "affiliation": "University of Maryland, College Park", "__typename": "ArticleAuthorType" }, { "givenName": "Ruofei", "surname": "Du", "fullName": "Ruofei Du", "affiliation": "Google LLC.", "__typename": "ArticleAuthorType" }, { "givenName": "Amitabh", "surname": "Varshney", "fullName": "Amitabh Varshney", "affiliation": "University of Maryland, College Park", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1972-1980", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/etvis/2016/4731/0/07851170", "title": "An analysis of eye-tracking data in foveated ray tracing", "doi": null, "abstractUrl": "/proceedings-article/etvis/2016/07851170/12OmNvT2pjL", "parentPublication": { "id": "proceedings/etvis/2016/4731/0", "title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2018/4195/0/08551511", "title": "Eye Tracking-Based 360 Vr Foveated/Tiled Video Rendering", "doi": null, "abstractUrl": "/proceedings-article/icmew/2018/08551511/17D45WK5AoH", "parentPublication": { "id": "proceedings/icmew/2018/4195/0", "title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a756", "title": "Rectangular Mapping-based Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a205", "title": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a205/1MNgQoZswDu", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a471", "title": "Locomotion-aware Foveated
Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09007492", "title": "3D-Kernel Foveated Rendering for Light Fields", "doi": null, "abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccem/2019/6334/0/633400a032", "title": "Low-Cost Eye Tracking for Foveated Rendering Using Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/ccem/2019/633400a032/1iHT2m32LIY", "parentPublication": { "id": "proceedings/ccem/2019/6334/0", "title": "2019 IEEE International Conference on Cloud Computing in Emerging Markets (CCEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a001", "title": "Foveated Instant Radiosity", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382903", "title": "A Log-Rectilinear Transformation for Foveated 360-degree Video Streaming", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382903/1saZxiH9uaQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523836", "title": "Foveated Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998371", "articleId": "1hrXiia6v9C", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998293", "articleId": "1hrXiCmKkak", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXiCmKkak", "doi": "10.1109/TVCG.2020.2973064", "abstract": "Today's Virtual Reality (VR) displays are dramatically better than the head-worn displays offered 30 years ago, but today's displays remain nearly as bulky as their predecessors in the 1980's. Also, almost all consumer VR displays today provide 90-110 degrees field of view (FOV), which is much smaller than the human visual system's FOV which extends beyond 180 degrees horizontally. In this paper, we propose ThinVR as a new approach to simultaneously address the bulk and limited FOV of head-worn VR displays. ThinVR enables a head-worn VR display to provide 180 degrees horizontal FOV in a thin, compact form factor. Our approach is to replace traditional large optics with a curved microlens array of custom-designed heterogeneous lenslets and place these in front of a curved display. We found that heterogeneous optics were crucial to make this approach work, since over a wide FOV, many lenslets are viewed off the central axis. We developed a custom optimizer for designing custom heterogeneous lenslets to ensure a sufficient eyebox while reducing distortions. The contribution includes an analysis of the design space for curved microlens arrays, implementation of physical prototypes, and an assessment of the image quality, eyebox, FOV, reduction in volume and pupil swim distortion. To our knowledge, this is the first work to demonstrate and analyze the potential for curved, heterogeneous microlens arrays to enable compact, wide FOV head-worn VR displays.", "abstracts": [ { "abstractType": "Regular", "content": "Today's Virtual Reality (VR) displays are dramatically better than the head-worn displays offered 30 years ago, but today's displays remain nearly as bulky as their predecessors in the 1980's. Also, almost all consumer VR displays today provide 90-110 degrees field of view (FOV), which is much smaller than the human visual system's FOV which extends beyond 180 degrees horizontally. In this paper, we propose ThinVR as a new approach to simultaneously address the bulk and limited FOV of head-worn VR displays. ThinVR enables a head-worn VR display to provide 180 degrees horizontal FOV in a thin, compact form factor. Our approach is to replace traditional large optics with a curved microlens array of custom-designed heterogeneous lenslets and place these in front of a curved display. We found that heterogeneous optics were crucial to make this approach work, since over a wide FOV, many lenslets are viewed off the central axis. We developed a custom optimizer for designing custom heterogeneous lenslets to ensure a sufficient eyebox while reducing distortions. The contribution includes an analysis of the design space for curved microlens arrays, implementation of physical prototypes, and an assessment of the image quality, eyebox, FOV, reduction in volume and pupil swim distortion. 
To our knowledge, this is the first work to demonstrate and analyze the potential for curved, heterogeneous microlens arrays to enable compact, wide FOV head-worn VR displays.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Today's Virtual Reality (VR) displays are dramatically better than the head-worn displays offered 30 years ago, but today's displays remain nearly as bulky as their predecessors in the 1980's. Also, almost all consumer VR displays today provide 90-110 degrees field of view (FOV), which is much smaller than the human visual system's FOV which extends beyond 180 degrees horizontally. In this paper, we propose ThinVR as a new approach to simultaneously address the bulk and limited FOV of head-worn VR displays. ThinVR enables a head-worn VR display to provide 180 degrees horizontal FOV in a thin, compact form factor. Our approach is to replace traditional large optics with a curved microlens array of custom-designed heterogeneous lenslets and place these in front of a curved display. We found that heterogeneous optics were crucial to make this approach work, since over a wide FOV, many lenslets are viewed off the central axis. We developed a custom optimizer for designing custom heterogeneous lenslets to ensure a sufficient eyebox while reducing distortions. The contribution includes an analysis of the design space for curved microlens arrays, implementation of physical prototypes, and an assessment of the image quality, eyebox, FOV, reduction in volume and pupil swim distortion. To our knowledge, this is the first work to demonstrate and analyze the potential for curved, heterogeneous microlens arrays to enable compact, wide FOV head-worn VR displays.", "title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays", "normalizedTitle": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays", "fno": "08998293", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Helmet Mounted Displays", "Image Processing", "Microlenses", "Optical Arrays", "Optical Design Techniques", "Optical Distortion", "Optical Images", "Virtual Reality", "Field Of View Virtual Reality Near Eye Displays", "Wide Field Of View Head Worn Virtual Reality Displays", "Compact Field Of View Head Worn Virtual Reality Displays", "Curved Microlens Arrays", "Curved Display", "Custom Designed Heterogeneous Lenslets", "Human Visual System", "Heterogeneous Microlens Arrays", "Lenses", "Microoptics", "Prototypes", "Optical Imaging", "Optical Diffraction", "Optical Distortion", "Computational Display", "Lenslets", "Wide Field Of View", "Head Worn Display" ], "authors": [ { "givenName": "Joshua", "surname": "Ratcliff", "fullName": "Joshua Ratcliff", "affiliation": "Intel Labs", "__typename": "ArticleAuthorType" }, { "givenName": "Alexey", "surname": "Supikov", "fullName": "Alexey Supikov", "affiliation": "Intel Labs", "__typename": "ArticleAuthorType" }, { "givenName": "Santiago", "surname": "Alfaro", "fullName": "Santiago Alfaro", "affiliation": "Intel Labs", "__typename": "ArticleAuthorType" }, { "givenName": "Ronald", "surname": "Azuma", "fullName": "Ronald Azuma", "affiliation": "Intel Labs", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1981-1990", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"recommendedArticles": [ { "id": "proceedings/icisce/2016/2535/0/2535b449", "title": "The Study of Optical Links Establishment with Ultra-Wide FOV Acquisition Scheme in FSO Network", "doi": null, "abstractUrl": "/proceedings-article/icisce/2016/2535b449/12OmNAJm0qq", "parentPublication": { "id": "proceedings/icisce/2016/2535/0", "title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460264", "title": "A visual marker for precise pose estimation based on a microlens array", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460264/12OmNC2OSLm", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581856", "title": "Optical properties of a Si binary optic microlens for infrared ray", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581856/12OmNqC2uVO", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d757", "title": "A Wide-Field-of-View Monocentric Light Field Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d757/12OmNzCWG6X", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a746", "title": "Depth Reduction in Light-Field Head-Mounted Displays by Generating Intermediate Images as Virtual Images", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a746/1CJcGN8dsS4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a259", "title": "OSTNet: Calibration Method for Optical See-Through Head-Mounted Displays via Non-Parametric Distortion Map Generation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a259/1gysj1o4L16", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089608", "title": "Angular Dependence of the Spatial Resolution in Virtual Reality Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089608/1jIxaeEdNkc", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a301", "title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09383112", "title": "Beaming Displays", "doi": null, "abstractUrl": "/journal/tg/2021/05/09383112/1saZzKxYSqI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09384477", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "doi": null, "abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09005240", "articleId": "1hzNcOce8OQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998301", "articleId": "1hpPBqG2djy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfG202JeU", "name": "ttg202005-08998293s1-tvcg-2973064-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998293s1-tvcg-2973064-mm.zip", "extension": "zip", "size": "66.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPBqG2djy", "doi": "10.1109/TVCG.2020.2973058", "abstract": "We present a new method to capture the acoustic characteristics of real-world rooms using commodity devices, and use the captured characteristics to generate similar sounding sources with virtual models. Given the captured audio and an approximate geometric model of a real-world room, we present a novel learning-based method to estimate its acoustic material properties. Our approach is based on deep neural networks that estimate the reverberation time and equalization of the room from recorded audio. These estimates are used to compute material properties related to room reverberation using a novel material optimization objective. We use the estimated acoustic material characteristics for audio rendering using interactive geometric sound propagation and highlight the performance on many real-world scenarios. We also perform a user study to evaluate the perceptual similarity between the recorded sounds and our rendered audio.", "abstracts": [ { "abstractType": "Regular", "content": "We present a new method to capture the acoustic characteristics of real-world rooms using commodity devices, and use the captured characteristics to generate similar sounding sources with virtual models. Given the captured audio and an approximate geometric model of a real-world room, we present a novel learning-based method to estimate its acoustic material properties. Our approach is based on deep neural networks that estimate the reverberation time and equalization of the room from recorded audio. These estimates are used to compute material properties related to room reverberation using a novel material optimization objective. We use the estimated acoustic material characteristics for audio rendering using interactive geometric sound propagation and highlight the performance on many real-world scenarios. We also perform a user study to evaluate the perceptual similarity between the recorded sounds and our rendered audio.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a new method to capture the acoustic characteristics of real-world rooms using commodity devices, and use the captured characteristics to generate similar sounding sources with virtual models. Given the captured audio and an approximate geometric model of a real-world room, we present a novel learning-based method to estimate its acoustic material properties. Our approach is based on deep neural networks that estimate the reverberation time and equalization of the room from recorded audio. These estimates are used to compute material properties related to room reverberation using a novel material optimization objective. We use the estimated acoustic material characteristics for audio rendering using interactive geometric sound propagation and highlight the performance on many real-world scenarios. 
We also perform a user study to evaluate the perceptual similarity between the recorded sounds and our rendered audio.", "title": "Scene-Aware Audio Rendering via Deep Acoustic Analysis", "normalizedTitle": "Scene-Aware Audio Rendering via Deep Acoustic Analysis", "fno": "08998301", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Acoustic Signal Processing", "Audio Signal Processing", "Learning Artificial Intelligence", "Neural Nets", "Optimisation", "Reverberation", "Acoustic Material Characteristics", "Sounding Sources", "Deep Acoustic Analysis", "Rendered Audio", "Interactive Geometric Sound Propagation", "Room Reverberation", "Equalization", "Reverberation Time", "Deep Neural Networks", "Learning Based Method", "Approximate Geometric Model", "Captured Audio", "Virtual Models", "Commodity Devices", "Real World Room", "Scene Aware Audio Rendering", "Acoustics", "Optimization", "Rendering Computer Graphics", "Visualization", "Acoustic Materials", "Frequency Estimation", "Estimation", "Audio Rendering", "Audio Learning", "Material Optimization" ], "authors": [ { "givenName": "Zhenyu", "surname": "Tang", "fullName": "Zhenyu Tang", "affiliation": "University of Maryland", "__typename": "ArticleAuthorType" }, { "givenName": "Nicholas J.", "surname": "Bryan", "fullName": "Nicholas J. Bryan", "affiliation": "Adobe Research", "__typename": "ArticleAuthorType" }, { "givenName": "Dingzeyu", "surname": "Li", "fullName": "Dingzeyu Li", "affiliation": "Adobe Research", "__typename": "ArticleAuthorType" }, { "givenName": "Timothy R.", "surname": "Langlois", "fullName": "Timothy R. Langlois", "affiliation": "Adobe Research", "__typename": "ArticleAuthorType" }, { "givenName": "Dinesh", "surname": "Manocha", "fullName": "Dinesh Manocha", "affiliation": "University of Maryland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1991-2001", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019294", "title": "Improving acoustic modeling using audio-visual speech", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019294/12OmNCbU2UT", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a621", "title": "3D Room Geometry Reconstruction Using Audio-Visual Sensors", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a621/12OmNqNosaP", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2006/9753/0/04042282", "title": "Spatial Sound Rendering Using Measured Room Impulse Responses", "doi": null, "abstractUrl": "/proceedings-article/isspit/2006/04042282/12OmNy3iFfx", "parentPublication": { "id": "proceedings/isspit/2006/9753/0", "title": "2006 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1991/2470/0/00186470", "title": "Desired-speech signal cancellation by microphone arrays in 
reverberant rooms", "doi": null, "abstractUrl": "/proceedings-article/acssc/1991/00186470/12OmNzZWbRP", "parentPublication": { "id": "proceedings/acssc/1991/2470/0", "title": "Conference Record of the Twenty-Fifth Asilomar Conference on Signals, Systems & Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/03/07849179", "title": "Acoustic Classification and Optimization for Multi-Modal Rendering of Real-World Scenes", "doi": null, "abstractUrl": "/journal/tg/2018/03/07849179/13rRUNvgz9Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8836", "title": "Visual Acoustic Matching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8836/1H0LBaUp66Y", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798247", "title": "Immersive Spatial Audio Reproduction for VR/AR Using Room Acoustic Modelling from 360&#x00B0; Images", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798247/1cJ1gHhXwha", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090553", "title": "Scene-aware Sound Rendering in Virtual and Real Worlds", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090553/1jIxzQzr0EU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093307", "title": "Audio-Visual Model Distillation Using Acoustic Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093307/1jPbtIN9OWA", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a487", "title": "Vision-based Acoustic Information Retrieval for Interactive Sound Rendering", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a487/1yfxLit9Jw4", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998293", "articleId": "1hrXiCmKkak", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998303", "articleId": "1hrXfo1lGb6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgrrdfQXK", "name": "ttg202005-08998301s1-supp1-2973058.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998301s1-supp1-2973058.mp4", "extension": "mp4", "size": "4.92 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXfo1lGb6", "doi": "10.1109/TVCG.2020.2973050", "abstract": "In mixed reality (MR), augmenting virtual objects consistently with real-world illumination is one of the key factors that provide a realistic and immersive user experience. For this purpose, we propose a novel deep learning-based method to estimate high dynamic range (HDR) illumination from a single RGB image of a reference object. To obtain illumination of a current scene, previous approaches inserted a special camera in that scene, which may interfere with user's immersion, or they analyzed reflected radiances from a passive light probe with a specific type of materials or a known shape. The proposed method does not require any additional gadgets or strong prior cues, and aims to predict illumination from a single image of an observed object with a wide range of homogeneous materials and shapes. To effectively solve this ill-posed inverse rendering problem, three sequential deep neural networks are employed based on a physically-inspired design. These networks perform end-to-end regression to gradually decrease dependency on the material and shape. To cover various conditions, the proposed networks are trained on a large synthetic dataset generated by physically-based rendering. Finally, the reconstructed HDR illumination enables realistic image-based lighting of virtual objects in MR. Experimental results demonstrate the effectiveness of this approach compared against state-of-the-art methods. The paper also suggests some interesting MR applications in indoor and outdoor scenes.", "abstracts": [ { "abstractType": "Regular", "content": "In mixed reality (MR), augmenting virtual objects consistently with real-world illumination is one of the key factors that provide a realistic and immersive user experience. For this purpose, we propose a novel deep learning-based method to estimate high dynamic range (HDR) illumination from a single RGB image of a reference object. To obtain illumination of a current scene, previous approaches inserted a special camera in that scene, which may interfere with user's immersion, or they analyzed reflected radiances from a passive light probe with a specific type of materials or a known shape. The proposed method does not require any additional gadgets or strong prior cues, and aims to predict illumination from a single image of an observed object with a wide range of homogeneous materials and shapes. To effectively solve this ill-posed inverse rendering problem, three sequential deep neural networks are employed based on a physically-inspired design. These networks perform end-to-end regression to gradually decrease dependency on the material and shape. To cover various conditions, the proposed networks are trained on a large synthetic dataset generated by physically-based rendering. Finally, the reconstructed HDR illumination enables realistic image-based lighting of virtual objects in MR. Experimental results demonstrate the effectiveness of this approach compared against state-of-the-art methods. 
The paper also suggests some interesting MR applications in indoor and outdoor scenes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In mixed reality (MR), augmenting virtual objects consistently with real-world illumination is one of the key factors that provide a realistic and immersive user experience. For this purpose, we propose a novel deep learning-based method to estimate high dynamic range (HDR) illumination from a single RGB image of a reference object. To obtain illumination of a current scene, previous approaches inserted a special camera in that scene, which may interfere with user's immersion, or they analyzed reflected radiances from a passive light probe with a specific type of materials or a known shape. The proposed method does not require any additional gadgets or strong prior cues, and aims to predict illumination from a single image of an observed object with a wide range of homogeneous materials and shapes. To effectively solve this ill-posed inverse rendering problem, three sequential deep neural networks are employed based on a physically-inspired design. These networks perform end-to-end regression to gradually decrease dependency on the material and shape. To cover various conditions, the proposed networks are trained on a large synthetic dataset generated by physically-based rendering. Finally, the reconstructed HDR illumination enables realistic image-based lighting of virtual objects in MR. Experimental results demonstrate the effectiveness of this approach compared against state-of-the-art methods. The paper also suggests some interesting MR applications in indoor and outdoor scenes.", "title": "Physically-inspired Deep Light Estimation from a Homogeneous-Material Object for Mixed Reality Lighting", "normalizedTitle": "Physically-inspired Deep Light Estimation from a Homogeneous-Material Object for Mixed Reality Lighting", "fno": "08998303", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Cameras", "Image Colour Analysis", "Image Reconstruction", "Learning Artificial Intelligence", "Lighting", "Neural Nets", "Realistic Images", "Regression Analysis", "Rendering Computer Graphics", "Physically Inspired Deep Light Estimation", "Homogeneous Material Object", "Mixed Reality Lighting", "Virtual Objects", "Real World Illumination", "Realistic User Experience", "Immersive User Experience", "Deep Learning", "High Dynamic Range Illumination", "Single RGB Image", "Special Camera", "Passive Light Probe", "Inverse Rendering Problem", "Sequential Deep Neural Networks", "End To End Regression", "Physically Based Rendering", "Reconstructed HDR Illumination", "Realistic Image Based Lighting", "Indoor Scenes", "Outdoor Scenes", "Lighting", "Shape", "Probes", "Estimation", "Virtual Reality", "Image Reconstruction", "Cameras", "Light Estimation", "Light Probe", "Physically Based Rendering", "Deep Learning", "Coherent Rendering", "Mixed Reality" ], "authors": [ { "givenName": "Jinwoo", "surname": "Park", "fullName": "Jinwoo Park", "affiliation": "KAIST UVR Lab.", "__typename": "ArticleAuthorType" }, { "givenName": "Hunmin", "surname": "Park", "fullName": "Hunmin Park", "affiliation": "KAIST", "__typename": "ArticleAuthorType" }, { "givenName": "Sung-Eui", "surname": "Yoon", "fullName": "Sung-Eui Yoon", "affiliation": "KAIST", "__typename": "ArticleAuthorType" }, { "givenName": "Woontack", "surname": "Woo", "fullName": "Woontack Woo", "affiliation": "KAIST UVR Lab.", "__typename": "ArticleAuthorType" } ], "replicability": null, 
"showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2002-2011", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dv/2014/7000/1/7000a131", "title": "Lighting Estimation in Outdoor Image Collections", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a131/12OmNBdJ5j1", "parentPublication": { "id": "3dv/2014/7000/1", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a037", "title": "A Single Camera Image Based Approach for Glossy Reflections in Mixed Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a037/12OmNrJAdMm", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457c373", "title": "Deep outdoor illumination estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457c373/12OmNy1SFMf", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/04/08511066", "title": "Automatic Spatially Varying Illumination Recovery of Indoor Scenes Based on a Single RGB-D Image", "doi": null, "abstractUrl": "/journal/tg/2020/04/08511066/14H4WOKjoti", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a138", "title": "Glossy Reflections for Mixed Reality Environments on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a138/17D45Wda7hc", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300g920", "title": "Deep Sky Modeling for Single Image Outdoor Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300g920/1gyrdbEY2sM", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300k0150", "title": "All-Weather Deep Outdoor Lighting Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300k0150/1gyrg6Ricuc", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090568", "title": "Real-time Illumination Estimation for Mixed Reality on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090568/1jIxuGbpWa4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual 
Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a355", "title": "High-Dynamic-Range Lighting Estimation From Face Portraits", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a355/1qyxlbQeCtO", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523888", "title": "Adaptive Light Estimation using Dynamic Filtering for Diverse Lighting Conditions", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523888/1wpqxgia3Vm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998301", "articleId": "1hpPBqG2djy", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998140", "articleId": "1hpPDSYGijK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
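The record above describes a cascade of three sequential deep networks that regress HDR illumination from a single RGB image, each stage reducing dependency on material and then shape. The following is a minimal PyTorch sketch of that cascade idea only; the module names, channel widths, and input size are illustrative assumptions, not the authors' released architecture.

# Hedged sketch of a three-stage regression cascade as described in the
# abstract above (Park et al., TVCG 2020). All shapes and names are
# illustrative assumptions.
import torch
import torch.nn as nn

class StageNet(nn.Module):
    """One stage: a small conv regressor producing an intermediate map."""
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, out_ch, 3, padding=1),
        )
    def forward(self, x):
        return self.body(x)

# Stage 1: reduce material dependency (e.g. image -> reflectance-free shading),
# Stage 2: reduce shape dependency (shading -> direction-indexed radiance),
# Stage 3: regress the HDR environment map used for image-based lighting.
material_net = StageNet(3, 3)
shape_net    = StageNet(3, 3)
light_net    = StageNet(3, 3)   # output interpreted as log-HDR radiance

rgb = torch.rand(1, 3, 128, 128)          # single LDR image of the object
hdr_env = light_net(shape_net(material_net(rgb)))
print(hdr_env.shape)                      # torch.Size([1, 3, 128, 128])

Training end-to-end on synthetic physically-based renderings, as the abstract states, would supervise each stage's intermediate output; the sketch only shows the forward composition.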
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPDSYGijK", "doi": "10.1109/TVCG.2020.2973477", "abstract": "Semantic understanding of 3D environments is critical for both the unmanned system and the human involved virtual/augmented reality (VR/AR) immersive experience. Spatially-sparse convolution, taking advantage of the intrinsic sparsity of 3D point cloud data, makes high resolution 3D convolutional neural networks tractable with state-of-the-art results on 3D semantic segmentation problems. However, the exhaustive computations limits the practical usage of semantic 3D perception for VR/AR applications in portable devices. In this paper, we identify that the efficiency bottleneck lies in the unorganized memory access of the sparse convolution steps, i.e., the points are stored independently based on a predefined dictionary, which is inefficient due to the limited memory bandwidth of parallel computing devices (GPU). With the insight that points are continuous as 2D surfaces in 3D space, a chunk-based sparse convolution scheme is proposed to reuse the neighboring points within each spatially organized chunk. An efficient multi-layer adaptive fusion module is further proposed for employing the spatial consistency cue of 3D data to further reduce the computational burden. Quantitative experiments on public datasets demonstrate that our approach works 11&#x00D7; faster than previous approaches with competitive accuracy. By implementing both semantic and geometric 3D reconstruction simultaneously on a portable tablet device, we demo a foundation platform for immersive AR applications.", "abstracts": [ { "abstractType": "Regular", "content": "Semantic understanding of 3D environments is critical for both the unmanned system and the human involved virtual/augmented reality (VR/AR) immersive experience. Spatially-sparse convolution, taking advantage of the intrinsic sparsity of 3D point cloud data, makes high resolution 3D convolutional neural networks tractable with state-of-the-art results on 3D semantic segmentation problems. However, the exhaustive computations limits the practical usage of semantic 3D perception for VR/AR applications in portable devices. In this paper, we identify that the efficiency bottleneck lies in the unorganized memory access of the sparse convolution steps, i.e., the points are stored independently based on a predefined dictionary, which is inefficient due to the limited memory bandwidth of parallel computing devices (GPU). With the insight that points are continuous as 2D surfaces in 3D space, a chunk-based sparse convolution scheme is proposed to reuse the neighboring points within each spatially organized chunk. An efficient multi-layer adaptive fusion module is further proposed for employing the spatial consistency cue of 3D data to further reduce the computational burden. Quantitative experiments on public datasets demonstrate that our approach works 11&#x00D7; faster than previous approaches with competitive accuracy. 
By implementing both semantic and geometric 3D reconstruction simultaneously on a portable tablet device, we demo a foundation platform for immersive AR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Semantic understanding of 3D environments is critical for both the unmanned system and the human-involved virtual/augmented reality (VR/AR) immersive experience. Spatially-sparse convolution, taking advantage of the intrinsic sparsity of 3D point cloud data, makes high resolution 3D convolutional neural networks tractable with state-of-the-art results on 3D semantic segmentation problems. However, the exhaustive computation limits the practical usage of semantic 3D perception for VR/AR applications in portable devices. In this paper, we identify that the efficiency bottleneck lies in the unorganized memory access of the sparse convolution steps, i.e., the points are stored independently based on a predefined dictionary, which is inefficient due to the limited memory bandwidth of parallel computing devices (GPU). With the insight that points are continuous as 2D surfaces in 3D space, a chunk-based sparse convolution scheme is proposed to reuse the neighboring points within each spatially organized chunk. An efficient multi-layer adaptive fusion module is further proposed for employing the spatial consistency cue of 3D data to further reduce the computational burden. Quantitative experiments on public datasets demonstrate that our approach works 11× faster than previous approaches with competitive accuracy. By implementing both semantic and geometric 3D reconstruction simultaneously on a portable tablet device, we demo a foundation platform for immersive AR applications.", "title": "Live Semantic 3D Perception for Immersive Augmented Reality", "normalizedTitle": "Live Semantic 3D Perception for Immersive Augmented Reality", "fno": "08998140", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Computer Graphics", "Convolutional Neural Nets", "Image Reconstruction", "Image Segmentation", "Multilayer Adaptive Fusion Module", "Spatial Consistency Cue", "Semantic 3 D Reconstruction", "Geometric 3 D Reconstruction", "Portable Tablet Device", "Immersive AR Applications", "Live Semantic 3 D Perception", "Immersive Augmented Reality", "Unmanned System", "Reality Immersive Experience", "Spatially Sparse Convolution", "3 D Point Cloud Data", "High Resolution 3 D Convolutional Neural Networks", "3 D Semantic Segmentation Problems", "Sparse Convolution Steps", "Parallel Computing Devices", "Chunk Based Sparse Convolution Scheme", "Three Dimensional Displays", "Convolution", "Semantics", "Two Dimensional Displays", "Image Segmentation", "Graphics Processing Units", "Solid Modeling", "Dense 3 D Reconstruction", "3 D Semantic Segmentation", "3 D Convolutional Network", "Virtual Reality", "Augmented Reality" ], "authors": [ { "givenName": "Lei", "surname": "Han", "fullName": "Lei Han", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Tian", "surname": "Zheng", "fullName": "Tian Zheng", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Yinheng", "surname": "Zhu", "fullName": "Yinheng Zhu", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Lan", "surname": "Xu", "fullName": "Lan Xu", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Lu", "surname": "Fang", "fullName": "Lu Fang", "affiliation":
"Tsinghua University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2012-2022", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836502", "title": "PoLAR: A Portable Library for Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836502/12OmNAoDhRV", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000d002", "title": "SurfConv: Bridging 3D and 2D Convolution for RGBD Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d002/17D45WaTklB", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699222", "title": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699222/19F1PQOMxWg", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a173", "title": "3D Neighborhood Convolution: Learning Depth-Aware Features for RGB-D and RGB Semantic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a173/1ezRDmQtEY0", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a144", "title": "Hand ControlAR: An Augmented Reality Application for Learning 3D Geometry", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a144/1gysoyOrm2A", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089446", "title": "Graphical Perception for Immersive Analytics", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089446/1jIxfA3tlUk", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e533", "title": "Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e533/1m3o50gS9u8", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar-adjunct/2020/7675/0/767500a203", "title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2020/6768/0/676800b267", "title": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation", "doi": null, "abstractUrl": "/proceedings-article/ase/2020/676800b267/1pP3IvL3Z6w", "parentPublication": { "id": "proceedings/ase/2020/6768/0", "title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a064", "title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998303", "articleId": "1hrXfo1lGb6", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998352", "articleId": "1hpPCCB7Bte", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgMIwxDNe", "name": "ttg202005-08998140s1-supp1-2973477.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998140s1-supp1-2973477.mp4", "extension": "mp4", "size": "117 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPCCB7Bte", "doi": "10.1109/TVCG.2020.2973075", "abstract": "Through avatar embodiment in Virtual Reality (VR) we can achieve the illusion that an avatar is substituting our body: the avatar moves as we move and we see it from a first person perspective. However, self-identification, the process of identifying a representation as being oneself, poses new challenges because a key determinant is that we see and have agency in our own face. Providing control over the face is hard with current HMD technologies because face tracking is either cumbersome or error prone. However, limited animation is easily achieved based on speaking. We investigate the level of avatar enfacement, that is believing that a picture of a face is one's own face, with three levels of facial animation: (i) one in which the facial expressions of the avatars are static, (ii) one in which we implement lip-sync motion and (iii) one in which the avatar presents lip-sync plus additional facial animations, with blinks, designed by a professional animator. We measure self-identification using a face morphing tool that morphs from the face of the participant to the face of a gender matched avatar. We find that self-identification on avatars can be increased through pre-baked animations even when these are not photorealistic nor look like the participant.", "abstracts": [ { "abstractType": "Regular", "content": "Through avatar embodiment in Virtual Reality (VR) we can achieve the illusion that an avatar is substituting our body: the avatar moves as we move and we see it from a first person perspective. However, self-identification, the process of identifying a representation as being oneself, poses new challenges because a key determinant is that we see and have agency in our own face. Providing control over the face is hard with current HMD technologies because face tracking is either cumbersome or error prone. However, limited animation is easily achieved based on speaking. We investigate the level of avatar enfacement, that is believing that a picture of a face is one's own face, with three levels of facial animation: (i) one in which the facial expressions of the avatars are static, (ii) one in which we implement lip-sync motion and (iii) one in which the avatar presents lip-sync plus additional facial animations, with blinks, designed by a professional animator. We measure self-identification using a face morphing tool that morphs from the face of the participant to the face of a gender matched avatar. We find that self-identification on avatars can be increased through pre-baked animations even when these are not photorealistic nor look like the participant.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Through avatar embodiment in Virtual Reality (VR) we can achieve the illusion that an avatar is substituting our body: the avatar moves as we move and we see it from a first person perspective. However, self-identification, the process of identifying a representation as being oneself, poses new challenges because a key determinant is that we see and have agency in our own face. Providing control over the face is hard with current HMD technologies because face tracking is either cumbersome or error prone. 
However, limited animation is easily achieved based on speaking. We investigate the level of avatar enfacement, that is believing that a picture of a face is one's own face, with three levels of facial animation: (i) one in which the facial expressions of the avatars are static, (ii) one in which we implement lip-sync motion and (iii) one in which the avatar presents lip-sync plus additional facial animations, with blinks, designed by a professional animator. We measure self-identification using a face morphing tool that morphs from the face of the participant to the face of a gender matched avatar. We find that self-identification on avatars can be increased through pre-baked animations even when these are not photorealistic nor look like the participant.", "title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification", "normalizedTitle": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification", "fno": "08998352", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Face Recognition", "Helmet Mounted Displays", "Virtual Reality", "Face Tracking", "Avatar Enfacement", "Facial Animation", "Face Morphing Tool", "Enfacement Illusion", "Avatar Embodiment", "Avatar Self Identification", "Virtual Reality", "HMD Technologies", "Facial Expressions", "Lip Sync Motion", "Avatars", "Face", "Facial Animation", "Resists", "Mirrors", "Self Avatars", "Virtual Reality", "Embodiment", "Face Animation", "Enfacement" ], "authors": [ { "givenName": "Mar", "surname": "Gonzalez-Franco", "fullName": "Mar Gonzalez-Franco", "affiliation": "Microsoft Research", "__typename": "ArticleAuthorType" }, { "givenName": "Anthony", "surname": "Steed", "fullName": "Anthony Steed", "affiliation": "Microsoft Research", "__typename": "ArticleAuthorType" }, { "givenName": "Steve", "surname": "Hoogendyk", "fullName": "Steve Hoogendyk", "affiliation": "Microsoft Research", "__typename": "ArticleAuthorType" }, { "givenName": "Eyal", "surname": "Ofek", "fullName": "Eyal Ofek", "affiliation": "Microsoft Research", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2023-2029", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fg/2011/9140/0/05771364", "title": "Facial expression recognition using emotion avatar image", "doi": null, "abstractUrl": "/proceedings-article/fg/2011/05771364/12OmNAi6vUx", "parentPublication": { "id": "proceedings/fg/2011/9140/0", "title": "Face and Gesture 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163173", "title": "Real-time facial character animation", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163173/12OmNApcuBK", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmdcm/2011/4413/0/4413a132", "title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation", "doi": null, "abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9", "parentPublication": { "id": "proceedings/dmdcm/2011/4413/0", 
"title": "Digital Media and Digital Content Management, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1996/7588/0/75880098", "title": "Facial Animation", "doi": null, "abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2", "parentPublication": { "id": "proceedings/ca/1996/7588/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892245", "title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890231", "title": "Real-time control of 3D facial animation", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890231/12OmNyOHG1A", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a479", "title": "Reading Personality: Avatar vs. Human Faces", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a479/12OmNzUgdfd", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a039", "title": "HeadBox: A Facial Blendshape Animation Toolkit for the Microsoft Rocketbox Library", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a039/1CJeXP9uYta", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a558", "title": "CV-Mora Based Lip Sync Facial Animations for Japanese Speech", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a558/1CJfmJhWzMQ", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a429", "title": "Real-time Expressive Avatar Animation Generation based on Monocular Videos", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a429/1J7Wj0kJrJm", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998140", "articleId": "1hpPDSYGijK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998378", "articleId": "1hpPCL9mirK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgrIrXBGE", "name": "ttg202005-08998352s1-supp1-2973075.mp4", "location": 
"https://www.computer.org/csdl/api/v1/extra/ttg202005-08998352s1-supp1-2973075.mp4", "extension": "mp4", "size": "24.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPCL9mirK", "doi": "10.1109/TVCG.2020.2973444", "abstract": "This paper presents a novel active marker for dynamic projection mapping (PM) that emits a temporal blinking pattern of infrared (IR) light representing its ID. We used a multi-material three dimensional (3D) printer to fabricate a projection object with optical fibers that can guide IR light from LEDs attached on the bottom of the object. The aperture of an optical fiber is typically very small; thus, it is unnoticeable to human observers under projection and can be placed on a strongly curved part of a projection surface. In addition, the working range of our system can be larger than previous marker-based methods as the blinking patterns can theoretically be recognized by a camera placed at a wide range of distances from markers. We propose an automatic marker placement algorithm to spread multiple active markers over the surface of a projection object such that its pose can be robustly estimated using captured images from arbitrary directions. We also propose an optimization framework for determining the routes of the optical fibers in such a way that collisions of the fibers can be avoided while minimizing the loss of light intensity in the fibers. Through experiments conducted using three fabricated objects containing strongly curved surfaces, we confirmed that the proposed method can achieve accurate dynamic PMs in a significantly wide working range.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel active marker for dynamic projection mapping (PM) that emits a temporal blinking pattern of infrared (IR) light representing its ID. We used a multi-material three dimensional (3D) printer to fabricate a projection object with optical fibers that can guide IR light from LEDs attached on the bottom of the object. The aperture of an optical fiber is typically very small; thus, it is unnoticeable to human observers under projection and can be placed on a strongly curved part of a projection surface. In addition, the working range of our system can be larger than previous marker-based methods as the blinking patterns can theoretically be recognized by a camera placed at a wide range of distances from markers. We propose an automatic marker placement algorithm to spread multiple active markers over the surface of a projection object such that its pose can be robustly estimated using captured images from arbitrary directions. We also propose an optimization framework for determining the routes of the optical fibers in such a way that collisions of the fibers can be avoided while minimizing the loss of light intensity in the fibers. Through experiments conducted using three fabricated objects containing strongly curved surfaces, we confirmed that the proposed method can achieve accurate dynamic PMs in a significantly wide working range.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel active marker for dynamic projection mapping (PM) that emits a temporal blinking pattern of infrared (IR) light representing its ID. 
We used a multi-material three dimensional (3D) printer to fabricate a projection object with optical fibers that can guide IR light from LEDs attached on the bottom of the object. The aperture of an optical fiber is typically very small; thus, it is unnoticeable to human observers under projection and can be placed on a strongly curved part of a projection surface. In addition, the working range of our system can be larger than previous marker-based methods as the blinking patterns can theoretically be recognized by a camera placed at a wide range of distances from markers. We propose an automatic marker placement algorithm to spread multiple active markers over the surface of a projection object such that its pose can be robustly estimated using captured images from arbitrary directions. We also propose an optimization framework for determining the routes of the optical fibers in such a way that collisions of the fibers can be avoided while minimizing the loss of light intensity in the fibers. Through experiments conducted using three fabricated objects containing strongly curved surfaces, we confirmed that the proposed method can achieve accurate dynamic PMs in a significantly wide working range.", "title": "FibAR: Embedding Optical Fibers in 3D Printed Objects for Active Markers in Dynamic Projection Mapping", "normalizedTitle": "FibAR: Embedding Optical Fibers in 3D Printed Objects for Active Markers in Dynamic Projection Mapping", "fno": "08998378", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Optical Fibres", "Optimisation", "Pose Estimation", "Three Dimensional Printing", "Fib AR", "Multiple Active Markers", "Automatic Marker Placement Algorithm", "Blinking Patterns", "IR Light", "Optical Fiber", "Projection Object", "Multimaterial Three Dimensional Printer", "Infrared Light", "Temporal Blinking Pattern", "Dynamic Projection Mapping", "Active Marker", "3 D Printed Objects", "Optical Fibers", "Optical Imaging", "Cameras", "Optical Device Fabrication", "Robustness", "Three Dimensional Displays", "Observers", "Printers", "Projection Mapping", "Spatial Augmented Reality", "Multi Material 3 D Printer", "Optical Fiber", "Active Marker" ], "authors": [ { "givenName": "Daiki", "surname": "Tone", "fullName": "Daiki Tone", "affiliation": "Osaka University", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Osaka University, JST, PRESTO", "__typename": "ArticleAuthorType" }, { "givenName": "Shinsaku", "surname": "Hiura", "fullName": "Shinsaku Hiura", "affiliation": "University of Hyogo", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Osaka University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2030-2040", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660a174", "title": "[POSTER] Pseudo Printed Fabrics through Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a174/12OmNwoPtwk", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ssst/1993/3560/0/00522766", "title": "A heuristic approach to the computation of 3D-ray trajectories in step index optical fibers", "doi": null, "abstractUrl": "/proceedings-article/ssst/1993/00522766/12OmNxb5hxq", "parentPublication": { "id": "proceedings/ssst/1993/3560/0", "title": "1993 (25th) Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscc/2009/4672/0/05202360", "title": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers", "doi": null, "abstractUrl": "/proceedings-article/iscc/2009/05202360/12OmNzUPpdr", "parentPublication": { "id": "proceedings/iscc/2009/4672/0", "title": "2009 IEEE Symposium on Computers and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2010/8420/0/05720385", "title": "PyFibers: A Semi-automatic Tool for Contour Extraction from Cross Section Images of Photonic Crystal Fibers", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720385/12OmNzX6cf5", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682378", "title": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers", "doi": null, "abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682378/12OmNzwZ6tL", "parentPublication": { "id": "proceedings/greencom-ithingscpscom/2013/5046/0", "title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/02/07831400", "title": "Fabricating Diminishable Visual Markers for Geometric Registration in Projection Mapping", "doi": null, "abstractUrl": "/journal/tg/2018/02/07831400/13rRUyYjK5m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102813", "title": "Projection Mapping System To A Widely Dynamic Sphere With Circumferential Markers", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102813/1kwqWza3GI8", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2020/11/09217985", "title": "A Printed Camouflaged Cell Against Reverse Engineering of Printed Electronics Circuits", "doi": null, "abstractUrl": "/journal/si/2020/11/09217985/1nL7sJLJYf6", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2021/08/09448191", "title": "Defect Detection in Transparent Printed Electronics Using Learning-Based Optical Inspection", "doi": null, "abstractUrl": "/journal/si/2021/08/09448191/1ugE7OC979u", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale 
Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09536434", "title": "Dynamic Projection Mapping for Robust Sphere Posture Tracking Using Uniform/Biased Circumferential Markers", "doi": null, "abstractUrl": "/journal/tg/2022/12/09536434/1wREa2FncUE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998352", "articleId": "1hpPCCB7Bte", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998368", "articleId": "1hrXfCmEWHe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfygC3IBi", "name": "ttg202005-08998378s1-supp1-2973444.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998378s1-supp1-2973444.mp4", "extension": "mp4", "size": "31.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
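The FibAR record above describes active markers whose fiber tips blink a temporal IR pattern encoding an ID, observed by a synchronized camera. A minimal decoding sketch follows; the frame count, threshold, and bit order are assumptions for illustration, not the paper's exact protocol (which would also need synchronization and error handling).

# Hedged sketch of reading one blinking active marker: threshold the
# per-frame brightness of a tracked fiber aperture into bits, then
# assemble the marker ID. Parameters are illustrative assumptions.
def decode_marker_id(intensities, threshold=0.5, bits=8):
    """intensities: per-frame brightness samples of one fiber aperture."""
    assert len(intensities) >= bits
    value = 0
    for sample in intensities[:bits]:     # MSB first (assumed ordering)
        value = (value << 1) | (1 if sample > threshold else 0)
    return value

# Example: an 8-frame capture of one blinking aperture.
frames = [0.9, 0.1, 0.8, 0.9, 0.2, 0.1, 0.9, 0.1]  # -> bits 10110010
print(decode_marker_id(frames))                     # 178

With several such IDs localized in the image, the object pose can be estimated from the known marker positions on the printed surface, which is why the paper optimizes marker placement for visibility from arbitrary directions.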
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXfCmEWHe", "doi": "10.1109/TVCG.2020.2973034", "abstract": "Fitts's law facilitates approximate comparisons of target acquisition performance across a variety of settings. Conceptually, also the index of difficulty of 3D object manipulation with six degrees of freedom can be computed, which allows the comparison of results from different studies. Prior experiments, however, often revealed much worse performance than one would reasonably expect on this basis. We argue that this discrepancy stems from confounding variables and show how Fitts's law and related research methods can be applied to isolate and identify relevant factors of motor performance in 3D manipulation tasks. The results of a formal user study (n=21) demonstrate competitive performance in compliance with Fitts's model and provide empirical evidence that simultaneous 3D rotation and translation can be beneficial.", "abstracts": [ { "abstractType": "Regular", "content": "Fitts's law facilitates approximate comparisons of target acquisition performance across a variety of settings. Conceptually, also the index of difficulty of 3D object manipulation with six degrees of freedom can be computed, which allows the comparison of results from different studies. Prior experiments, however, often revealed much worse performance than one would reasonably expect on this basis. We argue that this discrepancy stems from confounding variables and show how Fitts's law and related research methods can be applied to isolate and identify relevant factors of motor performance in 3D manipulation tasks. The results of a formal user study (n=21) demonstrate competitive performance in compliance with Fitts's model and provide empirical evidence that simultaneous 3D rotation and translation can be beneficial.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Fitts's law facilitates approximate comparisons of target acquisition performance across a variety of settings. Conceptually, also the index of difficulty of 3D object manipulation with six degrees of freedom can be computed, which allows the comparison of results from different studies. Prior experiments, however, often revealed much worse performance than one would reasonably expect on this basis. We argue that this discrepancy stems from confounding variables and show how Fitts's law and related research methods can be applied to isolate and identify relevant factors of motor performance in 3D manipulation tasks. 
The results of a formal user study (n=21) demonstrate competitive performance in compliance with Fitts's model and provide empirical evidence that simultaneous 3D rotation and translation can be beneficial.", "title": "On Motor Performance in Virtual 3D Object Manipulation", "normalizedTitle": "On Motor Performance in Virtual 3D Object Manipulation", "fno": "08998368", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Object Recognition", "User Interfaces", "Virtualisation", "Motor Performance", "Virtual 3 D Object Manipulation", "Fittss Law", "Target Acquisition Performance", "Fittss Model", "Three Dimensional Displays", "Task Analysis", "Solid Modeling", "Mathematical Model", "Throughput", "Two Dimensional Displays", "Computational Modeling", "Fittss Law", "Throughput", "Motor Performance", "Aimed Movements", "3 D User Interfaces" ], "authors": [ { "givenName": "Alexander", "surname": "Kulik", "fullName": "Alexander Kulik", "affiliation": "Virtual Reality and Visualization Research, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" }, { "givenName": "André", "surname": "Kunert", "fullName": "André Kunert", "affiliation": "Virtual Reality and Visualization Research, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Froehlich", "fullName": "Bernd Froehlich", "affiliation": "Virtual Reality and Visualization Research, Bauhaus-Universität Weimar", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2041-2050", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733a494", "title": "3D Pose Regression Using Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a494/12OmNAlNiEz", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2012/1204/0/06184216", "title": "Poster: Manipulation techniques of 3D objects represented as multi-viewpoint images in a 3D scene", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184216/12OmNBvkdnk", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/t4e/2016/6115/0/6115a026", "title": "Geometry via Gestures: Learning 3D Geometry Using Gestures", "doi": null, "abstractUrl": "/proceedings-article/t4e/2016/6115a026/12OmNx5GTY6", "parentPublication": { "id": "proceedings/t4e/2016/6115/0", "title": "2016 IEEE Eighth International Conference on Technology for Education (T4E)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446445", "title": "A Framework for Virtual 3D Manipulation of Face in Video", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446445/13bd1AITnaH", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id":
"proceedings/vr/2018/3365/0/08446317", "title": "Analysis of Proximity-Based Multimodal Feedback for 3D Selection in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446317/13bd1sx4Zta", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a937", "title": "3D Object Detection with Latent Support Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a937/17D45Xtvp98", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e410", "title": "Text and Image Guided 3D Avatar Generation and Manipulation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e410/1KxUFsh4ZdS", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c212", "title": "3D-RelNet: Joint Object and Relational Network for 3D Prediction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c212/1hVl8eX0Hok", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a358", "title": "Workload, Presence and Task Performance of Virtual Object Manipulation on WebVR", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a358/1qpzAYILRRe", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09668999", "title": "Cross-Domain and Disentangled Face Manipulation With 3D Guidance", "doi": null, "abstractUrl": "/journal/tg/2023/04/09668999/1zTfZzq1wqY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998378", "articleId": "1hpPCL9mirK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08999805", "articleId": "1hpPCtKIAaA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPCtKIAaA", "doi": "10.1109/TVCG.2020.2973496", "abstract": "Aiming at realizing novel vision augmentation experiences, this paper proposes the IlluminatedFocus technique, which spatially defocuses real-world appearances regardless of the distance from the user's eyes to observed real objects. With the proposed technique, a part of a real object in an image appears blurred, while the fine details of the other part at the same distance remain visible. We apply Electrically Focus-Tunable Lenses (ETL) as eyeglasses and a synchronized high-speed projector as illumination for a real scene. We periodically modulate the focal lengths of the glasses (focal sweep) at more than 60 Hz so that a wearer cannot perceive the modulation. A part of the scene to appear focused is illuminated by the projector when it is in focus of the user's eyes, while another part to appear blurred is illuminated when it is out of the focus. As the basis of our spatial focus control, we build mathematical models to predict the range of distance from the ETL within which real objects become blurred on the retina of a user. Based on the blur range, we discuss a design guideline for effective illumination timing and focal sweep range. We also model the apparent size of a real scene altered by the focal length modulation. This leads to an undesirable visible seam between focused and blurred areas. We solve this unique problem by gradually blending the two areas. Finally, we demonstrate the feasibility of our proposal by implementing various vision augmentation applications.", "abstracts": [ { "abstractType": "Regular", "content": "Aiming at realizing novel vision augmentation experiences, this paper proposes the IlluminatedFocus technique, which spatially defocuses real-world appearances regardless of the distance from the user's eyes to observed real objects. With the proposed technique, a part of a real object in an image appears blurred, while the fine details of the other part at the same distance remain visible. We apply Electrically Focus-Tunable Lenses (ETL) as eyeglasses and a synchronized high-speed projector as illumination for a real scene. We periodically modulate the focal lengths of the glasses (focal sweep) at more than 60 Hz so that a wearer cannot perceive the modulation. A part of the scene to appear focused is illuminated by the projector when it is in focus of the user's eyes, while another part to appear blurred is illuminated when it is out of the focus. As the basis of our spatial focus control, we build mathematical models to predict the range of distance from the ETL within which real objects become blurred on the retina of a user. Based on the blur range, we discuss a design guideline for effective illumination timing and focal sweep range. We also model the apparent size of a real scene altered by the focal length modulation. This leads to an undesirable visible seam between focused and blurred areas. We solve this unique problem by gradually blending the two areas. 
Finally, we demonstrate the feasibility of our proposal by implementing various vision augmentation applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Aiming at realizing novel vision augmentation experiences, this paper proposes the IlluminatedFocus technique, which spatially defocuses real-world appearances regardless of the distance from the user's eyes to observed real objects. With the proposed technique, a part of a real object in an image appears blurred, while the fine details of the other part at the same distance remain visible. We apply Electrically Focus-Tunable Lenses (ETL) as eyeglasses and a synchronized high-speed projector as illumination for a real scene. We periodically modulate the focal lengths of the glasses (focal sweep) at more than 60 Hz so that a wearer cannot perceive the modulation. A part of the scene to appear focused is illuminated by the projector when it is in focus of the user's eyes, while another part to appear blurred is illuminated when it is out of the focus. As the basis of our spatial focus control, we build mathematical models to predict the range of distance from the ETL within which real objects become blurred on the retina of a user. Based on the blur range, we discuss a design guideline for effective illumination timing and focal sweep range. We also model the apparent size of a real scene altered by the focal length modulation. This leads to an undesirable visible seam between focused and blurred areas. We solve this unique problem by gradually blending the two areas. Finally, we demonstrate the feasibility of our proposal by implementing various vision augmentation applications.", "title": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector", "normalizedTitle": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector", "fno": "08999805", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Vision", "High Speed Optical Techniques", "Image Processing", "Lenses", "Optical Design Techniques", "Optical Focusing", "Optical Glass", "Optical Projectors", "Blurred Areas", "Illuminated Focus Technique", "Electrically Focus Tunable Lenses", "Vision Augmentation Applications", "Focal Length Modulation", "Effective Illumination Timing", "Blur Range", "Spatial Focus Control", "Vision Augmentation Experiences", "High Speed Projector", "Focal Sweep Eyeglasses", "Spatial Defocusing", "Lenses", "Visualization", "Modulation", "Optical Imaging", "High Speed Optical Techniques", "Lighting", "Mathematical Model", "Vision Augmentation", "Spatial Defocusing", "Depth Of Field", "Focal Sweep", "High Speed Projection", "Spatial Augmented Reality" ], "authors": [ { "givenName": "Tatsuyuki", "surname": "Ueda", "fullName": "Tatsuyuki Ueda", "affiliation": "Osaka University", "__typename": "ArticleAuthorType" }, { "givenName": "Daisuke", "surname": "Iwai", "fullName": "Daisuke Iwai", "affiliation": "Osaka University, JST, PRESTO", "__typename": "ArticleAuthorType" }, { "givenName": "Takefumi", "surname": "Hiraki", "fullName": "Takefumi Hiraki", "affiliation": "Osaka University", "__typename": "ArticleAuthorType" }, { "givenName": "Kosuke", "surname": "Sato", "fullName": "Kosuke Sato", "affiliation": "Osaka University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": 
"trans", "pages": "2051-2061", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccp/2018/2526/0/08368469", "title": "Focal sweep imaging with multi-focal diffractive optics", "doi": null, "abstractUrl": "/proceedings-article/iccp/2018/08368469/12OmNBV9Ii2", "parentPublication": { "id": "proceedings/iccp/2018/2526/0", "title": "2018 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a966", "title": "Focal Track: Depth and Accommodation with Oscillating Lens Deformation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a966/12OmNBrlPxE", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a202", "title": "Non-frontal Camera Calibration Using Focal Stack Imagery", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a202/12OmNC943xl", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2010/7023/0/05585101", "title": "Spectral Focal Sweep: Extended depth of field from chromatic aberrations", "doi": null, "abstractUrl": "/proceedings-article/iccp/2010/05585101/12OmNrYCXIW", "parentPublication": { "id": "proceedings/iccp/2010/7023/0", "title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d505", "title": "Extended Depth of Field Catadioptric Imaging Using Focal Sweep", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d505/12OmNxEjXTz", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528302", "title": "Focal sweep videography with deformable optics", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528302/12OmNxUMHo6", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2014/5188/0/06831818", "title": "Which side of the focal plane are you on?", "doi": null, "abstractUrl": "/proceedings-article/iccp/2014/06831818/12OmNzDNtsg", "parentPublication": { "id": "proceedings/iccp/2014/5188/0", "title": "2014 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460700", "title": "Removal of dust artifacts in focal stack image sequences", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460700/12OmNzZWbH0", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/2015/04/07014259", "title": "Extended Depth-of-Field Projector by Fast Focal Sweep Projection", "doi": null, "abstractUrl": "/journal/tg/2015/04/07014259/13rRUxAASVV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08868217", "title": "Computational Phase-Modulated Eyeglasses", "doi": null, "abstractUrl": "/journal/tg/2021/03/08868217/1e7BZyDZnvq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998368", "articleId": "1hrXfCmEWHe", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998305", "articleId": "1hpPBuW1ahy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfuHxEYM0", "name": "ttg202005-08999805s1-supp1-2973496.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08999805s1-supp1-2973496.mp4", "extension": "mp4", "size": "68.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPBuW1ahy", "doi": "10.1109/TVCG.2020.2973077", "abstract": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given &#x201C;optimal&#x201D; SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a &#x201C;minimal&#x201D; SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.", "abstracts": [ { "abstractType": "Regular", "content": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given &#x201C;optimal&#x201D; SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a &#x201C;minimal&#x201D; SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. 
The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In Virtual Reality, a number of studies have been conducted to assess the influence of avatar appearance, avatar control and user point of view on the Sense of Embodiment (SoE) towards a virtual avatar. However, such studies tend to explore each factor in isolation. This paper aims to better understand the inter-relations among these three factors by conducting a subjective matching experiment. In the presented experiment (n=40), participants had to match a given “optimal” SoE avatar configuration (realistic avatar, full-body motion capture, first-person point of view), starting by a “minimal” SoE configuration (minimal avatar, no control, third-person point of view), by iteratively increasing the level of each factor. The choices of the participants provide insights about their preferences and perception over the three factors considered. Moreover, the subjective matching procedure was conducted in the context of four different interaction tasks with the goal of covering a wide range of actions an avatar can do in a VE. The paper also describes a baseline experiment (n=20) which was used to define the number and order of the different levels for each factor, prior to the subjective matching experiment (e.g. different degrees of realism ranging from abstract to personalised avatars for the visual appearance). The results of the subjective matching experiment show that point of view and control levels were consistently increased by users before appearance levels when it comes to enhancing the SoE. Second, several configurations were identified with equivalent SoE as the one felt in the optimal configuration, but vary between the tasks. 
Taken together, our results provide valuable insights about which factors to prioritize in order to enhance the SoE towards an avatar in different tasks, and about configurations which lead to fulfilling SoE in VE.", "title": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View", "normalizedTitle": "Avatar and Sense of Embodiment: Studying the Relative Preference Between Appearance, Control and Point of View", "fno": "08998305", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Virtual Reality", "Visual Appearance", "Subjective Matching Experiment", "Control Levels", "Appearance Levels", "Equivalent So E", "Optimal Configuration", "Relative Preference", "Virtual Reality", "Avatar Appearance", "Avatar Control", "User Point", "Virtual Avatar", "Realistic Avatar", "Minimal So E Configuration", "Minimal Avatar", "Subjective Matching Procedure", "Interaction Tasks", "Baseline Experiment", "Personalised Avatars", "Optimal So E Avatar Configuration", "Avatars", "Task Analysis", "Animation", "Visualization", "Three Dimensional Displays", "Legged Locomotion", "Avatar", "Sense Of Embodiment", "Immersive Virtual Reality", "Psychophysics", "Subjective Matching Technique" ], "authors": [ { "givenName": "Rebecca", "surname": "Fribourg", "fullName": "Rebecca Fribourg", "affiliation": "Inria, Univ Rennes, CNRS, IRISA, France", "__typename": "ArticleAuthorType" }, { "givenName": "Ferran", "surname": "Argelaguet", "fullName": "Ferran Argelaguet", "affiliation": "Inria, Univ Rennes, CNRS, IRISA, France", "__typename": "ArticleAuthorType" }, { "givenName": "Anatole", "surname": "Lécuyer", "fullName": "Anatole Lécuyer", "affiliation": "Inria, Univ Rennes, CNRS, IRISA, France", "__typename": "ArticleAuthorType" }, { "givenName": "Ludovic", "surname": "Hoyet", "fullName": "Ludovic Hoyet", "affiliation": "Inria, Univ Rennes, CNRS, IRISA, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2062-2072", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223406", "title": "Self-characterstics and sound in immersive virtual reality — Estimating avatar weight from footstep sounds", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223406/12OmNAlvHUH", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448293", "title": "Studying the Sense of Embodiment in VR Shared Experiences", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448293/13bd1AIBM1S", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040591", "title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/5555/01/09911682", "title": "Effect of Vibrations on Impression of Walking and Embodiment With First- and Third-Person Avatar", "doi": null, "abstractUrl": "/journal/tg/5555/01/09911682/1HeiWQWKlTG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a503", "title": "Studying &#x201C;Avatar Transitions&#x201D; in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a083", "title": "I&#x0027;m Transforming&#x0021; Effects of Visual Transitions to Change of Avatar on the Sense of Embodiment in AR", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a083/1MNgRmjl6Zq", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798263", "title": "EEG Can Be Used to Measure Embodiment When Controlling a Walking Self-Avatar", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798263/1cJ1gj5NtQc", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089654", "title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090634", "title": "Rhythmic proprioceptive stimulation improves embodiment in a walking avatar when added to visual stimulation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090634/1jIxkrgIlEY", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a452", "title": "Studying the Inter-Relation Between Locomotion Techniques and Embodiment in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a452/1pysvNRUnD2", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08999805", "articleId": "1hpPCtKIAaA", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998379", "articleId": "1hrXhy1IFpu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfNeDRTKU", "name": 
"ttg202005-08998305s1-supp1-2973077.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998305s1-supp1-2973077.mp4", "extension": "mp4", "size": "15.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXhy1IFpu", "doi": "10.1109/TVCG.2020.2973063", "abstract": "The core idea in an XR (VR/MR/AR) application is to digitally stimulate one or more sensory systems (e.g. visual, auditory, olfactory) of the human user in an interactive way to achieve an immersive experience. Since the early 2000s biologists have been using Virtual Environments (VE) to investigate the mechanisms of behavior in non-human animals including insects, fish, and mammals. VEs have become reliable tools for studying vision, cognition, and sensory-motor control in animals. In turn, the knowledge gained from studying such behaviors can be harnessed by researchers designing biologically inspired robots, smart sensors, and rnulti-agent artificial intelligence. VE for animals is becoming a widely used application of XR technology but such applications have not previously been reported in the technical literature related to XR. Biologists and computer scientists can benefit greatly from deepening interdisciplinary research in this emerging field and together we can develop new methods for conducting fundamental research in behavioral sciences and engineering. To support our argument we present this review which provides an overview of animal behavior experiments conducted in virtual environments.", "abstracts": [ { "abstractType": "Regular", "content": "The core idea in an XR (VR/MR/AR) application is to digitally stimulate one or more sensory systems (e.g. visual, auditory, olfactory) of the human user in an interactive way to achieve an immersive experience. Since the early 2000s biologists have been using Virtual Environments (VE) to investigate the mechanisms of behavior in non-human animals including insects, fish, and mammals. VEs have become reliable tools for studying vision, cognition, and sensory-motor control in animals. In turn, the knowledge gained from studying such behaviors can be harnessed by researchers designing biologically inspired robots, smart sensors, and rnulti-agent artificial intelligence. VE for animals is becoming a widely used application of XR technology but such applications have not previously been reported in the technical literature related to XR. Biologists and computer scientists can benefit greatly from deepening interdisciplinary research in this emerging field and together we can develop new methods for conducting fundamental research in behavioral sciences and engineering. To support our argument we present this review which provides an overview of animal behavior experiments conducted in virtual environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The core idea in an XR (VR/MR/AR) application is to digitally stimulate one or more sensory systems (e.g. visual, auditory, olfactory) of the human user in an interactive way to achieve an immersive experience. Since the early 2000s biologists have been using Virtual Environments (VE) to investigate the mechanisms of behavior in non-human animals including insects, fish, and mammals. VEs have become reliable tools for studying vision, cognition, and sensory-motor control in animals. 
In turn, the knowledge gained from studying such behaviors can be harnessed by researchers designing biologically inspired robots, smart sensors, and multi-agent artificial intelligence. VE for animals is becoming a widely used application of XR technology but such applications have not previously been reported in the technical literature related to XR. Biologists and computer scientists can benefit greatly from deepening interdisciplinary research in this emerging field and together we can develop new methods for conducting fundamental research in behavioral sciences and engineering. To support our argument we present this review which provides an overview of animal behavior experiments conducted in virtual environments.", "title": "Animals in Virtual Environments", "normalizedTitle": "Animals in Virtual Environments", "fno": "08998379", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biology Computing", "Virtual Reality", "Zoology", "XR Application", "Sensory Systems", "Immersive Experience", "Virtual Environments", "Nonhuman Animals", "Sensory Motor Control", "XR Technology", "Behavioral Sciences", "Animal Behavior Experiments", "Virtual Environments", "Visualization", "Animal Behavior", "Robot Sensing Systems", "Insects", "Animal Behavior", "Computer Vision", "Neuroscience", "Interactive Experiments", "Evolution", "Ecology", "Ethology" ], "authors": [ { "givenName": "Hemal", "surname": "Naik", "fullName": "Hemal Naik", "affiliation": "Department of Biology, Max Planck Institute of Animal Behavior, Centre for the Advanced Study of Collective Behaviour, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Renaud", "surname": "Bastien", "fullName": "Renaud Bastien", "affiliation": "Department of Biology, Max Planck Institute of Animal Behavior, Centre for the Advanced Study of Collective Behaviour, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Nassir", "surname": "Navab", "fullName": "Nassir Navab", "affiliation": "Technische Universität München", "__typename": "ArticleAuthorType" }, { "givenName": "Iain D", "surname": "Couzin", "fullName": "Iain D Couzin", "affiliation": "Department of Biology, Max Planck Institute of Animal Behavior, Centre for the Advanced Study of Collective Behaviour, University of Konstanz", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2073-2083", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2003/1882/0/18820141", "title": "Effect of Latency on Presence in Stressful Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2003/18820141/12OmNAFnCwJ", "parentPublication": { "id": "proceedings/vr/2003/1882/0", "title": "Proceedings IEEE Virtual Reality 2003", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1993/1363/0/00380793", "title": "Presence in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00380793/12OmNAPSMme", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/3/01334454", "title": "Pattern perception in animals 
remote from man", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/01334454/12OmNBpEeZ6", "parentPublication": { "id": "proceedings/icpr/2004/2128/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/searis/2012/1249/0/06231171", "title": "Configurable semi-autonomic animated animal characters in interactive virtual reality applications", "doi": null, "abstractUrl": "/proceedings-article/searis/2012/06231171/12OmNvnOww6", "parentPublication": { "id": "proceedings/searis/2012/1249/0", "title": "2012 5th Workshop on Software Engineering and Architectures for Realtime Interactive Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a181", "title": "A Video Tracking System for Limb Motion Measurement in Small Animals", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a181/12OmNyOq4Zj", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444791", "title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444791/12OmNyoAA64", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isic/1988/2012/0/00065421", "title": "Multisensor integration in biological systems", "doi": null, "abstractUrl": "/proceedings-article/isic/1988/00065421/12OmNzIUfQB", "parentPublication": { "id": "proceedings/isic/1988/2012/0", "title": "Proceedings 1988 IEEE International Symposium on Intelligent Control", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2019/2605/0/08944350", "title": "Bringing the Field into the Lab: Large-Scale Visualization of Animal Movement Trajectories within a Virtual Island", "doi": null, "abstractUrl": "/proceedings-article/ldav/2019/08944350/1grOFwuFq7e", "parentPublication": { "id": "proceedings/ldav/2019/2605/0", "title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev-&-icivpr/2020/9331/0/09306528", "title": "Virtual and Augmented Reality Animals in Smart and Playful Cities: (Invited Paper)", "doi": null, "abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306528/1qcie8QjZIY", "parentPublication": { "id": "proceedings/iciev-&-icivpr/2020/9331/0", "title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a014", "title": "Collective Intelligence of Autonomous Animals in VR Hunting", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a014/1tnWCWkHcQg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998305", "articleId": "1hpPBuW1ahy", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998298", "articleId": "1hrXce2Kmhq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hrXce2Kmhq", "doi": "10.1109/TVCG.2020.2973441", "abstract": "Virtual Reality (VR) has a great potential to improve skills of Deaf and Hard-of-Hearing (DHH) people. Most VR applications and devices are designed for persons without hearing problems. Therefore, DHH persons have many limitations when using VR. Adding special features in a VR environment, such as subtitles, or haptic devices will help them. Previously, it was necessary to design a special VR environment for DHH persons. We introduce and evaluate a new prototype called &#x201C;EarVR&#x201D; that can be mounted on any desktop or mobile VR Head-Mounted Display (HMD). EarVR analyzes 3D sounds in a VR environment and locates the direction of the sound source that is closest to a user. It notifies the user about the sound direction using two vibro-motors placed on the user's ears. EarVR helps DHH persons to complete sound-based VR tasks in any VR application with 3D audio and a mute option for background music. Therefore, DHH persons can use all VR applications with 3D audio, not only those applications designed for them. Our user study shows that DHH participants were able to complete a simple VR task significantly faster with EarVR than without. The completion time of DHH participants was very close to participants without hearing problems. Also, it shows that DHH participants were able to finish a complex VR task with EarVR, while without it, they could not finish the task even once. Finally, our qualitative and quantitative evaluation among DHH participants indicates that they preferred to use EarVR and it encouraged them to use VR technology more.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality (VR) has a great potential to improve skills of Deaf and Hard-of-Hearing (DHH) people. Most VR applications and devices are designed for persons without hearing problems. Therefore, DHH persons have many limitations when using VR. Adding special features in a VR environment, such as subtitles, or haptic devices will help them. Previously, it was necessary to design a special VR environment for DHH persons. We introduce and evaluate a new prototype called &#x201C;EarVR&#x201D; that can be mounted on any desktop or mobile VR Head-Mounted Display (HMD). EarVR analyzes 3D sounds in a VR environment and locates the direction of the sound source that is closest to a user. It notifies the user about the sound direction using two vibro-motors placed on the user's ears. EarVR helps DHH persons to complete sound-based VR tasks in any VR application with 3D audio and a mute option for background music. Therefore, DHH persons can use all VR applications with 3D audio, not only those applications designed for them. Our user study shows that DHH participants were able to complete a simple VR task significantly faster with EarVR than without. The completion time of DHH participants was very close to participants without hearing problems. Also, it shows that DHH participants were able to finish a complex VR task with EarVR, while without it, they could not finish the task even once. 
Finally, our qualitative and quantitative evaluation among DHH participants indicates that they preferred to use EarVR and it encouraged them to use VR technology more.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality (VR) has a great potential to improve skills of Deaf and Hard-of-Hearing (DHH) people. Most VR applications and devices are designed for persons without hearing problems. Therefore, DHH persons have many limitations when using VR. Adding special features in a VR environment, such as subtitles, or haptic devices will help them. Previously, it was necessary to design a special VR environment for DHH persons. We introduce and evaluate a new prototype called “EarVR” that can be mounted on any desktop or mobile VR Head-Mounted Display (HMD). EarVR analyzes 3D sounds in a VR environment and locates the direction of the sound source that is closest to a user. It notifies the user about the sound direction using two vibro-motors placed on the user's ears. EarVR helps DHH persons to complete sound-based VR tasks in any VR application with 3D audio and a mute option for background music. Therefore, DHH persons can use all VR applications with 3D audio, not only those applications designed for them. Our user study shows that DHH participants were able to complete a simple VR task significantly faster with EarVR than without. The completion time of DHH participants was very close to participants without hearing problems. Also, it shows that DHH participants were able to finish a complex VR task with EarVR, while without it, they could not finish the task even once. Finally, our qualitative and quantitative evaluation among DHH participants indicates that they preferred to use EarVR and it encouraged them to use VR technology more.", "title": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People", "normalizedTitle": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People", "fno": "08998298", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Handicapped Aids", "Haptic Interfaces", "Helmet Mounted Displays", "Virtual Reality", "3 D Sounds", "3 D Audio", "Deaf And Hard Of Hearing People", "Head Mounted Display", "VR Application", "Ear VR", "VR Technology", "Haptic Devices", "DHH Persons", "Hearing Problems", "VR Application", "Virtual Reality", "Ear Haptics", "Haptic Interfaces", "Auditory System", "Task Analysis", "Ear", "Resists", "Hardware", "Three Dimensional Displays", "Virtual Reality", "Haptic", "Vibrotactile", "3 D Audio", "Sound Localization", "Deaf And Hard Of Hearing" ], "authors": [ { "givenName": "Mohammadreza", "surname": "Mirzaei", "fullName": "Mohammadreza Mirzaei", "affiliation": "Institute of Visual Computing and Human-Centered Technology, Vienna University of Technology, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Kán", "fullName": "Peter Kán", "affiliation": "Institute of Visual Computing and Human-Centered Technology, Vienna University of Technology, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Hannes", "surname": "Kaufmann", "fullName": "Hannes Kaufmann", "affiliation": "Institute of Visual Computing and Human-Centered Technology, Vienna University of Technology, Vienna, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2084-2093", "year": 
"2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bibe/2015/7983/0/07367660", "title": "Inner ear boundary motion during bone conduction stimulation — Indications for inner ear compression and fluid inertia", "doi": null, "abstractUrl": "/proceedings-article/bibe/2015/07367660/12OmNqGRG9l", "parentPublication": { "id": "proceedings/bibe/2015/7983/0", "title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2017/1710/0/1710a570", "title": "HEAR?INFO: A Modern Mobile-Web Platform Addressed to Hard-of-Hearing Elderly Individuals", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a570/12OmNrJiCGD", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2015/8454/0/07344327", "title": "Enhancing the educational experience for deaf and hard of hearing students in software engineering", "doi": null, "abstractUrl": "/proceedings-article/fie/2015/07344327/12OmNzahcka", "parentPublication": { "id": "proceedings/fie/2015/8454/0", "title": "2015 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2018/9605/0/960500a221", "title": "Representing Sentiment Using Colors and Particles to Provide Accessibility for Deaf and Hard of Hearing Players", "doi": null, "abstractUrl": "/proceedings-article/sbgames/2018/960500a221/17D45WaTkny", "parentPublication": { "id": "proceedings/sbgames/2018/9605/0", "title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2018/7568/0/08642632", "title": "Deep Learning-Based Hazardous Sound Classification for the Hard of Hearing and Deaf", "doi": null, "abstractUrl": "/proceedings-article/isspit/2018/08642632/17QjJcN2ztN", "parentPublication": { "id": "proceedings/isspit/2018/7568/0", "title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/services/2019/3851/0/385100a307", "title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data", "doi": null, "abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi", "parentPublication": { "id": "proceedings/services/2019/3851/2642-939X", "title": "2019 IEEE World Congress on Services (SERVICES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998401", "title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998401/1hrXgAAK6NW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a588", "title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People", 
"doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a588/1tuAGAPl3Tq", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a582", "title": "Head Up Visualization of Spatial Sound Sources in Virtual Reality for Deaf and Hard-of-Hearing People", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a582/1tuAPlsZnMc", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2020/0497/0/049700a213", "title": "Design of Auxiliary Hearing Compensation System Based on Bluetooth for Deaf Children", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2020/049700a213/1vg7WQmVvCU", "parentPublication": { "id": "proceedings/icvrv/2020/0497/0", "title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998379", "articleId": "1hrXhy1IFpu", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998563", "articleId": "1hx2CTjPZII", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hx2CTjPZII", "doi": "10.1109/TVCG.2020.2973056", "abstract": "We propose and evaluate novel pseudo-haptic techniques to display mass and mass distribution for proxy-based object manipulation in virtual reality. These techniques are specifically designed to generate haptic effects during the object's rotation. They rely on manipulating the mapping between visual cues of motion and kinesthetic cues of force to generate a sense of heaviness, which alters the perception of the object's mass-related properties without changing the physical proxy. First we present a technique to display an object's mass by scaling its rotational motion relative to its mass. A psycho-physical experiment demonstrates that this technique effectively generates correct perceptions of relative mass between two virtual objects. We then present two pseudo-haptic techniques designed to display an object's mass distribution. One of them relies on manipulating the pivot point of rotation, while the other adjusts rotational motion based on the real-time dynamics of the moving object. An empirical study shows that both techniques can influence perception of mass distribution, with the second technique being significantly more effective.", "abstracts": [ { "abstractType": "Regular", "content": "We propose and evaluate novel pseudo-haptic techniques to display mass and mass distribution for proxy-based object manipulation in virtual reality. These techniques are specifically designed to generate haptic effects during the object's rotation. They rely on manipulating the mapping between visual cues of motion and kinesthetic cues of force to generate a sense of heaviness, which alters the perception of the object's mass-related properties without changing the physical proxy. First we present a technique to display an object's mass by scaling its rotational motion relative to its mass. A psycho-physical experiment demonstrates that this technique effectively generates correct perceptions of relative mass between two virtual objects. We then present two pseudo-haptic techniques designed to display an object's mass distribution. One of them relies on manipulating the pivot point of rotation, while the other adjusts rotational motion based on the real-time dynamics of the moving object. An empirical study shows that both techniques can influence perception of mass distribution, with the second technique being significantly more effective.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose and evaluate novel pseudo-haptic techniques to display mass and mass distribution for proxy-based object manipulation in virtual reality. These techniques are specifically designed to generate haptic effects during the object's rotation. They rely on manipulating the mapping between visual cues of motion and kinesthetic cues of force to generate a sense of heaviness, which alters the perception of the object's mass-related properties without changing the physical proxy. First we present a technique to display an object's mass by scaling its rotational motion relative to its mass. A psycho-physical experiment demonstrates that this technique effectively generates correct perceptions of relative mass between two virtual objects. 
We then present two pseudo-haptic techniques designed to display an object's mass distribution. One of them relies on manipulating the pivot point of rotation, while the other adjusts rotational motion based on the real-time dynamics of the moving object. An empirical study shows that both techniques can influence perception of mass distribution, with the second technique being significantly more effective.", "title": "Pseudo-Haptic Display of Mass and Mass Distribution During Object Rotation in Virtual Reality", "normalizedTitle": "Pseudo-Haptic Display of Mass and Mass Distribution During Object Rotation in Virtual Reality", "fno": "08998563", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Haptic Interfaces", "Virtual Reality", "Object Rotation", "Virtual Reality", "Novel Pseudohaptic Techniques", "Mass Distribution", "Proxy Based Object Manipulation", "Haptic Effects", "Mass Related Properties", "Relative Mass", "Virtual Objects", "Rotational Motion", "Moving Object", "Pseudohaptic Display", "Haptic Interfaces", "Visualization", "Force", "Dynamics", "Kinematics", "Virtual Reality", "Real Time Systems", "Virtual Reality", "Object Manipulation", "Object Rotation", "Human Perception", "Pseudo Haptics" ], "authors": [ { "givenName": "Run", "surname": "Yu", "fullName": "Run Yu", "affiliation": "Virginia Tech", "__typename": "ArticleAuthorType" }, { "givenName": "Doug A.", "surname": "Bowman", "fullName": "Doug A. Bowman", "affiliation": "Virginia Tech", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2094-2103", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iros/1995/7108/3/71083146", "title": "A constraint-based god-object method for haptic display", "doi": null, "abstractUrl": "/proceedings-article/iros/1995/71083146/12OmNBDQblW", "parentPublication": { "id": "proceedings/iros/1995/7108/3", "title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a157", "title": "Stable Dynamic Algorithm Based on Virtual Coupling for 6-DOF Haptic Rendering", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a157/12OmNqJ8tk6", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2009/3804/2/3804b156", "title": "Realization and Application of Mass-Spring Model in Haptic Rendering System for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/icicta/2009/3804b156/12OmNqJZgGC", "parentPublication": { "id": "proceedings/icicta/2009/3804/3", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890080", "title": "On the Display of Haptic Recordings for Cutting Biological Tissues", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890080/12OmNwHQB8p", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780233", "title": "Visuo-Haptic Display Using Head-Mounted Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2011/4602/0/4602a271", "title": "Haptic Rendering of Virtual Hand with Force Smoothing", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2011/4602a271/12OmNx3q6XD", "parentPublication": { "id": "proceedings/icvrv/2011/4602/0", "title": "2011 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223322", "title": "Perceiving mass in mixed reality through pseudo-haptic rendering of Newton's third law", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223322/12OmNx5Yvk2", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145155", "title": "Evaluation of Human Performance with Kinematic and Haptic Errors", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145155/12OmNxFJXRu", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446524", "title": "HangerOVER: Development of HMO-Embedded Haptic Display Using the Hanger Reflex and VR Application", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446524/13bd1fdV4l2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/03/07457685", "title": "Pseudo-Haptic Feedback in Teleoperation", "doi": null, "abstractUrl": "/journal/th/2016/03/07457685/13rRUyYjK5o", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998298", "articleId": "1hrXce2Kmhq", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998292", "articleId": "1hpPCy1gJoI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPCy1gJoI", "doi": "10.1109/TVCG.2020.2973476", "abstract": "In many professional domains, relevant processes are documented as abstract process models, such as event-driven process chains (EPCs). EPCs are traditionally visualized as 2D graphs and their size varies with the complexity of the process. While process modeling experts are used to interpreting complex 2D EPCs, in certain scenarios such as, for example, professional training or education, also novice users inexperienced in interpreting 2D EPC data are facing the challenge of learning and understanding complex process models. To communicate process knowledge in an effective yet motivating and interesting way, we propose a novel virtual reality (VR) interface for non-expert users. Our proposed system turns the exploration of arbitrarily complex EPCs into an interactive and multi-sensory VR experience. It automatically generates a virtual 3D environment from a process model and lets users explore processes through a combination of natural walking and teleportation. Our immersive interface leverages basic gamification in the form of a logical walkthrough mode to motivate users to interact with the virtual process. The generated user experience is entirely novel in the field of immersive data exploration and supported by a combination of visual, auditory, vibrotactile and passive haptic feedback. In a user study with Z_$\\mathrm{N}=27$_Z novice users, we evaluate the effect of our proposed system on process model understandability and user experience, while comparing it to a traditional 2D interface on a tablet device. The results indicate a tradeoff between efficiency and user interest as assessed by the UEQ novelty subscale, while no significant decrease in model understanding performance was found using the proposed VR interface. Our investigation highlights the potential of multi-sensory VR for less time-critical professional application domains, such as employee training, communication, education, and related scenarios focusing on user interest.", "abstracts": [ { "abstractType": "Regular", "content": "In many professional domains, relevant processes are documented as abstract process models, such as event-driven process chains (EPCs). EPCs are traditionally visualized as 2D graphs and their size varies with the complexity of the process. While process modeling experts are used to interpreting complex 2D EPCs, in certain scenarios such as, for example, professional training or education, also novice users inexperienced in interpreting 2D EPC data are facing the challenge of learning and understanding complex process models. To communicate process knowledge in an effective yet motivating and interesting way, we propose a novel virtual reality (VR) interface for non-expert users. Our proposed system turns the exploration of arbitrarily complex EPCs into an interactive and multi-sensory VR experience. It automatically generates a virtual 3D environment from a process model and lets users explore processes through a combination of natural walking and teleportation. Our immersive interface leverages basic gamification in the form of a logical walkthrough mode to motivate users to interact with the virtual process. 
The generated user experience is entirely novel in the field of immersive data exploration and supported by a combination of visual, auditory, vibrotactile and passive haptic feedback. In a user study with $\\mathrm{N}=27$ novice users, we evaluate the effect of our proposed system on process model understandability and user experience, while comparing it to a traditional 2D interface on a tablet device. The results indicate a tradeoff between efficiency and user interest as assessed by the UEQ novelty subscale, while no significant decrease in model understanding performance was found using the proposed VR interface. Our investigation highlights the potential of multi-sensory VR for less time-critical professional application domains, such as employee training, communication, education, and related scenarios focusing on user interest.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In many professional domains, relevant processes are documented as abstract process models, such as event-driven process chains (EPCs). EPCs are traditionally visualized as 2D graphs and their size varies with the complexity of the process. While process modeling experts are used to interpreting complex 2D EPCs, in certain scenarios such as, for example, professional training or education, also novice users inexperienced in interpreting 2D EPC data are facing the challenge of learning and understanding complex process models. To communicate process knowledge in an effective yet motivating and interesting way, we propose a novel virtual reality (VR) interface for non-expert users. Our proposed system turns the exploration of arbitrarily complex EPCs into an interactive and multi-sensory VR experience. It automatically generates a virtual 3D environment from a process model and lets users explore processes through a combination of natural walking and teleportation. Our immersive interface leverages basic gamification in the form of a logical walkthrough mode to motivate users to interact with the virtual process. The generated user experience is entirely novel in the field of immersive data exploration and supported by a combination of visual, auditory, vibrotactile and passive haptic feedback. In a user study with N=27 novice users, we evaluate the effect of our proposed system on process model understandability and user experience, while comparing it to a traditional 2D interface on a tablet device. The results indicate a tradeoff between efficiency and user interest as assessed by the UEQ novelty subscale, while no significant decrease in model understanding performance was found using the proposed VR interface. 
Our investigation highlights the potential of multi-sensory VR for less time-critical professional application domains, such as employee training, communication, education, and related scenarios focusing on user interest.", "title": "Immersive Process Model Exploration in Virtual Reality", "normalizedTitle": "Immersive Process Model Exploration in Virtual Reality", "fno": "08998292", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Haptic Interfaces", "User Experience", "Virtual Reality", "User Experience", "2 D Interface", "VR Interface", "Multisensory VR", "Immersive Process Model Exploration", "Abstract Process Models", "Event Driven Process Chains", "Virtual Reality Interface", "EPC", "Virtual 3 D Environment", "Immersive Interface", "Immersive Data Exploration", "Visual Feedback", "Auditory Feedback", "Vibrotactile Feedback", "Passive Haptic Feedback", "2 D EPC Data", "Basic Gamification", "Haptic Interfaces", "Solid Modeling", "Two Dimensional Displays", "Data Models", "Three Dimensional Displays", "Virtual Reality", "Business", "Virtual Reality", "Multi Sensory Feedback", "Passive Haptics", "Immersion", "Business Process Models", "Immersive Data Analysis" ], "authors": [ { "givenName": "André", "surname": "Zenner", "fullName": "André Zenner", "affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Akhmajon", "surname": "Makhsadov", "fullName": "Akhmajon Makhsadov", "affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Sören", "surname": "Klingner", "fullName": "Sören Klingner", "affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Liebemann", "fullName": "David Liebemann", "affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Antonio", "surname": "Krüger", "fullName": "Antonio Krüger", "affiliation": "German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2104-2114", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a063", "title": "[POSTER] Reactive Displays for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a063/12OmNzaQoyU", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446550", "title": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446550/13bd1fph1xN", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a291", 
"title": "Virtual Reality Observations: Using Virtual Reality to Augment Lab-Based Shoulder Surfing Research", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a291/1CJcrwDUDgk", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089504", "title": "Touch the Wall: Comparison of Virtual and Augmented Reality with Conventional 2D Screen Eye-Hand Coordination Training Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089504/1jIxfvWzz6o", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a283", "title": "User Study on Virtual Reality for Design Reviews in Architecture", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a283/1pBMhwWNphC", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2020/6768/0/676800b267", "title": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation", "doi": null, "abstractUrl": "/proceedings-article/ase/2020/676800b267/1pP3IvL3Z6w", "parentPublication": { "id": "proceedings/ase/2020/6768/0", "title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a713", "title": "An In-Depth Exploration of the Effect of 2D/3D Views and Controller Types on First Person Shooter Games in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a713/1pysuzo9dLi", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2020/5382/0/09374344", "title": "Integrating 3D and 2D Views of Medical Image Data in Virtual Reality for Efficient Navigation", "doi": null, "abstractUrl": "/proceedings-article/ichi/2020/09374344/1rUJ2FLVdEk", "parentPublication": { "id": "proceedings/ichi/2020/5382/0", "title": "2020 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a131", "title": "Diegetic Tool Management in a Virtual Reality Training Simulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a131/1tuAgbFhYCQ", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2021/1865/0/186500a070", "title": "FPX-G: First Person Exploration for Graph", "doi": null, "abstractUrl": "/proceedings-article/mipr/2021/186500a070/1xPso0QWilO", "parentPublication": { "id": "proceedings/mipr/2021/1865/0", "title": "2021 IEEE 4th International Conference on Multimedia 
Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998563", "articleId": "1hx2CTjPZII", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998361", "articleId": "1hpPBmpcsXm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgS2AUkow", "name": "ttg202005-08998292s1-supp2-2973476.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998292s1-supp2-2973476.mp4", "extension": "mp4", "size": "274 MB", "__typename": "WebExtraType" }, { "id": "1iEguOr2Ia4", "name": "ttg202005-08998292s1-supp1-2973476.png", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998292s1-supp1-2973476.png", "extension": "png", "size": "133 kB", "__typename": "WebExtraType" }, { "id": "1iEgBYI7gqI", "name": "ttg202005-08998292s1-supp4-2973476.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998292s1-supp4-2973476.pdf", "extension": "pdf", "size": "227 kB", "__typename": "WebExtraType" }, { "id": "1iEfNbjdV3W", "name": "ttg202005-08998292s1-supp3-2973476.png", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998292s1-supp3-2973476.png", "extension": "png", "size": "41.5 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPBmpcsXm", "doi": "10.1109/TVCG.2020.2973055", "abstract": "Immersive environments have been successfully applied to a broad range of safety training in high-risk domains. However, very little research has used these systems to evaluate the risk-taking behavior of construction workers. In this study, we investigated the feasibility and usefulness of providing passive haptics in a mixed-reality environment to capture the risk-taking behavior of workers, identify at-risk workers, and propose injury-prevention interventions to counteract excessive risk-taking and risk-compensatory behavior. Within a mixed-reality environment in a CAVE-like display system, our subjects installed shingles on a (physical) sloped roof of a (virtual) two-story residential building on a morning in a suburban area. Through this controlled, within-subject experimental design, we exposed each subject to three experimental conditions by manipulating the level of safety intervention. Workers' subjective reports, physiological signals, psychophysical responses, and reactionary behaviors were then considered as promising measures of Presence. The results showed that our mixed-reality environment was a suitable platform for triggering behavioral changes under different experimental conditions and for evaluating the risk perception and risk-taking behavior of workers in a risk-free setting. These results demonstrated the value of immersive technology to investigate natural human factors.", "abstracts": [ { "abstractType": "Regular", "content": "Immersive environments have been successfully applied to a broad range of safety training in high-risk domains. However, very little research has used these systems to evaluate the risk-taking behavior of construction workers. In this study, we investigated the feasibility and usefulness of providing passive haptics in a mixed-reality environment to capture the risk-taking behavior of workers, identify at-risk workers, and propose injury-prevention interventions to counteract excessive risk-taking and risk-compensatory behavior. Within a mixed-reality environment in a CAVE-like display system, our subjects installed shingles on a (physical) sloped roof of a (virtual) two-story residential building on a morning in a suburban area. Through this controlled, within-subject experimental design, we exposed each subject to three experimental conditions by manipulating the level of safety intervention. Workers' subjective reports, physiological signals, psychophysical responses, and reactionary behaviors were then considered as promising measures of Presence. The results showed that our mixed-reality environment was a suitable platform for triggering behavioral changes under different experimental conditions and for evaluating the risk perception and risk-taking behavior of workers in a risk-free setting. These results demonstrated the value of immersive technology to investigate natural human factors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Immersive environments have been successfully applied to a broad range of safety training in high-risk domains. However, very little research has used these systems to evaluate the risk-taking behavior of construction workers. 
In this study, we investigated the feasibility and usefulness of providing passive haptics in a mixed-reality environment to capture the risk-taking behavior of workers, identify at-risk workers, and propose injury-prevention interventions to counteract excessive risk-taking and risk-compensatory behavior. Within a mixed-reality environment in a CAVE-like display system, our subjects installed shingles on a (physical) sloped roof of a (virtual) two-story residential building on a morning in a suburban area. Through this controlled, within-subject experimental design, we exposed each subject to three experimental conditions by manipulating the level of safety intervention. Workers' subjective reports, physiological signals, psychophysical responses, and reactionary behaviors were then considered as promising measures of Presence. The results showed that our mixed-reality environment was a suitable platform for triggering behavioral changes under different experimental conditions and for evaluating the risk perception and risk-taking behavior of workers in a risk-free setting. These results demonstrated the value of immersive technology to investigate natural human factors.", "title": "Presence, Mixed Reality, and Risk-Taking Behavior: A Study in Safety Interventions", "normalizedTitle": "Presence, Mixed Reality, and Risk-Taking Behavior: A Study in Safety Interventions", "fno": "08998361", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Behavioural Sciences Computing", "Construction Industry", "Haptic Interfaces", "Human Factors", "Injuries", "Occupational Safety", "Risk Analysis", "Injury Prevention Intervention", "CAVE Like Display System", "Risk Taking Behavior", "Reactionary Behaviors", "At Risk Workers", "High Risk Domains", "Safety Intervention", "Risk Free Setting", "Risk Perception", "Behavioral Changes", "Mixed Reality Environment", "Safety", "Haptic Interfaces", "Training", "Virtual Environments", "Physiology", "Human Factors", "Mixed Reality", "Passive Haptics", "Presence", "Human Factors", "Risk Taking Behavior", "X 3 D", "Construction Safety" ], "authors": [ { "givenName": "Sogand", "surname": "Hasanzadeh", "fullName": "Sogand Hasanzadeh", "affiliation": "Department of Civil and Environmental Engineering, Virginia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Nicholas F.", "surname": "Polys", "fullName": "Nicholas F. Polys", "affiliation": "Department of Computer Science, Virginia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Jesus M.", "surname": "de la Garza", "fullName": "Jesus M. 
de la Garza", "affiliation": "Department of Civil Engineering, Clemson University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2115-2125", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isuvr/2017/3091/0/3091a038", "title": "Empathic Mixed Reality: Sharing What You Feel and Interacting with What You See", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2017/3091a038/12OmNBNM97G", "parentPublication": { "id": "proceedings/isuvr/2017/3091/0", "title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2016/5670/0/5670b851", "title": "Risk Taking in Online Crowdsourcing Tournaments", "doi": null, "abstractUrl": "/proceedings-article/hicss/2016/5670b851/12OmNCdk2HE", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802076", "title": "Social presence in mixed agency interactions", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802076/12OmNvrdI6z", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/passat-socialcom/2012/5638/0/06406356", "title": "Social and Emotional Turn Taking for Embodied Conversational Agents", "doi": null, "abstractUrl": "/proceedings-article/passat-socialcom/2012/06406356/12OmNx5pj2h", "parentPublication": { "id": "proceedings/passat-socialcom/2012/5638/0", "title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icm/2011/4522/2/4522b324", "title": "Behavioural Approaches to Safety Management in Underground Mines", "doi": null, "abstractUrl": "/proceedings-article/icm/2011/4522b324/12OmNzwHvsL", "parentPublication": { "id": "proceedings/icm/2011/4522/2", "title": "Information Technology, Computer Engineering and Management Sciences, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2021/3965/0/396500a046", "title": "Detection and Analysis of Interrupted Behaviors by Public Policy Interventions during COVID-19", "doi": null, "abstractUrl": "/proceedings-article/chase/2021/396500a046/1AIMIxRcJag", "parentPublication": { "id": "proceedings/chase/2021/3965/0", "title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icekim/2022/1666/0/166600a176", "title": "Effect Perception of Online and Offline Mixed Teaching among College Students during the Epidemic: A Study from the Perspective of Social Distancing", "doi": null, "abstractUrl": "/proceedings-article/icekim/2022/166600a176/1KpBZv1EgJG", "parentPublication": { "id": "proceedings/icekim/2022/1666/0", "title": "2022 3rd International Conference on Education, Knowledge and 
Information Management (ICEKIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a172", "title": "Virtual Reality for Training and Fitness Assessments for Construction Safety", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a172/1olHyhKIA0g", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a371", "title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a371/1qpzCZXhpS0", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a180", "title": "Entering a new Dimension in Virtual Reality Research: An Overview of Existing Toolkits, their Features and Challenges", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a180/1yBF0L6Dd8k", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998292", "articleId": "1hpPCy1gJoI", "__typename": "AdjacentArticleType" }, "next": { "fno": "08999630", "articleId": "1hpPDGcaf9C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEgPJyuLwQ", "name": "ttg202005-08998361s1-supp1-2973055.wmv", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998361s1-supp1-2973055.wmv", "extension": "wmv", "size": "113 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPDGcaf9C", "doi": "10.1109/TVCG.2020.2973053", "abstract": "Emergent in the field of head mounted display design is a desire to leverage the limitations of the human visual system to reduce the computation, communication, and display workload in power and form-factor constrained systems. Fundamental to this reduced workload is the ability to match display resolution to the acuity of the human visual system, along with a resulting need to follow the gaze of the eye as it moves, a process referred to as foveation. A display that moves its content along with the eye may be called a Foveated Display, though this term is also commonly used to describe displays with non-uniform resolution that attempt to mimic human visual acuity. We therefore recommend a definition for the term Foveated Display that accepts both of these interpretations. Furthermore, we include a simplified model for human visual Acuity Distribution Functions (ADFs) at various levels of visual acuity, across wide fields of view and propose comparison of this ADF with the Resolution Distribution Function of a foveated display for evaluation of its resolution at a particular gaze direction. We also provide a taxonomy to allow the field to meaningfully compare and contrast various aspects of foveated displays in a display and optical technology-agnostic manner.", "abstracts": [ { "abstractType": "Regular", "content": "Emergent in the field of head mounted display design is a desire to leverage the limitations of the human visual system to reduce the computation, communication, and display workload in power and form-factor constrained systems. Fundamental to this reduced workload is the ability to match display resolution to the acuity of the human visual system, along with a resulting need to follow the gaze of the eye as it moves, a process referred to as foveation. A display that moves its content along with the eye may be called a Foveated Display, though this term is also commonly used to describe displays with non-uniform resolution that attempt to mimic human visual acuity. We therefore recommend a definition for the term Foveated Display that accepts both of these interpretations. Furthermore, we include a simplified model for human visual Acuity Distribution Functions (ADFs) at various levels of visual acuity, across wide fields of view and propose comparison of this ADF with the Resolution Distribution Function of a foveated display for evaluation of its resolution at a particular gaze direction. We also provide a taxonomy to allow the field to meaningfully compare and contrast various aspects of foveated displays in a display and optical technology-agnostic manner.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Emergent in the field of head mounted display design is a desire to leverage the limitations of the human visual system to reduce the computation, communication, and display workload in power and form-factor constrained systems. Fundamental to this reduced workload is the ability to match display resolution to the acuity of the human visual system, along with a resulting need to follow the gaze of the eye as it moves, a process referred to as foveation. 
A display that moves its content along with the eye may be called a Foveated Display, though this term is also commonly used to describe displays with non-uniform resolution that attempt to mimic human visual acuity. We therefore recommend a definition for the term Foveated Display that accepts both of these interpretations. Furthermore, we include a simplified model for human visual Acuity Distribution Functions (ADFs) at various levels of visual acuity, across wide fields of view and propose comparison of this ADF with the Resolution Distribution Function of a foveated display for evaluation of its resolution at a particular gaze direction. We also provide a taxonomy to allow the field to meaningfully compare and contrast various aspects of foveated displays in a display and optical technology-agnostic manner.", "title": "Toward Standardized Classification of Foveated Displays", "normalizedTitle": "Toward Standardized Classification of Foveated Displays", "fno": "08999630", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Gaze Tracking", "Helmet Mounted Displays", "User Interfaces", "Human Visual System", "Display Workload", "Form Factor Constrained Systems", "Display Resolution", "Human Visual Acuity Distribution Functions", "Display Design", "Foveated Displays", "Visualization", "Distribution Functions", "Resource Description Framework", "Optical Sensors", "Rendering Computer Graphics", "Visual Systems", "Spatial Resolution", "Head Mounted Displays", "Virtual Reality", "Augmented Reality", "Foveated Display" ], "authors": [ { "givenName": "Josef", "surname": "Spjut", "fullName": "Josef Spjut", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Ben", "surname": "Boudaoud", "fullName": "Ben Boudaoud", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Jonghyun", "surname": "Kim", "fullName": "Jonghyun Kim", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Trey", "surname": "Greer", "fullName": "Trey Greer", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Rachel", "surname": "Albert", "fullName": "Rachel Albert", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Stengel", "fullName": "Michael Stengel", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "Kaan", "surname": "Akşit", "fullName": "Kaan Akşit", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Luebke", "fullName": "David Luebke", "affiliation": "NVIDIA Corporation", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "2126-2134", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2019/05/08642529", "title": "Manufacturing Application-Driven Foveated Near-Eye Displays", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a756", "title": "Rectangular Mapping-based Foveated Rendering", "doi": null, 
"abstractUrl": "/proceedings-article/vr/2022/961700a756/1CJcj9wHjH2", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873853", "title": "Foveated Stochastic Lightcuts", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873853/1GjwMIuxYUE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a471", "title": "Locomotion-aware Foveated Rendering", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a471/1MNgzzb0RWg", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09007492", "title": "3D-Kernel Foveated Rendering for Light Fields", "doi": null, "abstractUrl": "/journal/tg/2021/08/09007492/1hJKlSvNgo8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09005240", "title": "Eye-dominance-guided Foveated Rendering", "doi": null, "abstractUrl": "/journal/tg/2020/05/09005240/1hzNcOce8OQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09224182", "title": "Foveated Encoding for Large High-Resolution Displays", "doi": null, "abstractUrl": "/journal/tg/2021/02/09224182/1nV7LZ69x7i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a001", "title": "Foveated Instant Radiosity", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523836", "title": "Foveated Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a413", "title": "Selective Foveated Ray Tracing for Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998361", "articleId": "1hpPBmpcsXm", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yoxK239w7C", "doi": "10.1109/TVCG.2021.3126478", "abstract": "We present a novel two-stage approach for automated floorplan design in residential buildings with a given exterior wall boundary. Our approach has the unique advantage of being human-centric, that is, the generated floorplans can be geometrically plausible, as well as topologically reasonable to enhance resident interaction with the environment. From the input boundary, we first synthesize a human-activity map that reflects both the spatial configuration and human-environment interaction in an architectural space. We propose to produce the human-activity map either automatically by a pre-trained generative adversarial network (GAN) model, or semi-automatically by synthesizing it with user manipulation of the furniture. Second, we feed the human-activity map into our deep framework <italic>ActFloor-GAN</italic> to guide a pixel-wise prediction of room types. We adopt a re-formulated cycle-consistency constraint in <italic>ActFloor-GAN</italic> to maximize the overall prediction performance, so that we can produce high-quality room layouts that are readily convertible to vectorized floorplans. Experimental results show several benefits of our approach. First, a quantitative comparison with prior methods shows superior performance of leveraging the human-activity map in predicting piecewise room types. Second, a subjective evaluation by architects shows that our results have compelling quality as professionally-designed floorplans and much better than those generated by existing methods in terms of the room layout topology. Last, our approach allows manipulating the furniture placement, considers the human activities in the environment, and enables the incorporation of user-design preferences.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel two-stage approach for automated floorplan design in residential buildings with a given exterior wall boundary. Our approach has the unique advantage of being human-centric, that is, the generated floorplans can be geometrically plausible, as well as topologically reasonable to enhance resident interaction with the environment. From the input boundary, we first synthesize a human-activity map that reflects both the spatial configuration and human-environment interaction in an architectural space. We propose to produce the human-activity map either automatically by a pre-trained generative adversarial network (GAN) model, or semi-automatically by synthesizing it with user manipulation of the furniture. Second, we feed the human-activity map into our deep framework <italic>ActFloor-GAN</italic> to guide a pixel-wise prediction of room types. We adopt a re-formulated cycle-consistency constraint in <italic>ActFloor-GAN</italic> to maximize the overall prediction performance, so that we can produce high-quality room layouts that are readily convertible to vectorized floorplans. Experimental results show several benefits of our approach. First, a quantitative comparison with prior methods shows superior performance of leveraging the human-activity map in predicting piecewise room types. 
Second, a subjective evaluation by architects shows that our results have compelling quality as professionally-designed floorplans and much better than those generated by existing methods in terms of the room layout topology. Last, our approach allows manipulating the furniture placement, considers the human activities in the environment, and enables the incorporation of user-design preferences.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel two-stage approach for automated floorplan design in residential buildings with a given exterior wall boundary. Our approach has the unique advantage of being human-centric, that is, the generated floorplans can be geometrically plausible, as well as topologically reasonable to enhance resident interaction with the environment. From the input boundary, we first synthesize a human-activity map that reflects both the spatial configuration and human-environment interaction in an architectural space. We propose to produce the human-activity map either automatically by a pre-trained generative adversarial network (GAN) model, or semi-automatically by synthesizing it with user manipulation of the furniture. Second, we feed the human-activity map into our deep framework ActFloor-GAN to guide a pixel-wise prediction of room types. We adopt a re-formulated cycle-consistency constraint in ActFloor-GAN to maximize the overall prediction performance, so that we can produce high-quality room layouts that are readily convertible to vectorized floorplans. Experimental results show several benefits of our approach. First, a quantitative comparison with prior methods shows superior performance of leveraging the human-activity map in predicting piecewise room types. Second, a subjective evaluation by architects shows that our results have compelling quality as professionally-designed floorplans and much better than those generated by existing methods in terms of the room layout topology. 
Last, our approach allows manipulating the furniture placement, considers the human activities in the environment, and enables the incorporation of user-design preferences.", "title": "ActFloor-GAN: Activity-Guided Adversarial Networks for Human-Centric Floorplan Design", "normalizedTitle": "ActFloor-GAN: Activity-Guided Adversarial Networks for Human-Centric Floorplan Design", "fno": "09609576", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Architectural CAD", "Architecture", "Buildings Structures", "Deep Learning Artificial Intelligence", "Furniture", "Structural Engineering Computing", "Walls", "Act Floor GAN", "Activity Guided Adversarial Networks", "Architectural Space", "Automated Floorplan Design", "Cycle Consistency Constraint", "Deep Learning", "Exterior Wall Boundary", "Furniture Placement", "Furniture User Manipulation", "High Quality Room Layouts", "Human Activity Map", "Human Centric Floorplan Design", "Human Environment Interaction", "Pixel Wise Prediction", "Pre Trained Generative Adversarial Network", "Professionally Designed Floorplans", "Resident Interaction", "Residential Buildings", "Room Layout Topology", "Room Types", "Spatial Configuration", "User Design Preferences", "Vectorized Floorplans", "Layout", "Generative Adversarial Networks", "Buildings", "Predictive Models", "Computer Architecture", "Topology", "Optimization", "Floorplan Design", "Room Layout", "Human Centric", "GAN" ], "authors": [ { "givenName": "Shidong", "surname": "Wang", "fullName": "Shidong Wang", "affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Zeng", "fullName": "Wei Zeng", "affiliation": "The Hong Kong University of Science and Technology, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xi", "surname": "Chen", "fullName": "Xi Chen", "affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu", "surname": "Ye", "fullName": "Yu Ye", "affiliation": "Tongji University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu", "surname": "Qiao", "fullName": "Yu Qiao", "affiliation": "Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chi-Wing", "surname": "Fu", "fullName": "Chi-Wing Fu", "affiliation": "Chinese University of Hong Kong, Hong Kong SAR, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1610-1624", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000j455", "title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859870", "title": "Feature-Guided Blind Face Restoration with GAN Prior", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2022/09859870/1G9EkarxxgQ", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600l1265", "title": "DO-GAN: A Double Oracle Framework for Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600l1265/1H1kORDwthu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09965611", "title": "BL-GAN: Semi-Supervised Bug Localization Via Generative Adversarial Network", "doi": null, "abstractUrl": "/journal/tk/5555/01/09965611/1IHMMPUiMnu", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1469", "title": "R&#x00B2;GAN: Cross-Modal Recipe Retrieval With Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1469/1gyriUTXkcM", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2020/9228/0/922800a923", "title": "VC-GAN: Classifying Vessel Types by Maritime Trajectories using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/ictai/2020/922800a923/1pP3td7Dhuw", "parentPublication": { "id": "proceedings/ictai/2020/9228/0", "title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09387601", "title": "SG-GAN: Adversarial Self-Attention GCN for Point Cloud Topological Parts Generation", "doi": null, "abstractUrl": "/journal/tg/2022/10/09387601/1smD5kvWVjy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428140", "title": "CI-GAN : Co-Clustering By Information Maximizing Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428140/1uim9HTyCSk", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2021/4121/0/412100a079", "title": "Ret-GAN: Retinal Image Enhancement using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cbms/2021/412100a079/1vb8Vz0ZV0A", "parentPublication": { "id": "proceedings/cbms/2021/4121/0", "title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900n3627", "title": "House-GAN++: Generative Adversarial Layout Refinement Network towards Intelligent Computational Agent for Professional Architects", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900n3627/1yeKfpa1XIQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09610985", "articleId": "1ypYfbK3U88", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1ypYfbK3U88", "doi": "10.1109/TVCG.2021.3127132", "abstract": "Recent advances in high-resolution microscopy have allowed scientists to better understand the underlying brain connectivity. However, due to the limitation that biological specimens can only be imaged at a single timepoint, studying changes to neural projections over time is limited to observations gathered using population analysis. In this article, we introduce <italic>NeuRegenerate</italic>, a novel end-to-end framework for the prediction and visualization of changes in neural fiber morphology within a subject across specified age-timepoints. To predict projections, we present <italic>neuReGANerator</italic>, a deep-learning network based on cycle-consistent generative adversarial network (GAN) that translates features of neuronal structures across age-timepoints for large brain microscopy volumes. We improve the reconstruction quality of the predicted neuronal structures by implementing a density multiplier and a new loss function, called the hallucination loss. Moreover, to alleviate artifacts that occur due to tiling of large input volumes, we introduce a spatial-consistency module in the training pipeline of neuReGANerator. Finally, to visualize the change in projections, predicted using neuReGANerator, NeuRegenerate offers two modes: (i) <italic>neuroCompare</italic> to simultaneously visualize the difference in the structures of the neuronal projections, from two age domains (using structural view and bounded view), and (ii) <italic>neuroMorph</italic>, a vesselness-based morphing technique to interactively visualize the transformation of the structures from one age-timepoint to the other. Our framework is designed specifically for volumes acquired using wide-field microscopy. We demonstrate our framework by visualizing the structural changes within the cholinergic system of the mouse brain between a young and old specimen.", "abstracts": [ { "abstractType": "Regular", "content": "Recent advances in high-resolution microscopy have allowed scientists to better understand the underlying brain connectivity. However, due to the limitation that biological specimens can only be imaged at a single timepoint, studying changes to neural projections over time is limited to observations gathered using population analysis. In this article, we introduce <italic>NeuRegenerate</italic>, a novel end-to-end framework for the prediction and visualization of changes in neural fiber morphology within a subject across specified age-timepoints. To predict projections, we present <italic>neuReGANerator</italic>, a deep-learning network based on cycle-consistent generative adversarial network (GAN) that translates features of neuronal structures across age-timepoints for large brain microscopy volumes. We improve the reconstruction quality of the predicted neuronal structures by implementing a density multiplier and a new loss function, called the hallucination loss. Moreover, to alleviate artifacts that occur due to tiling of large input volumes, we introduce a spatial-consistency module in the training pipeline of neuReGANerator. 
Finally, to visualize the change in projections, predicted using neuReGANerator, NeuRegenerate offers two modes: (i) <italic>neuroCompare</italic> to simultaneously visualize the difference in the structures of the neuronal projections, from two age domains (using structural view and bounded view), and (ii) <italic>neuroMorph</italic>, a vesselness-based morphing technique to interactively visualize the transformation of the structures from one age-timepoint to the other. Our framework is designed specifically for volumes acquired using wide-field microscopy. We demonstrate our framework by visualizing the structural changes within the cholinergic system of the mouse brain between a young and old specimen.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent advances in high-resolution microscopy have allowed scientists to better understand the underlying brain connectivity. However, due to the limitation that biological specimens can only be imaged at a single timepoint, studying changes to neural projections over time is limited to observations gathered using population analysis. In this article, we introduce NeuRegenerate, a novel end-to-end framework for the prediction and visualization of changes in neural fiber morphology within a subject across specified age-timepoints. To predict projections, we present neuReGANerator, a deep-learning network based on cycle-consistent generative adversarial network (GAN) that translates features of neuronal structures across age-timepoints for large brain microscopy volumes. We improve the reconstruction quality of the predicted neuronal structures by implementing a density multiplier and a new loss function, called the hallucination loss. Moreover, to alleviate artifacts that occur due to tiling of large input volumes, we introduce a spatial-consistency module in the training pipeline of neuReGANerator. Finally, to visualize the change in projections, predicted using neuReGANerator, NeuRegenerate offers two modes: (i) neuroCompare to simultaneously visualize the difference in the structures of the neuronal projections, from two age domains (using structural view and bounded view), and (ii) neuroMorph, a vesselness-based morphing technique to interactively visualize the transformation of the structures from one age-timepoint to the other. Our framework is designed specifically for volumes acquired using wide-field microscopy. 
We demonstrate our framework by visualizing the structural changes within the cholinergic system of the mouse brain between a young and old specimen.", "title": "NeuRegenerate: A Framework for Visualizing Neurodegeneration", "normalizedTitle": "NeuRegenerate: A Framework for Visualizing Neurodegeneration", "fno": "09610985", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biomedical Optical Imaging", "Brain", "Data Visualisation", "Deep Learning Artificial Intelligence", "Image Reconstruction", "Image Resolution", "Medical Image Processing", "Neurophysiology", "Optical Microscopy", "Age Domains", "Age Timepoint", "Biological Specimens", "Bounded View", "Brain Connectivity", "Brain Microscopy Volumes", "Cholinergic System", "Cycle Consistent Generative Adversarial Network", "Deep Learning Network", "Density Multiplier", "End To End Framework", "Hallucination Loss", "High Resolution Microscopy", "Input Volumes", "Loss Function", "Mouse Brain", "Neural Fiber Morphology", "Neural Projections", "Neu Re GA Nerator", "Neu Regenerate", "Neurodegeneration", "Neuro Morph", "Neuronal Projections", "Neuronal Structures", "Old Specimen", "Population Analysis", "Reconstruction Quality", "Single Timepoint", "Spatial Consistency Module", "Structural Changes", "Structural View", "Vesselness Based Morphing Technique", "Visualization", "Wide Field Microscopy", "Young Specimen", "Microscopy", "Data Visualization", "Image Reconstruction", "Visualization", "Neurites", "Training", "Three Dimensional Displays", "Neuron Visualization", "Volume Visualization", "Volume Transformation", "Wide Field Microscopy", "Machine Learning" ], "authors": [ { "givenName": "Saeed", "surname": "Boorboor", "fullName": "Saeed Boorboor", "affiliation": "Department of Computer Science, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Shawn", "surname": "Mathew", "fullName": "Shawn Mathew", "affiliation": "Department of Computer Science, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Mala", "surname": "Ananth", "fullName": "Mala Ananth", "affiliation": "National Institutes of Health, Bethesda, MD, USA", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Talmage", "fullName": "David Talmage", "affiliation": "National Institutes of Health, Bethesda, MD, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Lorna W.", "surname": "Role", "fullName": "Lorna W. Role", "affiliation": "National Institutes of Health, Bethesda, MD, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Arie E.", "surname": "Kaufman", "fullName": "Arie E. 
Kaufman", "affiliation": "Department of Computer Science, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1625-1637", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icip/1998/8821/2/882120706", "title": "Reconstruction problems in 3D for viral cryo electron microscopy", "doi": null, "abstractUrl": "/proceedings-article/icip/1998/882120706/12OmNBpEeWP", "parentPublication": { "id": "proceedings/icip/1998/8821/3", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2013/1309/0/06732714", "title": "A novel seeding method based on spatial sliding volume filter for neuron reconstruction", "doi": null, "abstractUrl": "/proceedings-article/bibm/2013/06732714/12OmNwF0BNI", "parentPublication": { "id": "proceedings/bibm/2013/1309/0", "title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2015/9721/0/9721a023", "title": "Wrinkle Image Registration for Serial Microscopy Sections", "doi": null, "abstractUrl": "/proceedings-article/sitis/2015/9721a023/12OmNzTYCaI", "parentPublication": { "id": "proceedings/sitis/2015/9721/0", "title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122802", "title": "A Lightweight Tangible 3D Interface for Interactive Visualization of Thin Fiber Structures", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122802/13rRUIJuxpA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875935", "title": "NeuroLines: A Subway Map Metaphor for Visualizing Nanoscale Neuronal Connectivity", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875935/13rRUIM2VBJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440805", "title": "Visualization of Neuronal Structures in Wide-Field Microscopy Brain Images", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440805/17D45WnnFUX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a379", "title": "Interactive Web-based 3D Viewer for Multidimensional Microscope Imaging Modalities", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a379/1KaH0uXY3i8", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09529035", "title": "NeuroConstruct: 3D Reconstruction and Visualization of Neurites in Optical 
Microscopy Brain Images", "doi": null, "abstractUrl": "/journal/tg/2022/12/09529035/1wB2yTuC5Q4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09555234", "title": "DXplorer: A Unified Visualization Framework for Interactive Dendritic Spine Analysis Using 3D Morphological Features", "doi": null, "abstractUrl": "/journal/tg/2023/02/09555234/1xjQYixG2Lm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900d750", "title": "RCNN-SliceNet: A Slice and Cluster Approach for Nuclei Centroid Detection in Three-Dimensional Fluorescence Microscopy Images", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900d750/1yJYilRygHS", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09609576", "articleId": "1yoxK239w7C", "__typename": "AdjacentArticleType" }, "next": { "fno": "09615008", "articleId": "1yyho082gEw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmySeOusYo", "name": "ttg202303-09610985s1-supp1-3127132.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09610985s1-supp1-3127132.mp4", "extension": "mp4", "size": "36.1 MB", "__typename": "WebExtraType" }, { "id": "1KmyS9XTge4", "name": "ttg202303-09610985s1-supp2-3127132.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09610985s1-supp2-3127132.pdf", "extension": "pdf", "size": "132 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yyho082gEw", "doi": "10.1109/TVCG.2021.3128157", "abstract": "Data visualizations have been increasingly used in oral presentations to communicate data patterns to the general public. Clear verbal introductions of visualizations to explain how to interpret the visually encoded information are essential to convey the takeaways and avoid misunderstandings. We contribute a series of studies to investigate how to effectively introduce visualizations to the audience with varying degrees of visualization literacy. We begin with understanding how people are introducing visualizations. We crowdsource 110 introductions of visualizations and categorize them based on their content and structures. From these crowdsourced introductions, we identify different introduction strategies and generate a set of introductions for evaluation. We conduct experiments to systematically compare the effectiveness of different introduction strategies across four visualizations with 1,080 participants. We find that introductions explaining visual encodings with concrete examples are the most effective. Our study provides both qualitative and quantitative insights into how to construct effective verbal introductions of visualizations in presentations, inspiring further research in data storytelling.", "abstracts": [ { "abstractType": "Regular", "content": "Data visualizations have been increasingly used in oral presentations to communicate data patterns to the general public. Clear verbal introductions of visualizations to explain how to interpret the visually encoded information are essential to convey the takeaways and avoid misunderstandings. We contribute a series of studies to investigate how to effectively introduce visualizations to the audience with varying degrees of visualization literacy. We begin with understanding how people are introducing visualizations. We crowdsource 110 introductions of visualizations and categorize them based on their content and structures. From these crowdsourced introductions, we identify different introduction strategies and generate a set of introductions for evaluation. We conduct experiments to systematically compare the effectiveness of different introduction strategies across four visualizations with 1,080 participants. We find that introductions explaining visual encodings with concrete examples are the most effective. Our study provides both qualitative and quantitative insights into how to construct effective verbal introductions of visualizations in presentations, inspiring further research in data storytelling.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Data visualizations have been increasingly used in oral presentations to communicate data patterns to the general public. Clear verbal introductions of visualizations to explain how to interpret the visually encoded information are essential to convey the takeaways and avoid misunderstandings. We contribute a series of studies to investigate how to effectively introduce visualizations to the audience with varying degrees of visualization literacy. We begin with understanding how people are introducing visualizations. 
We crowdsource 110 introductions of visualizations and categorize them based on their content and structures. From these crowdsourced introductions, we identify different introduction strategies and generate a set of introductions for evaluation. We conduct experiments to systematically compare the effectiveness of different introduction strategies across four visualizations with 1,080 participants. We find that introductions explaining visual encodings with concrete examples are the most effective. Our study provides both qualitative and quantitative insights into how to construct effective verbal introductions of visualizations in presentations, inspiring further research in data storytelling.", "title": "Explaining With Examples: Lessons Learned From Crowdsourced Introductory Description of Information Visualizations", "normalizedTitle": "Explaining With Examples: Lessons Learned From Crowdsourced Introductory Description of Information Visualizations", "fno": "09615008", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Literacy", "Crowdsourcing", "Data Visualisation", "Multimedia Computing", "Crowdsourced Introductory Description", "Data Storytelling", "Data Visualizations", "Information Visualizations", "Oral Presentations", "Verbal Introductions", "Visual Encodings", "Visualization Literacy", "Data Visualization", "Visualization", "Encoding", "Education", "Task Analysis", "Annotations", "Design Methodology", "Narrative Visualization", "Oral Presentation", "Introduction" ], "authors": [ { "givenName": "Leni", "surname": "Yang", "fullName": "Leni Yang", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Cindy", "surname": "Xiong", "fullName": "Cindy Xiong", "affiliation": "University of Massachusetts Amherst, Amherst, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jason K.", "surname": "Wong", "fullName": "Jason K. 
Wong", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Aoyu", "surname": "Wu", "fullName": "Aoyu Wu", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Huamin", "surname": "Qu", "fullName": "Huamin Qu", "affiliation": "Hong Kong University of Science and Technology, Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1638-1650", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2011/0868/0/06004064", "title": "Listening to Managers: A Study about Visualizations in Corporate Presentations", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004064/12OmNqBbHF8", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2014/3922/0/07044207", "title": "Interactive visualizations for teaching quantum mechanics and semiconductor physics", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07044207/12OmNxEBz3P", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536142", "title": "Exploring the Possibilities of Embedding Heterogeneous Data Attributes in Familiar Visualizations", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536142/13rRUEgarjx", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017606", "title": "Active Reading of Visualizations", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017606/13rRUyYSWl5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a160", "title": "Beyond Visuals: Examining the Experiences of Geoscience Professionals With Vision Disabilities in Accessing Data Visualizations", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a160/1J6hbizj1Xq", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waie/2022/6351/0/635100a001", "title": "Design and Implementation of a Teaching Verbal Behavior Analysis Aid in Instructional Videos", "doi": null, "abstractUrl": "/proceedings-article/waie/2022/635100a001/1KzzolbliEw", "parentPublication": { "id": "proceedings/waie/2022/6351/0", "title": "2022 4th International Workshop on Artificial Intelligence and Education (WAIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/05/08744242", "title": "Data2Vis: Automatic Generation of Data Visualizations Using Sequence-to-Sequence Recurrent Neural Networks", "doi": null, "abstractUrl": 
"/magazine/cg/2019/05/08744242/1cFV5domibu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809832", "title": "Searching the Visual Style and Structure of D3 Visualizations", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809832/1cHEgg8WeNW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933747", "title": "EasyPZ.js: Interaction Binding for Pan and Zoom Visualizations", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933747/1fTgFR19dTi", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/beliv/2020/9642/0/964200a019", "title": "How to evaluate data visualizations across different levels of understanding", "doi": null, "abstractUrl": "/proceedings-article/beliv/2020/964200a019/1q0FOQPpIic", "parentPublication": { "id": "proceedings/beliv/2020/9642/0", "title": "2020 IEEE Workshop on Evaluation and Beyond - Methodological Approaches to Visualization (BELIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09610985", "articleId": "1ypYfbK3U88", "__typename": "AdjacentArticleType" }, "next": { "fno": "09614998", "articleId": "1yyho7vk3cs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyWuhEmpa", "name": "ttg202303-09615008s1-tvcg-3128157-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09615008s1-tvcg-3128157-mm.zip", "extension": "zip", "size": "10.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yyho7vk3cs", "doi": "10.1109/TVCG.2021.3127918", "abstract": "We present a novel approach for volume exploration that is versatile yet effective in isolating semantic structures in both noisy and clean data. Specifically, we describe a hierarchical active contours approach based on Bhattacharyya gradient flow which is easier to control, robust to noise, and can incorporate various types of statistical information to drive an edge-agnostic exploration process. To facilitate a time-bound user-driven volume exploration process that is applicable to a wide variety of data sources, we present an efficient multi-GPU implementation that (1) is approximately 400 times faster than a single thread CPU implementation, (2) allows hierarchical exploration of 2D and 3D images, (3) supports customization through multidimensional attribute spaces, and (4) is applicable to a variety of data sources and semantic structures. The exploration system follows a 2-step process. It first applies active contours to isolate semantically meaningful subsets of the volume. It then applies transfer functions to the isolated regions locally to produce clear and clutter-free visualizations. We show the effectiveness of our approach in isolating and visualizing structures-of-interest without needing any specialized segmentation methods on a variety of data sources, including 3D optical microscopy, multi-channel optical volumes, abdominal and chest CT, micro-CT, MRI, simulation, and synthetic data. We also gathered feedback from a medical trainee regarding the usefulness of our approach and discussion on potential applications in clinical workflows.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel approach for volume exploration that is versatile yet effective in isolating semantic structures in both noisy and clean data. Specifically, we describe a hierarchical active contours approach based on Bhattacharyya gradient flow which is easier to control, robust to noise, and can incorporate various types of statistical information to drive an edge-agnostic exploration process. To facilitate a time-bound user-driven volume exploration process that is applicable to a wide variety of data sources, we present an efficient multi-GPU implementation that (1) is approximately 400 times faster than a single thread CPU implementation, (2) allows hierarchical exploration of 2D and 3D images, (3) supports customization through multidimensional attribute spaces, and (4) is applicable to a variety of data sources and semantic structures. The exploration system follows a 2-step process. It first applies active contours to isolate semantically meaningful subsets of the volume. It then applies transfer functions to the isolated regions locally to produce clear and clutter-free visualizations. We show the effectiveness of our approach in isolating and visualizing structures-of-interest without needing any specialized segmentation methods on a variety of data sources, including 3D optical microscopy, multi-channel optical volumes, abdominal and chest CT, micro-CT, MRI, simulation, and synthetic data. 
We also gathered feedback from a medical trainee regarding the usefulness of our approach and discussion on potential applications in clinical workflows.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel approach for volume exploration that is versatile yet effective in isolating semantic structures in both noisy and clean data. Specifically, we describe a hierarchical active contours approach based on Bhattacharyya gradient flow which is easier to control, robust to noise, and can incorporate various types of statistical information to drive an edge-agnostic exploration process. To facilitate a time-bound user-driven volume exploration process that is applicable to a wide variety of data sources, we present an efficient multi-GPU implementation that (1) is approximately 400 times faster than a single thread CPU implementation, (2) allows hierarchical exploration of 2D and 3D images, (3) supports customization through multidimensional attribute spaces, and (4) is applicable to a variety of data sources and semantic structures. The exploration system follows a 2-step process. It first applies active contours to isolate semantically meaningful subsets of the volume. It then applies transfer functions to the isolated regions locally to produce clear and clutter-free visualizations. We show the effectiveness of our approach in isolating and visualizing structures-of-interest without needing any specialized segmentation methods on a variety of data sources, including 3D optical microscopy, multi-channel optical volumes, abdominal and chest CT, micro-CT, MRI, simulation, and synthetic data. We also gathered feedback from a medical trainee regarding the usefulness of our approach and discussion on potential applications in clinical workflows.", "title": "Volume Exploration Using Multidimensional Bhattacharyya Flow", "normalizedTitle": "Volume Exploration Using Multidimensional Bhattacharyya Flow", "fno": "09614998", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Biomedical MRI", "Computerised Tomography", "Data Fusion", "Data Visualisation", "Graphics Processing Units", "Medical Image Processing", "Optical Microscopy", "Statistical Analysis", "Transfer Functions", "2 Step Process", "2 D Images", "3 D Images", "3 D Optical Microscopy", "Abdominal CT", "Bhattacharyya Gradient Flow", "Chest CT", "Clean Data", "Clutter Free Visualizations", "Computed Tomography", "Data Sources", "Edge Agnostic Exploration Process", "Hierarchical Active Contour Approach", "Hierarchical Exploration", "Isolated Regions", "Micro CT", "MRI", "Multichannel Optical Volumes", "Multidimensional Attribute Spaces", "Multidimensional Bhattacharyya Flow", "Multi GPU Implementation", "Noisy Data", "Semantic Structures", "Semantically Meaningful Subsets", "Single Thread CPU Implementation", "Statistical Information", "Synthetic Data", "Time Bound User Driven Volume Exploration Process", "Transfer Functions", "Active Contours", "Semantics", "Tools", "Three Dimensional Displays", "Histograms", "Biomedical Optical Imaging", "Optical Feedback", "Volume Exploration", "Geometric Active Contours", "Bhattacharyya Flow", "Hierarchical Decomposition", "Multi GPU" ], "authors": [ { "givenName": "Shreeraj", "surname": "Jadhav", "fullName": "Shreeraj Jadhav", "affiliation": "Computer Science Department, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Mahsa", "surname": "Torkaman", "fullName": "Mahsa Torkaman", "affiliation": "Department of Radiology & 
Biomedical Imaging, University of California San Francisco (UCSF), San Francisco, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Allen", "surname": "Tannenbaum", "fullName": "Allen Tannenbaum", "affiliation": "Computer Science and Applied Mathematics & Statistics Departments, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Saad", "surname": "Nadeem", "fullName": "Saad Nadeem", "affiliation": "Department of Medical Physics & Pathology, Memorial Sloan Kettering Cancer Center, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Arie E.", "surname": "Kaufman", "fullName": "Arie E. Kaufman", "affiliation": "Computer Science Department, Stony Brook University, Stony Brook, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1651-1663", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2010/4109/0/4109c788", "title": "Adaptive Diffusion Flow for Parametric Active Contours", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c788/12OmNBAqZGn", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1991/2245/0/00175772", "title": "Volume rendering of flow-visualization point data", "doi": null, "abstractUrl": "/proceedings-article/visual/1991/00175772/12OmNBaT62B", "parentPublication": { "id": "proceedings/visual/1991/2245/0", "title": "1991 Proceeding Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imvip/2011/0230/0/06167845", "title": "Optic Flow Providing External Force for Active Contours in Visually Tracking Dense Cell Population", "doi": null, "abstractUrl": "/proceedings-article/imvip/2011/06167845/12OmNC4eSyz", "parentPublication": { "id": "proceedings/imvip/2011/0230/0", "title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsip/2014/5100/0/5100a142", "title": "An Active Contour Method for MR Image Segmentation of Anterior Cruciate Ligament (ACL)", "doi": null, "abstractUrl": "/proceedings-article/icsip/2014/5100a142/12OmNwEJ0SD", "parentPublication": { "id": "proceedings/icsip/2014/5100/0", "title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdip/2009/3565/0/3565a112", "title": "A New Algorithm for Human Motion Capture via 3D Active Contours", "doi": null, "abstractUrl": "/proceedings-article/icdip/2009/3565a112/12OmNzgNXZU", "parentPublication": { "id": "proceedings/icdip/2009/3565/0", "title": "Digital Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/09/08412138", "title": "FeatureLego: Volume Exploration Using Exhaustive Clustering of Super-Voxels", "doi": null, "abstractUrl": "/journal/tg/2019/09/08412138/13rRUwInvl8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization 
& Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1996/02/v0144", "title": "Volumetric data exploration using interval volume", "doi": null, "abstractUrl": "/journal/tg/1996/02/v0144/13rRUxCitJ1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/09/06987271", "title": "Distribution Matching with the Bhattacharyya Similarity: A Bound Optimization Framework", "doi": null, "abstractUrl": "/journal/tp/2015/09/06987271/13rRUy3xY9i", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061683", "title": "Relation-Aware Volume Exploration Pipeline", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061683/13rRUygT7sw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a031", "title": "Revealable Volume Displays: 3D Exploration of Mixed-Reality Public Exhibitions", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a031/1tuArQOYe9q", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09615008", "articleId": "1yyho082gEw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09616433", "articleId": "1yA76RmrVtK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyWKYRrMs", "name": "ttg202303-09614998s1-supp1-3127918.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09614998s1-supp1-3127918.mp4", "extension": "mp4", "size": "84.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yA76RmrVtK", "doi": "10.1109/TVCG.2021.3128286", "abstract": "Virtual traffic benefits a variety of applications, including video games, traffic engineering, autonomous driving, and virtual reality. To date, traffic visualization via different simulation models can reconstruct detailed traffic flows. However, each specific behavior of vehicles is always described by establishing an independent control model. Moreover, mutual interactions between vehicles and other road users are rarely modeled in existing simulators. An all-in-one simulator that considers the complex behaviors of all potential road users in a realistic urban environment is urgently needed. In this work, we propose a novel, extensible, and microscopic method to build heterogeneous traffic simulation using the force-based concept. This force-based approach can accurately replicate the sophisticated behaviors of various road users and their interactions in a simple and unified manner. We calibrate the model parameters using real-world traffic trajectory data. The effectiveness of this approach is demonstrated through many simulation experiments, as well as comparisons to real-world traffic data and popular microscopic simulators for traffic animation.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual traffic benefits a variety of applications, including video games, traffic engineering, autonomous driving, and virtual reality. To date, traffic visualization via different simulation models can reconstruct detailed traffic flows. However, each specific behavior of vehicles is always described by establishing an independent control model. Moreover, mutual interactions between vehicles and other road users are rarely modeled in existing simulators. An all-in-one simulator that considers the complex behaviors of all potential road users in a realistic urban environment is urgently needed. In this work, we propose a novel, extensible, and microscopic method to build heterogeneous traffic simulation using the force-based concept. This force-based approach can accurately replicate the sophisticated behaviors of various road users and their interactions in a simple and unified manner. We calibrate the model parameters using real-world traffic trajectory data. The effectiveness of this approach is demonstrated through many simulation experiments, as well as comparisons to real-world traffic data and popular microscopic simulators for traffic animation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual traffic benefits a variety of applications, including video games, traffic engineering, autonomous driving, and virtual reality. To date, traffic visualization via different simulation models can reconstruct detailed traffic flows. However, each specific behavior of vehicles is always described by establishing an independent control model. Moreover, mutual interactions between vehicles and other road users are rarely modeled in existing simulators. An all-in-one simulator that considers the complex behaviors of all potential road users in a realistic urban environment is urgently needed. 
In this work, we propose a novel, extensible, and microscopic method to build heterogeneous traffic simulation using the force-based concept. This force-based approach can accurately replicate the sophisticated behaviors of various road users and their interactions in a simple and unified manner. We calibrate the model parameters using real-world traffic trajectory data. The effectiveness of this approach is demonstrated through many simulation experiments, as well as comparisons to real-world traffic data and popular microscopic simulators for traffic animation.", "title": "A Calibrated Force-Based Model for Mixed Traffic Simulation", "normalizedTitle": "A Calibrated Force-Based Model for Mixed Traffic Simulation", "fno": "09616433", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Data Visualisation", "Road Traffic", "Road Vehicles", "Telecommunication Traffic", "Traffic Engineering Computing", "Virtual Reality", "Autonomous Driving", "Calibrated Force Based Model", "Complex Behaviors", "Detailed Traffic Flows", "Different Simulation Models", "Existing Simulators", "Force Based Approach", "Force Based Concept", "Heterogeneous Traffic Simulation", "Independent Control Model", "Microscopic Method", "Mixed Traffic Simulation", "Model Parameters", "Mutual Interactions", "Popular Microscopic Simulators", "Potential Road Users", "Real World Traffic Data", "Real World Traffic Trajectory Data", "Realistic Urban Environment", "Simulation Experiments", "Sophisticated Behaviors", "Specific Behavior", "Traffic Animation", "Traffic Engineering", "Traffic Visualization", "Video Games", "Virtual Reality", "Virtual Traffic Benefits", "Computational Modeling", "Roads", "Force", "Solid Modeling", "Microscopy", "Data Models", "Trajectory", "Traffic Simulation", "Simulator", "Detailed Traffic Flow", "Heterogeneous", "Social Force" ], "authors": [ { "givenName": "Qianwen", "surname": "Chao", "fullName": "Qianwen Chao", "affiliation": "Department of Computer Science, Xidian University, Xi’an, Shaanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Pengfei", "surname": "Liu", "fullName": "Pengfei Liu", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yi", "surname": "Han", "fullName": "Yi Han", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingying", "surname": "Lin", "fullName": "Yingying Lin", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chaoneng", "surname": "Li", "fullName": "Chaoneng Li", "affiliation": "Department of Computer Science, Xidian University, Xi’an, Shaanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qiguang", "surname": "Miao", "fullName": "Qiguang Miao", "affiliation": "Department of Computer Science, Xidian University, Xi’an, Shaanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaogang", "surname": "Jin", "fullName": "Xiaogang Jin", "affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1664-1677", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": 
null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/eurosim/2013/5073/0/5073a448", "title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation", "doi": null, "abstractUrl": "/proceedings-article/eurosim/2013/5073a448/12OmNBlXs5Y", "parentPublication": { "id": "proceedings/eurosim/2013/5073/0", "title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2014/6636/0/6636a879", "title": "Traffic Simulation Modeling and Analysis of BRT Based on Vissim", "doi": null, "abstractUrl": "/proceedings-article/icicta/2014/6636a879/12OmNs5rl1t", "parentPublication": { "id": "proceedings/icicta/2014/6636/0", "title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecbs-eerc/2015/7967/0/7967a111", "title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement", "doi": null, "abstractUrl": "/proceedings-article/ecbs-eerc/2015/7967a111/12OmNvSKNQI", "parentPublication": { "id": "proceedings/ecbs-eerc/2015/7967/0", "title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kse/2010/4213/0/4213a131", "title": "Simulation of Mixed Traffic Flow within Intersection", "doi": null, "abstractUrl": "/proceedings-article/kse/2010/4213a131/12OmNyXMQo0", "parentPublication": { "id": "proceedings/kse/2010/4213/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ds-rt/2018/5048/0/08601016", "title": "Exploring Execution Schemes for Agent-Based Traffic Simulation on Heterogeneous Hardware", "doi": null, "abstractUrl": "/proceedings-article/ds-rt/2018/08601016/17D45XdBRR1", "parentPublication": { "id": "proceedings/ds-rt/2018/5048/0", "title": "2018 IEEE/ACM 22nd International Symposium on Distributed Simulation and Real Time Applications (DS-RT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a679", "title": "A high performance approach with MATSim for traffic road simulation", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a679/1GU762UY6as", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2019/3363/0/336300a320", "title": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark", "doi": null, "abstractUrl": "/proceedings-article/mdm/2019/336300a320/1ckrPWX3OQ8", "parentPublication": { "id": "proceedings/mdm/2019/3363/0", "title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b104", "title": "RONIN: a SUMO Interoperable Mesoscopic Urban Traffic Simulator", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b104/1t7mQqrHmiQ", "parentPublication": { 
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b080", "title": "Analysis of Urban Traffic Incidents Through Road Network Features", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b080/1t7n2Rdahsk", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2020/9234/0/923400a045", "title": "A Controllable Spring Force Based Method for Fluid Surface Disturbance Details Simulation", "doi": null, "abstractUrl": "/proceedings-article/icdh/2020/923400a045/1uGXV1Qs8IU", "parentPublication": { "id": "proceedings/icdh/2020/9234/0", "title": "2020 8th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09614998", "articleId": "1yyho7vk3cs", "__typename": "AdjacentArticleType" }, "next": { "fno": "09619951", "articleId": "1yDftLVqexG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yDftLVqexG", "doi": "10.1109/TVCG.2021.3129156", "abstract": "Deep learning techniques have proven effective in many applications, but these implementations mostly apply to data in one or two dimensions. Handling 3D data is more challenging due to its irregularity and complexity, and there is a growing interest in adapting deep learning techniques to the 3D domain. A recent successful approach called MeshCNN consists of a set of convolutional and pooling operators applied to the edges of triangular meshes. While this approach produced superb results in classification and segmentation of 3D shapes, it can only be applied to edges of a mesh, which can constitute a disadvantage for applications where the focuses are other primitives of the mesh. In this study, we propose face-based and vertex-based operators for mesh convolutional networks. We design two novel architectures based on the MeshCNN network that can operate on faces and vertices of a mesh, respectively. We demonstrate that the proposed face-based architecture outperforms the original MeshCNN implementation in mesh classification and mesh segmentation, setting the new state of the art on benchmark datasets. In addition, we extend the vertex-based operator to fit in the Point2Mesh model for mesh reconstruction from clean, noisy, and incomplete point clouds. While no statistically significant performance improvements are observed, the model training and inference time are reduced by the proposed approach by 91&#x0025; and 20&#x0025;, respectively, as compared with the original Point2Mesh model.", "abstracts": [ { "abstractType": "Regular", "content": "Deep learning techniques have proven effective in many applications, but these implementations mostly apply to data in one or two dimensions. Handling 3D data is more challenging due to its irregularity and complexity, and there is a growing interest in adapting deep learning techniques to the 3D domain. A recent successful approach called MeshCNN consists of a set of convolutional and pooling operators applied to the edges of triangular meshes. While this approach produced superb results in classification and segmentation of 3D shapes, it can only be applied to edges of a mesh, which can constitute a disadvantage for applications where the focuses are other primitives of the mesh. In this study, we propose face-based and vertex-based operators for mesh convolutional networks. We design two novel architectures based on the MeshCNN network that can operate on faces and vertices of a mesh, respectively. We demonstrate that the proposed face-based architecture outperforms the original MeshCNN implementation in mesh classification and mesh segmentation, setting the new state of the art on benchmark datasets. In addition, we extend the vertex-based operator to fit in the Point2Mesh model for mesh reconstruction from clean, noisy, and incomplete point clouds. 
While no statistically significant performance improvements are observed, the model training and inference time are reduced by the proposed approach by 91&#x0025; and 20&#x0025;, respectively, as compared with the original Point2Mesh model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep learning techniques have proven effective in many applications, but these implementations mostly apply to data in one or two dimensions. Handling 3D data is more challenging due to its irregularity and complexity, and there is a growing interest in adapting deep learning techniques to the 3D domain. A recent successful approach called MeshCNN consists of a set of convolutional and pooling operators applied to the edges of triangular meshes. While this approach produced superb results in classification and segmentation of 3D shapes, it can only be applied to edges of a mesh, which can constitute a disadvantage for applications where the focuses are other primitives of the mesh. In this study, we propose face-based and vertex-based operators for mesh convolutional networks. We design two novel architectures based on the MeshCNN network that can operate on faces and vertices of a mesh, respectively. We demonstrate that the proposed face-based architecture outperforms the original MeshCNN implementation in mesh classification and mesh segmentation, setting the new state of the art on benchmark datasets. In addition, we extend the vertex-based operator to fit in the Point2Mesh model for mesh reconstruction from clean, noisy, and incomplete point clouds. While no statistically significant performance improvements are observed, the model training and inference time are reduced by the proposed approach by 91% and 20%, respectively, as compared with the original Point2Mesh model.", "title": "Mesh Convolutional Networks With Face and Vertex Feature Operators", "normalizedTitle": "Mesh Convolutional Networks With Face and Vertex Feature Operators", "fno": "09619951", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Convolution", "Face Recognition", "Feature Extraction", "Image Reconstruction", "Image Segmentation", "Learning Artificial Intelligence", "Mesh Generation", "Convolutional Operators", "Deep Learning Techniques", "Face Based Architecture", "Mesh Classification", "Mesh Convolutional Networks", "Mesh Reconstruction", "Mesh Segmentation", "Mesh CNN Network", "Original Mesh CNN Implementation", "Original Point 2 Mesh Model", "Pooling Operators", "Recent Successful Approach", "Triangular Meshes", "Vertex Feature Operators", "Vertex Based Operator", "Vertices", "Conferences", "Portable Document Format", "Indexes", "Typesetting", "Loading", "Web Sites", "Warranties", "Geometric Deep Learning", "Mesh", "Classification", "Segmentation", "Feature Selection" ], "authors": [ { "givenName": "Daniel", "surname": "Perez", "fullName": "Daniel Perez", "affiliation": "Department of Computational Modeling and Simulation Engineering, Old Dominion University, Norfolk, VA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yuzhong", "surname": "Shen", "fullName": "Yuzhong Shen", "affiliation": "Department of Computational Modeling and Simulation Engineering, Old Dominion University, Norfolk, VA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jiang", "surname": "Li", "fullName": "Jiang Li", "affiliation": "Department of Electrical and Computer Engineering, Old Dominion University, Norfolk, VA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1678-1690", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdma/2012/4772/0/4772a001", "title": "3D Model Retrieval Based on Projected Area at Mesh Vertex", "doi": null, "abstractUrl": "/proceedings-article/icdma/2012/4772a001/12OmNs0TKLE", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336c971", "title": "Adaptive Mesh Simplification Using Vertex Clustering with Topology Preserving", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336c971/12OmNyRg4mT", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2023/02/09772368", "title": "Shrinking the Kernel Attack Surface Through Static and Dynamic Syscall Limitation", "doi": null, "abstractUrl": "/journal/sc/2023/02/09772368/1DgjvFwHct2", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2021/06/09397284", "title": "Plasticity-on-Chip Design: Exploiting Self-Similarity for Data Communications", "doi": null, "abstractUrl": "/journal/tc/2021/06/09397284/1sA4XChIK6A", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/02/09395237", "title": "Manifold-Inspired Search-Based Algorithm for Automated Test Case Generation", "doi": null, "abstractUrl": "/journal/ec/2022/02/09395237/1syqhlKA1Q4", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/12/09616408", "title": "Pull Request Governance in Open Source Communities", "doi": null, "abstractUrl": "/journal/ts/2022/12/09616408/1yA77WD69DG", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/12/09664358", "title": "Hybrid Static-Dynamic Analysis of Data Races Caused by Inconsistent Locking Discipline in Device Drivers", "doi": null, "abstractUrl": "/journal/ts/2022/12/09664358/1zHDJf1LXLW", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/04/09645177", "title": "Event Detection With Dynamic Word-Trigger-Argument Graph Neural Networks", "doi": null, "abstractUrl": "/journal/tk/2023/04/09645177/1zc6wR47sFW", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/09/09647962", "title": "PPOAccel: A High-Throughput Acceleration Framework for Proximal Policy Optimization", "doi": null, 
"abstractUrl": "/journal/td/2022/09/09647962/1ziKjpj06Wc", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/09/09650595", "title": "Cooperative Edge Caching Based on Temporal Convolutional Networks", "doi": null, "abstractUrl": "/journal/td/2022/09/09650595/1zkp2c5KJsA", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09616433", "articleId": "1yA76RmrVtK", "__typename": "AdjacentArticleType" }, "next": { "fno": "09622132", "articleId": "1yEUqT5fBwQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yEUqT5fBwQ", "doi": "10.1109/TVCG.2021.3129414", "abstract": "Optimizing the performance of large-scale parallel codes is critical for efficient utilization of computing resources. Code developers often explore various execution parameters, such as hardware configurations, system software choices, and application parameters, and are interested in detecting and understanding bottlenecks in different executions. They often collect hierarchical performance profiles represented as call graphs, which combine performance metrics with their execution contexts. The crucial task of exploring multiple call graphs together is tedious and challenging because of the many structural differences in the execution contexts and significant variability in the collected performance metrics (e.g., execution runtime). In this paper, we present <italic>Ensemble CallFlow</italic> to support the exploration of ensembles of call graphs using new types of visualizations, analysis, graph operations, and features. We introduce <italic>ensemble-Sankey</italic>, a new visual design that combines the strengths of resource-flow (Sankey) and box-plot visualization techniques. Whereas the resource-flow visualization can easily and intuitively describe the graphical nature of the call graph, the box plots overlaid on the nodes of Sankey convey the performance variability within the ensemble. Our interactive visual interface provides linked views to help explore ensembles of call graphs, e.g., by facilitating the analysis of structural differences, and identifying similar or distinct call graphs. We demonstrate the effectiveness and usefulness of our design through case studies on large-scale parallel codes.", "abstracts": [ { "abstractType": "Regular", "content": "Optimizing the performance of large-scale parallel codes is critical for efficient utilization of computing resources. Code developers often explore various execution parameters, such as hardware configurations, system software choices, and application parameters, and are interested in detecting and understanding bottlenecks in different executions. They often collect hierarchical performance profiles represented as call graphs, which combine performance metrics with their execution contexts. The crucial task of exploring multiple call graphs together is tedious and challenging because of the many structural differences in the execution contexts and significant variability in the collected performance metrics (e.g., execution runtime). In this paper, we present <italic>Ensemble CallFlow</italic> to support the exploration of ensembles of call graphs using new types of visualizations, analysis, graph operations, and features. We introduce <italic>ensemble-Sankey</italic>, a new visual design that combines the strengths of resource-flow (Sankey) and box-plot visualization techniques. Whereas the resource-flow visualization can easily and intuitively describe the graphical nature of the call graph, the box plots overlaid on the nodes of Sankey convey the performance variability within the ensemble. 
Our interactive visual interface provides linked views to help explore ensembles of call graphs, e.g., by facilitating the analysis of structural differences, and identifying similar or distinct call graphs. We demonstrate the effectiveness and usefulness of our design through case studies on large-scale parallel codes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Optimizing the performance of large-scale parallel codes is critical for efficient utilization of computing resources. Code developers often explore various execution parameters, such as hardware configurations, system software choices, and application parameters, and are interested in detecting and understanding bottlenecks in different executions. They often collect hierarchical performance profiles represented as call graphs, which combine performance metrics with their execution contexts. The crucial task of exploring multiple call graphs together is tedious and challenging because of the many structural differences in the execution contexts and significant variability in the collected performance metrics (e.g., execution runtime). In this paper, we present Ensemble CallFlow to support the exploration of ensembles of call graphs using new types of visualizations, analysis, graph operations, and features. We introduce ensemble-Sankey, a new visual design that combines the strengths of resource-flow (Sankey) and box-plot visualization techniques. Whereas the resource-flow visualization can easily and intuitively describe the graphical nature of the call graph, the box plots overlaid on the nodes of Sankey convey the performance variability within the ensemble. Our interactive visual interface provides linked views to help explore ensembles of call graphs, e.g., by facilitating the analysis of structural differences, and identifying similar or distinct call graphs. We demonstrate the effectiveness and usefulness of our design through case studies on large-scale parallel codes.", "title": "Scalable Comparative Visualization of Ensembles of Call Graphs", "normalizedTitle": "Scalable Comparative Visualization of Ensembles of Call Graphs", "fno": "09622132", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Flow Graphs", "Interactive Systems", "Mathematics Computing", "Application Parameters", "Box Plot Visualization Techniques", "Call Graph Ensembles", "Code Developers", "Ensemble Sankey", "Execution Parameters", "Execution Runtime", "Graph Operations", "Hardware Configurations", "Hierarchical Performance Profiles", "Interactive Visual Interface", "Large Scale Parallel Codes", "Performance Variability", "Resource Flow Visualization", "Scalable Comparative Visualization", "System Software Choices", "Visual Design", "Codes", "Measurement", "Tools", "Runtime", "Libraries", "Task Analysis", "Data Visualization", "Performance Analysis", "Software Visualization", "Visual Analytics", "Hierarchical Data", "Coordinated And Multiple Views" ], "authors": [ { "givenName": "Suraj P.", "surname": "Kesavan", "fullName": "Suraj P. 
Kesavan", "affiliation": "Department of Computer Science, University of California, Davis, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Harsh", "surname": "Bhatia", "fullName": "Harsh Bhatia", "affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Abhinav", "surname": "Bhatele", "fullName": "Abhinav Bhatele", "affiliation": "Department of Computer Science, University of Maryland, College Park, MD, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Stephanie", "surname": "Brink", "fullName": "Stephanie Brink", "affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Olga", "surname": "Pearce", "fullName": "Olga Pearce", "affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Todd", "surname": "Gamblin", "fullName": "Todd Gamblin", "affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Peer-Timo", "surname": "Bremer", "fullName": "Peer-Timo Bremer", "affiliation": "Center for Applied Scientific Computing, Lawrence Livermore National Laboratory, Livermore, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": "Department of Computer Science, University of California, Davis, CA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1691-1704", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/lcn-workshops/2013/0540/0/06758510", "title": "All eyes on code: Using call graphs for WSN software optimization", "doi": null, "abstractUrl": "/proceedings-article/lcn-workshops/2013/06758510/12OmNCxtyMd", "parentPublication": { "id": "proceedings/lcn-workshops/2013/0540/0", "title": "2013 IEEE 38th Conference on Local Computer Networks Workshops (LCN Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2011/1246/0/06070388", "title": "Visualizing call graphs", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2011/06070388/12OmNxjjEdP", "parentPublication": { "id": "proceedings/vlhcc/2011/1246/0", "title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scam/2018/8290/0/829000a177", "title": "[Research Paper] Static JavaScript Call Graphs: A Comparative Study", "doi": null, "abstractUrl": "/proceedings-article/scam/2018/829000a177/17D45X0yjW1", "parentPublication": { "id": "proceedings/scam/2018/8290/0", "title": "2018 IEEE 18th International Working Conference on Source Code Analysis and Manipulation (SCAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/cc/2021/03/08656535", "title": "CloudCFI: Context-Sensitive and Incremental CFI in the Cloud Environment", "doi": null, "abstractUrl": 
"/journal/cc/2021/03/08656535/187PRKXSEGQ", "parentPublication": { "id": "trans/cc", "title": "IEEE Transactions on Cloud Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/protools/2022/7564/0/756400a011", "title": "Generating and Analyzing Program Call Graphs using Ontology", "doi": null, "abstractUrl": "/proceedings-article/protools/2022/756400a011/1KmF2CjFIK4", "parentPublication": { "id": "proceedings/protools/2022/7564/0", "title": "2022 IEEE/ACM Workshop on Programming and Performance Visualization Tools (ProTools)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005560", "title": "GraphEvo: Characterizing and Understanding Software Evolution using Call Graphs", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005560/1hJsp9zjTCU", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/02/09106860", "title": "Explaining Static Analysis With Rule Graphs", "doi": null, "abstractUrl": "/journal/ts/2022/02/09106860/1kkFHDSIB4k", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/01/09205639", "title": "AppDNA: Profiling App Behavior via Deep-Learning Function Call Graphs", "doi": null, "abstractUrl": "/journal/ec/2022/01/09205639/1nnSSKB9HeE", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09229522", "title": "Learning on Attribute-Missing Graphs", "doi": null, "abstractUrl": "/journal/tp/2022/02/09229522/1o3nin1pJ3G", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2021/1219/0/121900a099", "title": "Scalable Call Graph Constructor for Maven", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900a099/1sET629XZaE", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09619951", "articleId": "1yDftLVqexG", "__typename": "AdjacentArticleType" }, "next": { "fno": "09625765", "articleId": "1yLTqDGZSOQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yLTqDGZSOQ", "doi": "10.1109/TVCG.2021.3130071", "abstract": "An infographic is a type of visualization chart that displays pieces of information through information blocks. Existing information block detection work utilizes spatial proximity to group elements into several information blocks. However, prior studies ignore the chromatic and structural features of the infographic, resulting in incorrect omissions when detecting information blocks. To alleviate this kind of error, we use a scene graph to represent an infographic and propose a graph-based information block detection model to group elements based on Gestalt Organization Principles (spatial proximity, chromatic similarity, and structural similarity principle). We also construct a new dataset for information block detection. Quantitative and qualitative experiments show that our model can detect the information blocks in the infographic more effectively compared with the spatial proximity-based method.", "abstracts": [ { "abstractType": "Regular", "content": "An infographic is a type of visualization chart that displays pieces of information through information blocks. Existing information block detection work utilizes spatial proximity to group elements into several information blocks. However, prior studies ignore the chromatic and structural features of the infographic, resulting in incorrect omissions when detecting information blocks. To alleviate this kind of error, we use a scene graph to represent an infographic and propose a graph-based information block detection model to group elements based on Gestalt Organization Principles (spatial proximity, chromatic similarity, and structural similarity principle). We also construct a new dataset for information block detection. Quantitative and qualitative experiments show that our model can detect the information blocks in the infographic more effectively compared with the spatial proximity-based method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An infographic is a type of visualization chart that displays pieces of information through information blocks. Existing information block detection work utilizes spatial proximity to group elements into several information blocks. However, prior studies ignore the chromatic and structural features of the infographic, resulting in incorrect omissions when detecting information blocks. To alleviate this kind of error, we use a scene graph to represent an infographic and propose a graph-based information block detection model to group elements based on Gestalt Organization Principles (spatial proximity, chromatic similarity, and structural similarity principle). We also construct a new dataset for information block detection. 
Quantitative and qualitative experiments show that our model can detect the information blocks in the infographic more effectively compared with the spatial proximity-based method.", "title": "Graph-Based Information Block Detection in Infographic With Gestalt Organization Principles", "normalizedTitle": "Graph-Based Information Block Detection in Infographic With Gestalt Organization Principles", "fno": "09625765", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graph Theory", "Chart Visualization", "Chromatic Similarity", "Gestalt Organization Principles", "Graph Based Information Block Detection", "Infographic", "Spatial Proximity", "Spatial Proximity Based Method", "Structural Similarity Principle", "Feature Extraction", "Visualization", "Data Visualization", "Semantics", "Organizations", "Layout", "Task Analysis", "Infographic", "Deep Learning", "Graph Based Approach", "Information Block Detection" ], "authors": [ { "givenName": "Jie", "surname": "Lin", "fullName": "Jie Lin", "affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yi", "surname": "Cai", "fullName": "Yi Cai", "affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xin", "surname": "Wu", "fullName": "Xin Wu", "affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jianwei", "surname": "Lu", "fullName": "Jianwei Lu", "affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1705-1718", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847177", "title": "UVisP: User-centric Visualization of Data Provenance with Gestalt Principles", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847177/12OmNBJNL12", "parentPublication": { "id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0", "title": "2016 IEEE Trustcom/BigDataSE/I​SPA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460443", "title": "A graph-based method of newspaper article reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460443/12OmNC8uRjk", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/02/mcg2015020080", "title": "Gestalt Principles in Multimodal Data Representation", "doi": null, "abstractUrl": "/magazine/cg/2015/02/mcg2015020080/13rRUxCitB0", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl-hcc/2022/4214/0/09833103", "title": "Code-Chips: Interactive Syntax in Visual Programming", "doi": null, "abstractUrl": 
"/proceedings-article/vl-hcc/2022/09833103/1FUSFinOlwc", "parentPublication": { "id": "proceedings/vl-hcc/2022/4214/0", "title": "2022 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl-hcc/2022/4214/0/09833128", "title": "Exploring Organization of Computational Notebook Cells in 2D Space", "doi": null, "abstractUrl": "/proceedings-article/vl-hcc/2022/09833128/1FUSKPSqVuE", "parentPublication": { "id": "proceedings/vl-hcc/2022/4214/0", "title": "2022 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vahc/2022/0103/0/10108526", "title": "Browser-based Infographic Tailoring Self-service Interface (BITSI)", "doi": null, "abstractUrl": "/proceedings-article/vahc/2022/10108526/1MIgSVcHS9O", "parentPublication": { "id": "proceedings/vahc/2022/0103/0", "title": "2022 Workshop on Visual Analytics in Healthcare (VAHC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807266", "title": "Towards Automated Infographic Design: Deep Learning-based Auto-Extraction of Extensible Timeline", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807266/1cG6bYWFt3W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h434", "title": "Seq-SG2SL: Inferring Semantic Layout From Scene Graph Through Sequence to Sequence Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h434/1hVlTDVb5dK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2021/3931/0/393100a031", "title": "Parsing and Summarizing Infographics with Synthetically Trained Icon Detection", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2021/393100a031/1tTts9CdeyQ", "parentPublication": { "id": "proceedings/pacificvis/2021/3931/0", "title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09585700", "title": "A Mixed-Initiative Approach to Reusing Infographic Charts", "doi": null, "abstractUrl": "/journal/tg/2022/01/09585700/1y11cGSPuPC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09622132", "articleId": "1yEUqT5fBwQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "09626557", "articleId": "1yNd5vlQLrG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yNd5vlQLrG", "doi": "10.1109/TVCG.2021.3130422", "abstract": "We propose SimuExplorer, a visualization system to help analysts explore how player behaviors impact scoring rates in table tennis. Such analysis is indispensable for analysts and coaches, who aim to formulate training plans that can help players improve. However, it is challenging to identify the impacts of individual behaviors, as well as to understand how these impacts are generated and accumulated gradually over the course of a game. To address these challenges, we worked closely with experts who work for a top national table tennis team to design SimuExplorer. The SimuExplorer system integrates a Markov chain model to simulate individual and cumulative impacts of particular behaviors. It then provides flow and matrix views to help users visualize and interpret these impacts. We demonstrate the usefulness of the system with case studies and expert interviews. The experts think highly of the system and have obtained insights into players&#x2019; behaviors using it.", "abstracts": [ { "abstractType": "Regular", "content": "We propose SimuExplorer, a visualization system to help analysts explore how player behaviors impact scoring rates in table tennis. Such analysis is indispensable for analysts and coaches, who aim to formulate training plans that can help players improve. However, it is challenging to identify the impacts of individual behaviors, as well as to understand how these impacts are generated and accumulated gradually over the course of a game. To address these challenges, we worked closely with experts who work for a top national table tennis team to design SimuExplorer. The SimuExplorer system integrates a Markov chain model to simulate individual and cumulative impacts of particular behaviors. It then provides flow and matrix views to help users visualize and interpret these impacts. We demonstrate the usefulness of the system with case studies and expert interviews. The experts think highly of the system and have obtained insights into players&#x2019; behaviors using it.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose SimuExplorer, a visualization system to help analysts explore how player behaviors impact scoring rates in table tennis. Such analysis is indispensable for analysts and coaches, who aim to formulate training plans that can help players improve. However, it is challenging to identify the impacts of individual behaviors, as well as to understand how these impacts are generated and accumulated gradually over the course of a game. To address these challenges, we worked closely with experts who work for a top national table tennis team to design SimuExplorer. The SimuExplorer system integrates a Markov chain model to simulate individual and cumulative impacts of particular behaviors. It then provides flow and matrix views to help users visualize and interpret these impacts. We demonstrate the usefulness of the system with case studies and expert interviews. 
The experts think highly of the system and have obtained insights into players’ behaviors using it.", "title": "SimuExplorer: Visual Exploration of Game Simulation in Table Tennis", "normalizedTitle": "SimuExplorer: Visual Exploration of Game Simulation in Table Tennis", "fno": "09626557", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Games", "Data Analysis", "Data Mining", "Data Visualisation", "Markov Processes", "Sport", "Cumulative Impacts", "Game Simulation", "Impact Scoring Rates", "Individual Behaviors", "Individual Impacts", "Markov Chain Model", "National Table Tennis Team", "Particular Behaviors", "Players", "Simu Explorer System", "Training Plans", "Visual Exploration", "Visualization System", "Sports", "Markov Processes", "Visualization", "Analytical Models", "Tools", "Task Analysis", "Software", "Sports Visualization", "Game Simulation", "Model Interpretation", "Etc" ], "authors": [ { "givenName": "Ji", "surname": "Lan", "fullName": "Ji Lan", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zheng", "surname": "Zhou", "fullName": "Zheng Zhou", "affiliation": "Department of Sport Science, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiachen", "surname": "Wang", "fullName": "Jiachen Wang", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hui", "surname": "Zhang", "fullName": "Hui Zhang", "affiliation": "Department of Sport Science, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiao", "surname": "Xie", "fullName": "Xiao Xie", "affiliation": "Department of Sport Science, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1719-1732", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2018/01/08017600", "title": "iTTVis: Interactive Visualization of Table Tennis Data", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017600/13rRUyY28YD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise-ie/2021/3829/0/382900b210", "title": "Application of Micro-lecture in Table Tennis Teaching for Children", "doi": null, "abstractUrl": "/proceedings-article/icise-ie/2021/382900b210/1C8GamvUKGI", "parentPublication": { "id": "proceedings/icise-ie/2021/3829/0", "title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiars/2022/5457/0/545700a135", "title": "Intelligent Repair System of Table Tennis Server Based on Data Analysis Algorithm", "doi": null, "abstractUrl": "/proceedings-article/aiars/2022/545700a135/1J2XPikx7b2", "parentPublication": { "id": "proceedings/aiars/2022/5457/0", 
"title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807264", "title": "Tac-Simur: Tactic-based Simulative Visual Analytics of Table Tennis", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807264/1cG6vo24hRC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08795584", "title": "CourtTime: Generating Actionable Insights into Tennis Matches Using Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2020/01/08795584/1csHUeq7TB6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpai/2020/4262/0/426200a058", "title": "Stress Level Classifier: Taiwanese College Table Tennis Athletes&#x2019; Electroencephalography Analysis Based on Decision Trees", "doi": null, "abstractUrl": "/proceedings-article/icpai/2020/426200a058/1pZ17Yuv70Y", "parentPublication": { "id": "proceedings/icpai/2020/4262/0", "title": "2020 International Conference on Pervasive Artificial Intelligence (ICPAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/09411869", "title": "Tac-Miner: Visual Tactic Mining for Multiple Table Tennis Matches", "doi": null, "abstractUrl": "/journal/tg/2021/06/09411869/1t2ii7r7RcI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2021/3892/0/389200a632", "title": "Tactical Decision System of Table Tennis Match based on C4.5 Decision Tree", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2021/389200a632/1t2nmIZ5RBe", "parentPublication": { "id": "proceedings/icmtma/2021/3892/0", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tcs/2021/2910/0/291000a533", "title": "A Study of Liu Shiwen&#x2019;s Table Tennis Techniques and Tactics Based on Computer-aided Video", "doi": null, "abstractUrl": "/proceedings-article/tcs/2021/291000a533/1wRIl8gP8xW", "parentPublication": { "id": "proceedings/tcs/2021/2910/0", "title": "2021 International Conference on Information Technology and Contemporary Sports (TCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmu/2021/48/0/09638855", "title": "Toward the Perfect Stroke: A Multimodal Approach for Table Tennis Stroke Evaluation", "doi": null, "abstractUrl": "/proceedings-article/icmu/2021/09638855/1zktfg0C87u", "parentPublication": { "id": "proceedings/icmu/2021/48/0", "title": "2021 Thirteenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09625765", "articleId": "1yLTqDGZSOQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "09627526", "articleId": "1yORLIqoJnq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": 
"1KmyTvrF5Ys", "name": "ttg202303-09626557s1-supp1-3130422.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09626557s1-supp1-3130422.pdf", "extension": "pdf", "size": "199 kB", "__typename": "WebExtraType" }, { "id": "1KmyTpbV2ik", "name": "ttg202303-09626557s1-supp2-3130422.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09626557s1-supp2-3130422.mp4", "extension": "mp4", "size": "44.7 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yORLIqoJnq", "doi": "10.1109/TVCG.2021.3130670", "abstract": "We present a method for producing documentary-style content using real-time scientific visualization. We introduce molecumentaries, i.e., molecular documentaries featuring structural models from molecular biology, created through adaptable methods instead of the rigid traditional production pipeline. Our work is motivated by the rapid evolution of scientific visualization and it potential in science dissemination. Without some form of explanation or guidance, however, novices and lay-persons often find it difficult to gain insights from the visualization itself. We integrate such knowledge using the verbal channel and provide it along an engaging visual presentation. To realize the synthesis of a molecumentary, we provide technical solutions along two major production steps: (1) preparing a story structure and (2) turning the story into a concrete narrative. In the first step, we compile information about the model from heterogeneous sources into a story graph. We combine local knowledge with external sources to complete the story graph and enrich the final result. In the second step, we synthesize a narrative, i.e., story elements presented in sequence, using the story graph. We then traverse the story graph and generate a virtual tour, using automated camera and visualization transitions. We turn texts written by domain experts into verbal representations using text-to-speech functionality and provide them as a commentary. Using the described framework, we synthesize fly-throughs with descriptions: automatic ones that mimic a manually authored documentary or semi-automatic ones which guide the documentary narrative solely through curated textual input.", "abstracts": [ { "abstractType": "Regular", "content": "We present a method for producing documentary-style content using real-time scientific visualization. We introduce molecumentaries, i.e., molecular documentaries featuring structural models from molecular biology, created through adaptable methods instead of the rigid traditional production pipeline. Our work is motivated by the rapid evolution of scientific visualization and it potential in science dissemination. Without some form of explanation or guidance, however, novices and lay-persons often find it difficult to gain insights from the visualization itself. We integrate such knowledge using the verbal channel and provide it along an engaging visual presentation. To realize the synthesis of a molecumentary, we provide technical solutions along two major production steps: (1) preparing a story structure and (2) turning the story into a concrete narrative. In the first step, we compile information about the model from heterogeneous sources into a story graph. We combine local knowledge with external sources to complete the story graph and enrich the final result. In the second step, we synthesize a narrative, i.e., story elements presented in sequence, using the story graph. We then traverse the story graph and generate a virtual tour, using automated camera and visualization transitions. 
We turn texts written by domain experts into verbal representations using text-to-speech functionality and provide them as a commentary. Using the described framework, we synthesize fly-throughs with descriptions: automatic ones that mimic a manually authored documentary or semi-automatic ones which guide the documentary narrative solely through curated textual input.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a method for producing documentary-style content using real-time scientific visualization. We introduce molecumentaries, i.e., molecular documentaries featuring structural models from molecular biology, created through adaptable methods instead of the rigid traditional production pipeline. Our work is motivated by the rapid evolution of scientific visualization and it potential in science dissemination. Without some form of explanation or guidance, however, novices and lay-persons often find it difficult to gain insights from the visualization itself. We integrate such knowledge using the verbal channel and provide it along an engaging visual presentation. To realize the synthesis of a molecumentary, we provide technical solutions along two major production steps: (1) preparing a story structure and (2) turning the story into a concrete narrative. In the first step, we compile information about the model from heterogeneous sources into a story graph. We combine local knowledge with external sources to complete the story graph and enrich the final result. In the second step, we synthesize a narrative, i.e., story elements presented in sequence, using the story graph. We then traverse the story graph and generate a virtual tour, using automated camera and visualization transitions. We turn texts written by domain experts into verbal representations using text-to-speech functionality and provide them as a commentary. 
Using the described framework, we synthesize fly-throughs with descriptions: automatic ones that mimic a manually authored documentary or semi-automatic ones which guide the documentary narrative solely through curated textual input.", "title": "Molecumentary: Adaptable Narrated Documentaries Using Molecular Visualization", "normalizedTitle": "Molecumentary: Adaptable Narrated Documentaries Using Molecular Visualization", "fno": "09627526", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Data Visualisation", "Graph Theory", "Humanities", "Interactive Systems", "Text Analysis", "Video Signal Processing", "Virtual Reality", "Adaptable Methods", "Adaptable Narrated Documentaries", "Concrete Narrative", "Documentary Narrative", "Documentary Style Content", "Engaging Visual Presentation", "Manually Authored Documentary", "Molecular Biology", "Molecular Documentaries", "Molecular Visualization", "Molecumentary", "Production Steps", "Real Time Scientific Visualization", "Rigid Traditional Production Pipeline", "Story Elements", "Story Graph", "Story Structure And", "Structural Models", "Data Visualization", "Visualization", "Cameras", "Three Dimensional Displays", "Animation", "Real Time Systems", "Solid Modeling", "Virtual Tour", "Audio", "Biological Data", "Storytelling", "Illustrative Visualization" ], "authors": [ { "givenName": "David", "surname": "Kouřil", "fullName": "David Kouřil", "affiliation": "Masaryk University, Brno, Czech Republic", "__typename": "ArticleAuthorType" }, { "givenName": "Ondřej", "surname": "Strnad", "fullName": "Ondřej Strnad", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Mindek", "fullName": "Peter Mindek", "affiliation": "TU Wien and Nanographics GmbH, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Sarkis", "surname": "Halladjian", "fullName": "Sarkis Halladjian", "affiliation": "CNRS, Inria, LISN, Université Paris-Saclay, Gif-sur-Yvette, France", "__typename": "ArticleAuthorType" }, { "givenName": "Tobias", "surname": "Isenberg", "fullName": "Tobias Isenberg", "affiliation": "CNRS, Inria, LISN, Université Paris-Saclay, Gif-sur-Yvette, France", "__typename": "ArticleAuthorType" }, { "givenName": "M. Eduard", "surname": "Gröller", "fullName": "M. 
Eduard Gröller", "affiliation": "TU Wien and the VRVis Research Center, Wien, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Ivan", "surname": "Viola", "fullName": "Ivan Viola", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1733-1747", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2016/2303/0/2303a211", "title": "StoryCake: A Hierarchical Plot Visualization Method for Storytelling in Polar Coordinates", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a211/12OmNBDQbk9", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/varms-ieeevr/2015/6926/0/07151725", "title": "Enhancing visualization of molecular simulations using sonification", "doi": null, "abstractUrl": "/proceedings-article/varms-ieeevr/2015/07151725/12OmNx0A7CV", "parentPublication": { "id": "proceedings/varms-ieeevr/2015/6926/0", "title": "2015 IEEE 1st International Workshop on Virtual and Augmented Reality for Molecular Science (VARMS@IEEEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122406", "title": "A Deeper Understanding of Sequence in Narrative Visualization", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122406/13rRUwIF6l7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061139", "title": "Narrative Visualization: Telling Stories with Data", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061139/13rRUxAAST1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122231", "title": "Visualization Rhetoric: Framing Effects in Narrative Visualization", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122231/13rRUxBJhFs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/05/mcg2015050084", "title": "More Than Telling a Story: Transforming Data into Visually Shared Stories", "doi": null, "abstractUrl": "/magazine/cg/2015/05/mcg2015050084/13rRUxjyX6v", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2010/02/05438752", "title": "Cinematic Visual Discourse: Representation, Generation, and Evaluation", "doi": null, "abstractUrl": "/journal/ci/2010/02/05438752/13rRUypp5ar", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a100", "title": "A 
Two-Level Planning Framework for Mixed Reality Interactive Narratives with User Engagement", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a100/17D45Xh13v2", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a044", "title": "Once Upon a Time in a Land Far Away: Guidelines for Spatio-Temporal Narrative Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a044/1cMF8rgW5na", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552203", "title": "A Design Space for Applying the Freytag&#x0027;s Pyramid Structure to Data Stories", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552203/1xic2a0UxkA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09626557", "articleId": "1yNd5vlQLrG", "__typename": "AdjacentArticleType" }, "next": { "fno": "09629264", "articleId": "1yXvJdO9qaQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyQgXbhPa", "name": "ttg202303-09627526s1-supp3-3130670.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09627526s1-supp3-3130670.mp4", "extension": "mp4", "size": "44.8 MB", "__typename": "WebExtraType" }, { "id": "1KmyQYeJ1x6", "name": "ttg202303-09627526s1-supp1-3130670.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09627526s1-supp1-3130670.mp4", "extension": "mp4", "size": "52.7 MB", "__typename": "WebExtraType" }, { "id": "1KmyQIhdm6c", "name": "ttg202303-09627526s1-supp5-3130670.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09627526s1-supp5-3130670.mp4", "extension": "mp4", "size": "34.4 MB", "__typename": "WebExtraType" }, { "id": "1KmyQtH8WOI", "name": "ttg202303-09627526s1-supp4-3130670.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09627526s1-supp4-3130670.mp4", "extension": "mp4", "size": "59.2 MB", "__typename": "WebExtraType" }, { "id": "1KmyQ6kmiVq", "name": "ttg202303-09627526s1-supp2-3130670.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09627526s1-supp2-3130670.mp4", "extension": "mp4", "size": "27.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
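The molecumentary pipeline above turns a story graph into a narrative, i.e., story elements presented in sequence, with each element driving a camera transition and a text-to-speech utterance. A minimal sketch of that second step follows, under assumed data structures; the node fields and texts are invented and this is not the paper's implementation.

# Minimal sketch: depth-first traversal of a story graph, emitting story
# elements in presentation order up to a length budget.
story_graph = {
    "virus":    {"text": "This is the full virion.",       "children": ["capsid", "genome"]},
    "capsid":   {"text": "The capsid shields the genome.", "children": ["capsomer"]},
    "capsomer": {"text": "It is tiled from capsomers.",    "children": []},
    "genome":   {"text": "Inside lies the RNA genome.",    "children": []},
}

def synthesize_narrative(graph, root, budget=10):
    """Yield (node, text) pairs in presentation order."""
    narrative, stack, seen = [], [root], set()
    while stack and len(narrative) < budget:
        node = stack.pop()
        if node in seen:                       # sub-stories may be shared
            continue
        seen.add(node)
        narrative.append((node, graph[node]["text"]))
        stack.extend(reversed(graph[node]["children"]))  # keep authored order
    return narrative

for node, text in synthesize_narrative(story_graph, "virus"):
    print(f"fly to {node!r}: say {text!r}")    # one camera move + TTS utterance each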
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yXvJdO9qaQ", "doi": "10.1109/TVCG.2021.3131422", "abstract": "Using standard handheld interfaces for VR locomotion may not provide a believable self-motion experience and can contribute to unwanted side effects such as motion sickness, disorientation, or increased cognitive load. This paper demonstrates how using a seated leaning-based locomotion interface &#x2013;HeadJoystick&#x2013; in VR ground-based navigation affects user experience, usability, and performance. In three within-subject studies, we compared controller (touchpad/thumbstick) with a more embodied interface (&#x201D;HeadJoystick&#x201D;) where users moved their head and/or leaned in the direction of desired locomotion. In both conditions, users sat on a regular office chair and used it to control virtual rotations. In the first study, 24 participants used HeadJoystick versus Controller in three complementary tasks including reach-the-target, follow-the-path, and racing (dynamic obstacle avoidance). In the second study, 18 participants repeatedly used HeadJoystick versus Controller (8 one-minute trials each) in a reach-the-target task. To evaluate potential benefits of different brake mechanisms, in the third study 18 participants were asked to stop within each target area for one second. All three studies consistently showed advantages of HeadJoystick over Controller: we observed improved performance in all tasks, as well as higher user ratings for enjoyment, spatial presence, immersion, vection intensity, usability, ease of learning, ease of use, and rated potential for daily and long-term use, while reducing motion sickness and task load. Overall, our results suggest that leaning-based interfaces such as HeadJoystick provide an interesting and more embodied alternative to handheld interfaces in driving, reach-the-target, and follow-the-path tasks, and potentially a wider range of scenarios.", "abstracts": [ { "abstractType": "Regular", "content": "Using standard handheld interfaces for VR locomotion may not provide a believable self-motion experience and can contribute to unwanted side effects such as motion sickness, disorientation, or increased cognitive load. This paper demonstrates how using a seated leaning-based locomotion interface &#x2013;HeadJoystick&#x2013; in VR ground-based navigation affects user experience, usability, and performance. In three within-subject studies, we compared controller (touchpad/thumbstick) with a more embodied interface (&#x201D;HeadJoystick&#x201D;) where users moved their head and/or leaned in the direction of desired locomotion. In both conditions, users sat on a regular office chair and used it to control virtual rotations. In the first study, 24 participants used HeadJoystick versus Controller in three complementary tasks including reach-the-target, follow-the-path, and racing (dynamic obstacle avoidance). In the second study, 18 participants repeatedly used HeadJoystick versus Controller (8 one-minute trials each) in a reach-the-target task. To evaluate potential benefits of different brake mechanisms, in the third study 18 participants were asked to stop within each target area for one second. 
All three studies consistently showed advantages of HeadJoystick over Controller: we observed improved performance in all tasks, as well as higher user ratings for enjoyment, spatial presence, immersion, vection intensity, usability, ease of learning, ease of use, and rated potential for daily and long-term use, while reducing motion sickness and task load. Overall, our results suggest that leaning-based interfaces such as HeadJoystick provide an interesting and more embodied alternative to handheld interfaces in driving, reach-the-target, and follow-the-path tasks, and potentially a wider range of scenarios.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Using standard handheld interfaces for VR locomotion may not provide a believable self-motion experience and can contribute to unwanted side effects such as motion sickness, disorientation, or increased cognitive load. This paper demonstrates how using a seated leaning-based locomotion interface –HeadJoystick– in VR ground-based navigation affects user experience, usability, and performance. In three within-subject studies, we compared controller (touchpad/thumbstick) with a more embodied interface (”HeadJoystick”) where users moved their head and/or leaned in the direction of desired locomotion. In both conditions, users sat on a regular office chair and used it to control virtual rotations. In the first study, 24 participants used HeadJoystick versus Controller in three complementary tasks including reach-the-target, follow-the-path, and racing (dynamic obstacle avoidance). In the second study, 18 participants repeatedly used HeadJoystick versus Controller (8 one-minute trials each) in a reach-the-target task. To evaluate potential benefits of different brake mechanisms, in the third study 18 participants were asked to stop within each target area for one second. All three studies consistently showed advantages of HeadJoystick over Controller: we observed improved performance in all tasks, as well as higher user ratings for enjoyment, spatial presence, immersion, vection intensity, usability, ease of learning, ease of use, and rated potential for daily and long-term use, while reducing motion sickness and task load. 
Overall, our results suggest that leaning-based interfaces such as HeadJoystick provide an interesting and more embodied alternative to handheld interfaces in driving, reach-the-target, and follow-the-path tasks, and potentially a wider range of scenarios.", "title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks", "normalizedTitle": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks", "fno": "09629264", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Collision Avoidance", "Ergonomics", "Human Computer Interaction", "Human Factors", "Interactive Devices", "Mobile Robots", "Navigation", "User Interfaces", "Virtual Reality", "Believable Self Motion Experience", "Controller", "Desired Locomotion", "Embodied Interface", "Follow The Path Tasks", "Higher User Ratings", "Increased Cognitive Load", "Interfaces Improve Ground Based VR Locomotion", "Leaning Based Interfaces", "Motion Sickness", "Racing Tasks", "Reach The Target Task", "Regular Office Chair", "Seated Leaning Based Locomotion Interface Head Joystick", "Standard Handheld Interfaces", "Study 18 Participants", "Task Load", "Unwanted Side Effects", "User Experience", "VR Ground Based Navigation", "Within Subject Studies", "Task Analysis", "Legged Locomotion", "Navigation", "User Experience", "Usability", "Throughput", "Brakes", "3 D User Interface", "Motion Sickness", "Cybersickness", "Locomotion", "Travel Techniques", "Virtual Reality" ], "authors": [ { "givenName": "Abraham M.", "surname": "Hashemian", "fullName": "Abraham M. Hashemian", "affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Burnaby, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Ashu", "surname": "Adhikari", "fullName": "Ashu Adhikari", "affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Burnaby, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Ernst", "surname": "Kruijff", "fullName": "Ernst Kruijff", "affiliation": "Institute of Visual Computing, Bonn-Rhein-Sieg University of Applied Sciences, Sankt Augustin, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Markus von der", "surname": "Heyde", "fullName": "Markus von der Heyde", "affiliation": "vdH-IT and the School of Interactive Arts & Technology, Simon Fraser University, Burnaby, BC, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Bernhard E.", "surname": "Riecke", "fullName": "Bernhard E. 
Riecke", "affiliation": "School of Interactive Arts & Technology, Simon Fraser University, Burnaby, BC, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1748-1768", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892253", "title": "Lean into it: Exploring leaning-based motion cueing interfaces for virtual reality movement", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892253/12OmNxETane", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/06/08580399", "title": "Virtual Locomotion: A Survey", "doi": null, "abstractUrl": "/journal/tg/2020/06/08580399/17D45VUZMU0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09793626", "title": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator", "doi": null, "abstractUrl": "/journal/tg/5555/01/09793626/1E5LEepCqTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08762207", "title": "Locomotion in Place in Virtual Reality: A Comparative Evaluation of Joystick, Teleport, and Leaning", "doi": null, "abstractUrl": "/journal/tg/2021/01/08762207/1bIeI0S82Aw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08809840", "title": "NaviBoard and NaviChair: Limited Translation Combined with Full Rotation for Efficient Virtual Locomotion", "doi": null, "abstractUrl": "/journal/tg/2021/01/08809840/1cHE3iFCYpy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798070", "title": "User-Centered Extension of a Locomotion Typology: Movement-Related Sensory Feedback and Spatial Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798070/1cJ18ja0QXC", 
"parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a346", "title": "Spring Stepper: A Seated VR Locomotion Controller", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a346/1oZBBswUSzK", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a395", "title": "Is Walking Necessary for Effective Locomotion and Interaction in VR?", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a395/1tnXRY815xS", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09627526", "articleId": "1yORLIqoJnq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09628050", "articleId": "1yXvIX3fy6I", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyUrIxxxC", "name": "ttg202303-09629264s1-supp1-3131422.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09629264s1-supp1-3131422.pdf", "extension": "pdf", "size": "780 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yXvIX3fy6I", "doi": "10.1109/TVCG.2021.3131230", "abstract": "We present a multi-sensor system for consistent 3D hand pose tracking and modeling that leverages the advantages of both wearable and optical sensors. Specifically, we employ a stretch-sensing soft glove and three IMUs in combination with an RGB-D camera. Different sensor modalities are fused based on the availability and confidence estimation, enabling seamless hand tracking in challenging environments with partial or even complete occlusion. To maximize the accuracy while maintaining high ease-of-use, we propose an automated user calibration that uses the RGB-D camera data to refine both the glove mapping model and the multi-IMU system parameters. Extensive experiments show that our setup outperforms the wearable-only approaches when the hand is in the field-of-view and outplays the camera-only methods when the hand is occluded.", "abstracts": [ { "abstractType": "Regular", "content": "We present a multi-sensor system for consistent 3D hand pose tracking and modeling that leverages the advantages of both wearable and optical sensors. Specifically, we employ a stretch-sensing soft glove and three IMUs in combination with an RGB-D camera. Different sensor modalities are fused based on the availability and confidence estimation, enabling seamless hand tracking in challenging environments with partial or even complete occlusion. To maximize the accuracy while maintaining high ease-of-use, we propose an automated user calibration that uses the RGB-D camera data to refine both the glove mapping model and the multi-IMU system parameters. Extensive experiments show that our setup outperforms the wearable-only approaches when the hand is in the field-of-view and outplays the camera-only methods when the hand is occluded.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a multi-sensor system for consistent 3D hand pose tracking and modeling that leverages the advantages of both wearable and optical sensors. Specifically, we employ a stretch-sensing soft glove and three IMUs in combination with an RGB-D camera. Different sensor modalities are fused based on the availability and confidence estimation, enabling seamless hand tracking in challenging environments with partial or even complete occlusion. To maximize the accuracy while maintaining high ease-of-use, we propose an automated user calibration that uses the RGB-D camera data to refine both the glove mapping model and the multi-IMU system parameters. 
Extensive experiments show that our setup outperforms the wearable-only approaches when the hand is in the field-of-view and outplays the camera-only methods when the hand is occluded.", "title": "Self-Calibrated Multi-Sensor Wearable for Hand Tracking and Modeling", "normalizedTitle": "Self-Calibrated Multi-Sensor Wearable for Hand Tracking and Modeling", "fno": "09628050", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Data Gloves", "Gesture Recognition", "Image Colour Analysis", "Object Tracking", "Optical Sensors", "Pose Estimation", "Sensor Fusion", "Soft Sensors", "Solid Modelling", "Wearable Sensors", "3 D Hand Pose Tracking", "Camera Only Methods", "Glove Mapping Model", "Multi IMU System Parameters", "Multisensor System", "Optical Sensors", "RGB D Camera", "Stretch Sensing Soft Glove", "Wearable Sensors", "Three Dimensional Displays", "Heating Systems", "Cameras", "Computational Modeling", "Calibration", "Solid Modeling", "Wearable Sensors", "Hand Tracking", "Wearable Sensors" ], "authors": [ { "givenName": "Nikhil", "surname": "Gosala", "fullName": "Nikhil Gosala", "affiliation": "Department of Computer Science, University of Freiburg, Freiburg im Breisgau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Fangjinhua", "surname": "Wang", "fullName": "Fangjinhua Wang", "affiliation": "Department of Computer Science, ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Zhaopeng", "surname": "Cui", "fullName": "Zhaopeng Cui", "affiliation": "State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hanxue", "surname": "Liang", "fullName": "Hanxue Liang", "affiliation": "Department of Computer Science, ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Oliver", "surname": "Glauser", "fullName": "Oliver Glauser", "affiliation": "Department of Computer Science, ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Shihao", "surname": "Wu", "fullName": "Shihao Wu", "affiliation": "Department of Computer Science, ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "Olga", "surname": "Sorkine-Hornung", "fullName": "Olga Sorkine-Hornung", "affiliation": "Department of Computer Science, ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1769-1784", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bsn/2011/4431/0/4431a200", "title": "Wireless Hand Gesture Capture through Wearable Passive Tag Sensing", "doi": null, "abstractUrl": "/proceedings-article/bsn/2011/4431a200/12OmNBAIAOf", "parentPublication": { "id": "proceedings/bsn/2011/4431/0", "title": "Wearable and Implantable Body Sensor Networks, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2013/3022/0/3022a436", "title": "Wearable Smartphone: Wearable Hybrid Framework for Hand and Foot Gesture Interaction on Smartphone", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a436/12OmNqIQSbm", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on 
Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204355", "title": "High level activity recognition using low resolution wearable vision", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204355/12OmNvT2p2e", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2004/2244/0/01410485", "title": "A robust hand tracking and gesture recognition method for wearable visual interfaces and its applications", "doi": null, "abstractUrl": "/proceedings-article/icig/2004/01410485/12OmNvlxJpQ", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a705", "title": "Real-Time Hand Pose Estimation from RGB-D Sensor", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a705/12OmNzlly47", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcrait/2022/8192/0/819200a232", "title": "Artificial Intelligence Hand Spatial Position Predictor Based on Data Gloves and Jetson Xavier NX", "doi": null, "abstractUrl": "/proceedings-article/gcrait/2022/819200a232/1Hcnbghxs5i", "parentPublication": { "id": "proceedings/gcrait/2022/8192/0", "title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798296", "title": "Development of Sensitive Glove Type Wearable Robot System", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798296/1cJ1ahxGiWc", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a380", "title": "A Preliminary Investigation into a Deep Learning Implementation for Hand Tracking on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a380/1qpzzXysef6", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a386", "title": "Detection-Guided 3D Hand Tracking for Mobile AR Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a386/1yeD59qEabK", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0446", "title": "Model-based 3D Hand Reconstruction via Self-Supervised Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0446/1yeJWXqdk3u", "parentPublication": { "id": 
"proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09629264", "articleId": "1yXvJdO9qaQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "09632437", "articleId": "1yYPmKqcmpq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyObLqjqE", "name": "ttg202303-09628050s1-supp1-3131230.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09628050s1-supp1-3131230.pdf", "extension": "pdf", "size": "1.23 MB", "__typename": "WebExtraType" }, { "id": "1KmyPd8Nv1K", "name": "ttg202303-09628050s1-supp2-3131230.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09628050s1-supp2-3131230.mp4", "extension": "mp4", "size": "229 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yYPmKqcmpq", "doi": "10.1109/TVCG.2021.3131712", "abstract": "3D reconstruction from single-view images is a long-standing research problem. There have been various methods based on point clouds and volumetric representations. In spite of success in 3D models generation, it is quite challenging for these approaches to deal with models with complex topology and fine geometric details. Thanks to the recent advance of deep shape representations, learning the structure and detail representation using deep neural networks is a promising direction. In this article, we propose a novel approach named STD-Net to reconstruct 3D models utilizing mesh representation that is well suited for characterizing complex structures and geometry details. Our method consists of (1) an auto-encoder network for recovering the structure of an object with bounding box representation from a single-view image; (2) a topology-adaptive GCN for updating vertex position for meshes of complex topology; and (3) a unified mesh deformation block that deforms the structural boxes into structure-aware meshes. Evaluation on ShapeNet and PartNet shows that STD-Net has better performance than state-of-the-art methods in reconstructing complex structures and fine geometric details.", "abstracts": [ { "abstractType": "Regular", "content": "3D reconstruction from single-view images is a long-standing research problem. There have been various methods based on point clouds and volumetric representations. In spite of success in 3D models generation, it is quite challenging for these approaches to deal with models with complex topology and fine geometric details. Thanks to the recent advance of deep shape representations, learning the structure and detail representation using deep neural networks is a promising direction. In this article, we propose a novel approach named STD-Net to reconstruct 3D models utilizing mesh representation that is well suited for characterizing complex structures and geometry details. Our method consists of (1) an auto-encoder network for recovering the structure of an object with bounding box representation from a single-view image; (2) a topology-adaptive GCN for updating vertex position for meshes of complex topology; and (3) a unified mesh deformation block that deforms the structural boxes into structure-aware meshes. Evaluation on ShapeNet and PartNet shows that STD-Net has better performance than state-of-the-art methods in reconstructing complex structures and fine geometric details.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D reconstruction from single-view images is a long-standing research problem. There have been various methods based on point clouds and volumetric representations. In spite of success in 3D models generation, it is quite challenging for these approaches to deal with models with complex topology and fine geometric details. Thanks to the recent advance of deep shape representations, learning the structure and detail representation using deep neural networks is a promising direction. 
In this article, we propose a novel approach named STD-Net to reconstruct 3D models utilizing mesh representation that is well suited for characterizing complex structures and geometry details. Our method consists of (1) an auto-encoder network for recovering the structure of an object with bounding box representation from a single-view image; (2) a topology-adaptive GCN for updating vertex position for meshes of complex topology; and (3) a unified mesh deformation block that deforms the structural boxes into structure-aware meshes. Evaluation on ShapeNet and PartNet shows that STD-Net has better performance than state-of-the-art methods in reconstructing complex structures and fine geometric details.", "title": "STD-Net: Structure-Preserving and Topology-Adaptive Deformation Network for Single-View 3D Reconstruction", "normalizedTitle": "STD-Net: Structure-Preserving and Topology-Adaptive Deformation Network for Single-View 3D Reconstruction", "fno": "09632437", "hasPdf": true, "idPrefix": "tg", "keywords": [ "CAD", "Computer Vision", "Geometry", "Graph Theory", "Image Recognition", "Image Reconstruction", "Image Representation", "Learning Artificial Intelligence", "Mesh Generation", "Neural Nets", "Object Recognition", "Solid Modelling", "Topology", "Auto Encoder Network", "Box Representation", "Complex Structures", "Complex Topology", "Deep Neural Networks", "Deep Shape Representations", "Fine Geometric Details", "Geometry Details", "Mesh Representation", "Point Clouds", "Recent Advance", "Research Problem", "Single View 3 D Reconstruction", "Single View Image", "STD Net", "Structural Boxes", "Structure Aware Meshes", "Structure Preserving", "Topology Adaptive Deformation Network", "Topology Adaptive GCN", "Unified Mesh Deformation Block", "Volumetric Representations", "Three Dimensional Displays", "Shape", "Image Reconstruction", "Solid Modeling", "Topology", "Periodic Structures", "Deep Learning", "Single View Reconstruction", "Deformation Driven Method", "Structure Preservation", "Topology Adaptivity" ], "authors": [ { "givenName": "Aihua", "surname": "Mao", "fullName": "Aihua Mao", "affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, Guangdong Province, China", "__typename": "ArticleAuthorType" }, { "givenName": "Canglan", "surname": "Dai", "fullName": "Canglan Dai", "affiliation": "School of Computer Science and Engineering, South China University of Technology, Guangzhou, Guangdong Province, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qing", "surname": "Liu", "fullName": "Qing Liu", "affiliation": "School of Software Engineering, South China University of Technology, Guangzhou, Guangdong Province, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jie", "surname": "Yang", "fullName": "Jie Yang", "affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lin", "surname": "Gao", "fullName": "Lin Gao", "affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ying", "surname": "He", "fullName": "Ying He", "affiliation": "School of Computer Science and Engineering, Nanyang Technological University, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Yong-Jin", "surname": 
"Liu", "fullName": "Yong-Jin Liu", "affiliation": "BNRist, MOE-Key Laboratory of Pervasive Computing, Department of Computer Science and Technology, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1785-1798", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cad-graphics/2013/2576/0/06815000", "title": "Inversion Free and Topology Compatible Tetrahedral Mesh Warping Driven by Boundary Surface Deformation", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06815000/12OmNBQC8er", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460081", "title": "Octree-Based Topology-Preserving Isosurface Simplification", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460081/12OmNBkfRla", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/2/195020923", "title": "Landmark-based Shape Deformation with Topology-Preserving Constraints", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195020923/12OmNC943EZ", "parentPublication": { "id": "proceedings/iccv/2003/1950/2", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d016", "title": "Mesh Deformation Based on Preserving Distances and Angles", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336d016/12OmNvEyR9C", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2007/2928/0/29280200", "title": "Multi-resolution Meshes Deformation Based on Pyramid Coordinates", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2007/29280200/12OmNzy7uQF", "parentPublication": { "id": "proceedings/cgiv/2007/2928/0", "title": "Computer Graphics, Imaging and Visualisation (CGIV 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/08/06171183", "title": "Detail-Preserving Controllable Deformation from Sparse Examples", "doi": null, "abstractUrl": "/journal/tg/2012/08/06171183/13rRUygBw76", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8551", "title": "Neural Template: Topology-aware Reconstruction and Disentangled Generation of 3D Meshes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8551/1H0NuMEsMaQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j963", "title": "Deep Mesh Reconstruction From Single RGB Images via Topology Modification Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j963/1hVlL4sHcYw", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/02/09583888", "title": "TopoCluster: A Localized Data Structure for Topology-Based Visualization", "doi": null, "abstractUrl": "/journal/tg/2023/02/09583888/1xSHOBpErN6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h449", "title": "TearingNet: Point Cloud Autoencoder to Learn Topology-Friendly Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h449/1yeIhIgTQxG", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09628050", "articleId": "1yXvIX3fy6I", "__typename": "AdjacentArticleType" }, "next": { "fno": "09632413", "articleId": "1yYPkLFxoTC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yYPkLFxoTC", "doi": "10.1109/TVCG.2021.3131824", "abstract": "We present RCMVis, a visual analytics system to support interactive Route Choice Modeling analysis. It aims to model which characteristics of routes, such as distance and the number of traffic lights, affect travelers&#x2019; route choice behaviors and how much they affect the choice during their trips. Through close collaboration with domain experts, we designed a visual analytics framework for Route Choice Modeling. The framework supports three interactive analysis stages: exploration, modeling, and reasoning. In the exploration stage, we help analysts interactively explore trip data from multiple origin-destination (OD) pairs and choose a subset of data they want to focus on. To this end, we provide coordinated multiple OD views with different foci that allow analysts to inspect, rank, and compare OD pairs in terms of their multidimensional attributes. In the modeling stage, we integrate a <inline-formula><tex-math notation=\"LaTeX\">Z_$k$_Z</tex-math></inline-formula>-medoids clustering method and a path-size logit model into our system to enable analysts to model route choice behaviors from trips with support for feature selection, hyperparameter tuning, and model comparison. Finally, in the reasoning stage, we help analysts rationalize and refine the model by selectively inspecting the trips that strongly support the modeling result. For evaluation, we conducted a case study and interviews with domain experts. The domain experts discovered unexpected insights from numerous modeling results, allowing them to explore the hyperparameter space more effectively to gain better results. In addition, they gained OD- and road-level insights into which data mainly supported the modeling result, enabling further discussion of the model.", "abstracts": [ { "abstractType": "Regular", "content": "We present RCMVis, a visual analytics system to support interactive Route Choice Modeling analysis. It aims to model which characteristics of routes, such as distance and the number of traffic lights, affect travelers&#x2019; route choice behaviors and how much they affect the choice during their trips. Through close collaboration with domain experts, we designed a visual analytics framework for Route Choice Modeling. The framework supports three interactive analysis stages: exploration, modeling, and reasoning. In the exploration stage, we help analysts interactively explore trip data from multiple origin-destination (OD) pairs and choose a subset of data they want to focus on. To this end, we provide coordinated multiple OD views with different foci that allow analysts to inspect, rank, and compare OD pairs in terms of their multidimensional attributes. In the modeling stage, we integrate a <inline-formula><tex-math notation=\"LaTeX\">$k$</tex-math><alternatives><mml:math><mml:mi>k</mml:mi></mml:math><inline-graphic xlink:href=\"seo-ieq1-3131824.gif\"/></alternatives></inline-formula>-medoids clustering method and a path-size logit model into our system to enable analysts to model route choice behaviors from trips with support for feature selection, hyperparameter tuning, and model comparison. 
Finally, in the reasoning stage, we help analysts rationalize and refine the model by selectively inspecting the trips that strongly support the modeling result. For evaluation, we conducted a case study and interviews with domain experts. The domain experts discovered unexpected insights from numerous modeling results, allowing them to explore the hyperparameter space more effectively to gain better results. In addition, they gained OD- and road-level insights into which data mainly supported the modeling result, enabling further discussion of the model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present RCMVis, a visual analytics system to support interactive Route Choice Modeling analysis. It aims to model which characteristics of routes, such as distance and the number of traffic lights, affect travelers’ route choice behaviors and how much they affect the choice during their trips. Through close collaboration with domain experts, we designed a visual analytics framework for Route Choice Modeling. The framework supports three interactive analysis stages: exploration, modeling, and reasoning. In the exploration stage, we help analysts interactively explore trip data from multiple origin-destination (OD) pairs and choose a subset of data they want to focus on. To this end, we provide coordinated multiple OD views with different foci that allow analysts to inspect, rank, and compare OD pairs in terms of their multidimensional attributes. In the modeling stage, we integrate a k-medoids clustering method and a path-size logit model into our system to enable analysts to model route choice behaviors from trips with support for feature selection, hyperparameter tuning, and model comparison. Finally, in the reasoning stage, we help analysts rationalize and refine the model by selectively inspecting the trips that strongly support the modeling result. For evaluation, we conducted a case study and interviews with domain experts. The domain experts discovered unexpected insights from numerous modeling results, allowing them to explore the hyperparameter space more effectively to gain better results. 
In addition, they gained OD- and road-level insights into which data mainly supported the modeling result, enabling further discussion of the model.", "title": "RCMVis: A Visual Analytics System for Route Choice Modeling", "normalizedTitle": "RCMVis: A Visual Analytics System for Route Choice Modeling", "fno": "09632413", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Pattern Clustering", "Road Traffic", "Transportation", "Domain Experts", "Exploration Stage", "Interactive Analysis Stages", "Model Comparison", "Model Route Choice", "Modeling Result", "Modeling Stage", "Multiple OD Views", "Multiple Origin Destination Pairs", "Numerous Modeling Results", "Path Size Logit Model", "RCM Vis", "Support Interactive Route Choice Modeling Analysis", "Travelers", "Trip Data", "Trips", "Visual Analytics Framework", "Visual Analytics System", "Analytical Models", "Visual Analytics", "Roads", "Data Models", "Trajectory", "Computational Modeling", "Data Visualization", "Route Choice Modeling", "Urban Planning", "Trajectory Data", "Origin Destination", "Visual Analytics" ], "authors": [ { "givenName": "DongHwa", "surname": "Shin", "fullName": "DongHwa Shin", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Jaemin", "surname": "Jo", "fullName": "Jaemin Jo", "affiliation": "College of Computing and Informatics, Sungkyunkwan University, Suwon, Gyeonggi-do, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Bohyoung", "surname": "Kim", "fullName": "Bohyoung Kim", "affiliation": "Division of Biomedical Engineering, Hankuk University of Foreign Studies, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Hyunjoo", "surname": "Song", "fullName": "Hyunjoo Song", "affiliation": "School of Computer Science and Engineering, Soongsil University, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Shin-Hyung", "surname": "Cho", "fullName": "Shin-Hyung Cho", "affiliation": "School of Civil and Environmental Engineering, Georgia Institute of Technology, Atlanta, GA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jinwook", "surname": "Seo", "fullName": "Jinwook Seo", "affiliation": "Department of Computer Science and Engineering, Seoul National University, Seoul, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1799-1817", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2006/0591/0/04035757", "title": "Toward a Multi-Analyst, Collaborative Framework for Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035757/12OmNqAU6pq", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2015/7599/0/07312767", "title": "A visual analytics loop for supporting model development", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2015/07312767/12OmNxTEiPj", "parentPublication": { "id": "proceedings/vizsec/2015/7599/0", "title": "2015 IEEE Symposium on Visualization for Cyber Security 
(VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2015/9783/0/07347679", "title": "Visual analysis of route choice behaviour based on GPS trajectories", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347679/12OmNyKJiqF", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156361", "title": "OD-Wheel: Visual design to explore OD patterns of a central region", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156361/12OmNyfdORA", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070022", "title": "Visual Analytics Infrastructures: From Data Management to Exploration", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070022/13rRUx0gelz", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070047", "title": "Real-Time Visual Analytics for Text Streams", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070047/13rRUxAStVJ", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070030", "title": "Visual Analytics Support for Intelligence Analysis", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876049", "title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a174", "title": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a174/1cMF7meccAo", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a121", "title": "A Mixed-Initiative Visual Analytics Approach for Qualitative Causal Modeling", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a121/1yXubl1hwk0", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09632437", "articleId": "1yYPmKqcmpq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09640557", "articleId": "1z98whOAZ4Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmySZRVFRu", "name": "ttg202303-09632413s1-supp1-3131824.pdf", "location": 
"https://www.computer.org/csdl/api/v1/extra/ttg202303-09632413s1-supp1-3131824.pdf", "extension": "pdf", "size": "176 kB", "__typename": "WebExtraType" }, { "id": "1KmySIcPe2A", "name": "ttg202303-09632413s1-supp2-3131824.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09632413s1-supp2-3131824.mp4", "extension": "mp4", "size": "211 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1z98whOAZ4Q", "doi": "10.1109/TVCG.2021.3133081", "abstract": "We present a deep learning-based method for propagating spatially-varying visual material attributes (e.g., texture maps or image stylizations) to larger samples of the same or similar materials. For training, we leverage images of the material taken under multiple illuminations and a dedicated data augmentation policy, making the transfer robust to novel illumination conditions and affine deformations. Our model relies on a supervised image-to-image translation framework and is agnostic to the transferred domain; we showcase a semantic segmentation, a normal map, and a stylization. Following an image analogies approach, the method only requires the training data to contain the same visual structures as the input guidance. Our approach works at interactive rates, making it suitable for material edit applications. We thoroughly evaluate our learning methodology in a controlled setup providing quantitative measures of performance. Last, we demonstrate that training the model on a single material is enough to generalize to materials of the same type without the need for massive datasets.", "abstracts": [ { "abstractType": "Regular", "content": "We present a deep learning-based method for propagating spatially-varying visual material attributes (e.g., texture maps or image stylizations) to larger samples of the same or similar materials. For training, we leverage images of the material taken under multiple illuminations and a dedicated data augmentation policy, making the transfer robust to novel illumination conditions and affine deformations. Our model relies on a supervised image-to-image translation framework and is agnostic to the transferred domain; we showcase a semantic segmentation, a normal map, and a stylization. Following an image analogies approach, the method only requires the training data to contain the same visual structures as the input guidance. Our approach works at interactive rates, making it suitable for material edit applications. We thoroughly evaluate our learning methodology in a controlled setup providing quantitative measures of performance. Last, we demonstrate that training the model on a single material is enough to generalize to materials of the same type without the need for massive datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a deep learning-based method for propagating spatially-varying visual material attributes (e.g., texture maps or image stylizations) to larger samples of the same or similar materials. For training, we leverage images of the material taken under multiple illuminations and a dedicated data augmentation policy, making the transfer robust to novel illumination conditions and affine deformations. Our model relies on a supervised image-to-image translation framework and is agnostic to the transferred domain; we showcase a semantic segmentation, a normal map, and a stylization. Following an image analogies approach, the method only requires the training data to contain the same visual structures as the input guidance. Our approach works at interactive rates, making it suitable for material edit applications. 
We thoroughly evaluate our learning methodology in a controlled setup providing quantitative measures of performance. Last, we demonstrate that training the model on a single material is enough to generalize to materials of the same type without the need for massive datasets.", "title": "Neural Photometry-Guided Visual Attribute Transfer", "normalizedTitle": "Neural Photometry-Guided Visual Attribute Transfer", "fno": "09640557", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Deep Learning Artificial Intelligence", "Image Segmentation", "Image Texture", "Photometry", "Affine Deformations", "Dedicated Data Augmentation Policy", "Deep Learning Based Method", "Illumination Conditions", "Image Analogies Approach", "Image Stylizations", "Learning Methodology", "Material Edit Applications", "Multiple Illuminations", "Neural Photometry Guided Visual Attribute Transfer", "Normal Map", "Quantitative Measures", "Semantic Segmentation", "Supervised Image To Image Translation Framework", "Texture Maps", "Transferred Domain", "Visual Structures", "Visualization", "Lighting", "Training", "Semantics", "Image Segmentation", "Image Color Analysis", "Geometry", "Artificial Intelligence", "Artificial Neural Network", "Machine Vision", "Image Texture", "Graphics", "Computational Photography" ], "authors": [ { "givenName": "Carlos", "surname": "Rodriguez-Pardo", "fullName": "Carlos Rodriguez-Pardo", "affiliation": "SEDDI, Madrid, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Elena", "surname": "Garces", "fullName": "Elena Garces", "affiliation": "SEDDI, Madrid, Spain", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1818-1830", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032f180", "title": "What is Around the Camera?", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f180/12OmNCdBDIs", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552962", "title": "Shape-guided segmentation for fine-grained visual categorization", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552962/12OmNvkpl3Z", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118217", "title": "Color image segmentation with detection of highlights and local illumination induced by inter-reflections", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118217/12OmNwNeYuM", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a027", "title": "Instant Mixed Reality Lighting from Casual Scanning", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a027/12OmNx5GTYC", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed 
and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2014/4284/0/4284a122", "title": "Color Transfer into Scenes", "doi": null, "abstractUrl": "/proceedings-article/icdh/2014/4284a122/12OmNxxvAKJ", "parentPublication": { "id": "proceedings/icdh/2014/4284/0", "title": "2014 5th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a022", "title": "Joint Material and Illumination Estimation from Photo Sets in the Wild", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a022/17D45XuDNHq", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8541", "title": "PhotoScene: Photorealistic Material and Lighting Transfer for Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8541/1H1nmFHmaoE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2020/9891/0/09108684", "title": "Geometry-Guided Adaptation for Road Segmentation", "doi": null, "abstractUrl": "/proceedings-article/crv/2020/09108684/1kpIFuyIHxm", "parentPublication": { "id": "proceedings/crv/2020/9891/0", "title": "2020 17th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/springsim/2020/370/0/09185423", "title": "An Object State Estimation for the Peg Transfer Task in Computer-Guided Surgical Training", "doi": null, "abstractUrl": "/proceedings-article/springsim/2020/09185423/1mP5YRCw86I", "parentPublication": { "id": "proceedings/springsim/2020/370/0", "title": "2020 Spring Simulation Conference (SpringSim)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09632413", "articleId": "1yYPkLFxoTC", "__typename": "AdjacentArticleType" }, "next": { "fno": "09645242", "articleId": "1zc6DjegSGY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyRuRsAqQ", "name": "ttg202303-09640557s1-supp1-3133081.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09640557s1-supp1-3133081.pdf", "extension": "pdf", "size": "32.1 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1zc6DjegSGY", "doi": "10.1109/TVCG.2021.3133511", "abstract": "Augmented Reality (AR) embeds digital information into objects of the physical world. Data can be shown <italic>in-situ</italic>, thereby enabling real-time visual comparisons and object search in real-life user tasks, such as comparing products and looking up scores in a sports game. While there have been studies on designing AR interfaces for situated information retrieval, there has only been limited research on AR object labeling for visual search tasks in the spatial environment. In this article, we identify and categorize different design aspects in AR label design and report on a formal user study on labels for out-of-view objects to support visual search tasks in AR. We design three visualization techniques for out-of-view object labeling in AR, which respectively encode the relative physical position (height-encoded), the rotational direction (angle-encoded), and the label values (value-encoded) of the objects. We further implement two traditional in-view object labeling techniques, where labels are placed either next to the respective objects (situated) or at the edge of the AR FoV (boundary). We evaluate these five different label conditions in three visual search tasks for static objects. Our study shows that out-of-view object labels are beneficial when searching for objects outside the FoV, spatial orientation, and when comparing multiple spatially sparse objects. Angle-encoded labels with directional cues of the surrounding objects have the overall best performance with the highest user satisfaction. We discuss the implications of our findings for future immersive AR interface design.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) embeds digital information into objects of the physical world. Data can be shown <italic>in-situ</italic>, thereby enabling real-time visual comparisons and object search in real-life user tasks, such as comparing products and looking up scores in a sports game. While there have been studies on designing AR interfaces for situated information retrieval, there has only been limited research on AR object labeling for visual search tasks in the spatial environment. In this article, we identify and categorize different design aspects in AR label design and report on a formal user study on labels for out-of-view objects to support visual search tasks in AR. We design three visualization techniques for out-of-view object labeling in AR, which respectively encode the relative physical position (height-encoded), the rotational direction (angle-encoded), and the label values (value-encoded) of the objects. We further implement two traditional in-view object labeling techniques, where labels are placed either next to the respective objects (situated) or at the edge of the AR FoV (boundary). We evaluate these five different label conditions in three visual search tasks for static objects. Our study shows that out-of-view object labels are beneficial when searching for objects outside the FoV, spatial orientation, and when comparing multiple spatially sparse objects. 
Angle-encoded labels with directional cues of the surrounding objects have the overall best performance with the highest user satisfaction. We discuss the implications of our findings for future immersive AR interface design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) embeds digital information into objects of the physical world. Data can be shown in-situ, thereby enabling real-time visual comparisons and object search in real-life user tasks, such as comparing products and looking up scores in a sports game. While there have been studies on designing AR interfaces for situated information retrieval, there has only been limited research on AR object labeling for visual search tasks in the spatial environment. In this article, we identify and categorize different design aspects in AR label design and report on a formal user study on labels for out-of-view objects to support visual search tasks in AR. We design three visualization techniques for out-of-view object labeling in AR, which respectively encode the relative physical position (height-encoded), the rotational direction (angle-encoded), and the label values (value-encoded) of the objects. We further implement two traditional in-view object labeling techniques, where labels are placed either next to the respective objects (situated) or at the edge of the AR FoV (boundary). We evaluate these five different label conditions in three visual search tasks for static objects. Our study shows that out-of-view object labels are beneficial when searching for objects outside the FoV, spatial orientation, and when comparing multiple spatially sparse objects. Angle-encoded labels with directional cues of the surrounding objects have the overall best performance with the highest user satisfaction. We discuss the implications of our findings for future immersive AR interface design.", "title": "Labeling Out-of-View Objects in Immersive Analytics to Support Situated Visual Searching", "normalizedTitle": "Labeling Out-of-View Objects in Immersive Analytics to Support Situated Visual Searching", "fno": "09645242", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Data Analysis", "Data Visualisation", "Encoding", "Information Retrieval", "Natural Language Processing", "Sport", "User Interfaces", "Angle Encoded Labels", "AR Fo V", "AR Interface Design", "AR Object Labeling", "Augmented Reality", "Digital Information", "Immersive Analytics", "Multiple Spatially Sparse Objects", "Out Of View Object Labeling Techniques", "Situated Information Retrieval", "Sports Game", "Static Objects", "Surrounding Objects", "Visual Search Tasks", "Visualization Techniques", "Visualization", "Labeling", "Task Analysis", "Search Problems", "Data Visualization", "Three Dimensional Displays", "Clutter", "Object Labeling", "Mixed Augmented Reality", "Immersive Analytics", "Situated Analytics", "Data Visualization" ], "authors": [ { "givenName": "Tica", "surname": "Lin", "fullName": "Tica Lin", "affiliation": "John A. Paulson School of Engineering and Applied Sciences, Harvard University, Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yalong", "surname": "Yang", "fullName": "Yalong Yang", "affiliation": "Department of Computer Science, Virginia Tech, Blacksburg, VA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Johanna", "surname": "Beyer", "fullName": "Johanna Beyer", "affiliation": "John A. 
Paulson School of Engineering and Applied Sciences, Harvard University, Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hanspeter", "surname": "Pfister", "fullName": "Hanspeter Pfister", "affiliation": "John A. Paulson School of Engineering and Applied Sciences, Harvard University, Cambridge, MA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1831-1844", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2014/2874/0/2874a243", "title": "Boundary Labeling Methods for Dynamic Focus Regions", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a243/12OmNBOllhJ", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f910", "title": "Unsupervised Semantic Scene Labeling for Streaming Data", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f910/12OmNsdo6q1", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b178", "title": "Scene Labeling Using Beam Search under Mutex Constraints", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b178/12OmNyQphbb", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761504", "title": "Object labeling for recognition using vocabulary trees", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761504/12OmNzRHOOe", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874390", "title": "Investigating Search Among Physical and Virtual Objects Under Different Lighting Conditions", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874390/1GjwKHZsfIc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2023/2056/0/205600a155", "title": "Discriminative Sampling of Proposals in Self-Supervised Transformers for Weakly Supervised Object Localization", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2023/205600a155/1KzyZTHx2sE", "parentPublication": { "id": "proceedings/wacvw/2023/2056/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797804", "title": "Semantic Labeling and Object Registration for Augmented Reality Language Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797804/1cJ0NXcPJGo", "parentPublication": { "id": 
"proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798358", "title": "In-Situ Labeling for Augmented Reality Language Learning", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798358/1cJ0VFN6eIw", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09207965", "title": "Mixed Labeling: Integrating Internal and External Labels", "doi": null, "abstractUrl": "/journal/tg/2022/04/09207965/1nuwBNaxzjy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cds/2021/0428/0/042800a296", "title": "Active Object Searching on Mobile Robot Using Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/cds/2021/042800a296/1uZxzf21kC4", "parentPublication": { "id": "proceedings/cds/2021/0428/0", "title": "2021 2nd International Conference on Computing and Data Science (CDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09640557", "articleId": "1z98whOAZ4Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "09645189", "articleId": "1zc6CdFskcU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1zc6CdFskcU", "doi": "10.1109/TVCG.2021.3134105", "abstract": "Restoring high-fidelity textures for 3D reconstructed models are an increasing demand in AR/VR, cultural heritage protection, entertainment, and other relevant fields. Due to geometric errors and camera pose drifting, existing texture mapping algorithms are either plagued by blurring and ghosting or suffer from undesirable visual seams. In this paper, we propose a novel tri-directional similarity texture synthesis method to eliminate the texture inconsistency in RGB-D 3D reconstruction and generate visually realistic texture mapping results. In addition to RGB color information, we incorporate a novel color image texture detail layer serving as an additional context to improve the effectiveness and robustness of the proposed method. First, we select an optimal texture image for each triangle face of the reconstructed model to avoid texture blurring and ghosting. During the selection procedure, the texture details are weighted to avoid generating texture chart partitions across high-frequency areas. Then, we optimize the camera pose of each texture image to align with the reconstructed 3D shape. Next, we propose a tri-directional similarity function to resynthesize the image context within the boundary stripe of texture charts, which can significantly diminish the occurrence of texture seams. Finally, we introduce a global color harmonization method to address the color inconsistency between texture images captured from different viewpoints. The experimental results demonstrate that the proposed method outperforms state-of-the-art texture mapping methods and effectively overcomes texture tearing, blurring, and ghosting artifacts.", "abstracts": [ { "abstractType": "Regular", "content": "Restoring high-fidelity textures for 3D reconstructed models are an increasing demand in AR/VR, cultural heritage protection, entertainment, and other relevant fields. Due to geometric errors and camera pose drifting, existing texture mapping algorithms are either plagued by blurring and ghosting or suffer from undesirable visual seams. In this paper, we propose a novel tri-directional similarity texture synthesis method to eliminate the texture inconsistency in RGB-D 3D reconstruction and generate visually realistic texture mapping results. In addition to RGB color information, we incorporate a novel color image texture detail layer serving as an additional context to improve the effectiveness and robustness of the proposed method. First, we select an optimal texture image for each triangle face of the reconstructed model to avoid texture blurring and ghosting. During the selection procedure, the texture details are weighted to avoid generating texture chart partitions across high-frequency areas. Then, we optimize the camera pose of each texture image to align with the reconstructed 3D shape. Next, we propose a tri-directional similarity function to resynthesize the image context within the boundary stripe of texture charts, which can significantly diminish the occurrence of texture seams. Finally, we introduce a global color harmonization method to address the color inconsistency between texture images captured from different viewpoints. 
The experimental results demonstrate that the proposed method outperforms state-of-the-art texture mapping methods and effectively overcomes texture tearing, blurring, and ghosting artifacts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Restoring high-fidelity textures for 3D reconstructed models are an increasing demand in AR/VR, cultural heritage protection, entertainment, and other relevant fields. Due to geometric errors and camera pose drifting, existing texture mapping algorithms are either plagued by blurring and ghosting or suffer from undesirable visual seams. In this paper, we propose a novel tri-directional similarity texture synthesis method to eliminate the texture inconsistency in RGB-D 3D reconstruction and generate visually realistic texture mapping results. In addition to RGB color information, we incorporate a novel color image texture detail layer serving as an additional context to improve the effectiveness and robustness of the proposed method. First, we select an optimal texture image for each triangle face of the reconstructed model to avoid texture blurring and ghosting. During the selection procedure, the texture details are weighted to avoid generating texture chart partitions across high-frequency areas. Then, we optimize the camera pose of each texture image to align with the reconstructed 3D shape. Next, we propose a tri-directional similarity function to resynthesize the image context within the boundary stripe of texture charts, which can significantly diminish the occurrence of texture seams. Finally, we introduce a global color harmonization method to address the color inconsistency between texture images captured from different viewpoints. The experimental results demonstrate that the proposed method outperforms state-of-the-art texture mapping methods and effectively overcomes texture tearing, blurring, and ghosting artifacts.", "title": "Seamless Texture Optimization for RGB-D Reconstruction", "normalizedTitle": "Seamless Texture Optimization for RGB-D Reconstruction", "fno": "09645189", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Colour Analysis", "Image Reconstruction", "Image Texture", "Optimisation", "Solid Modelling", "Stereo Image Processing", "3 D Reconstructed Models", "Camera Pose Drifting", "Color Image Texture Detail Layer", "Color Inconsistency", "Ghosting Artifacts", "Global Color Harmonization Method", "High Fidelity Textures", "High Frequency Areas", "Image Context", "Novel Tri Directional Similarity Texture Synthesis Method", "Optimal Texture Image", "RGB Color Information", "RGB D 3 D Reconstruction", "Seamless Texture Optimization", "Texture Blurring", "Texture Chart Partitions", "Texture Inconsistency", "Texture Mapping Algorithms", "Texture Seams", "Texture Tearing", "Tri Directional Similarity Function", "Visual Seams", "Visually Realistic Texture Mapping Results", "Image Reconstruction", "Cameras", "Image Color Analysis", "Three Dimensional Displays", "Geometry", "Color", "Solid Modeling", "3 D Reconstruction", "RGB D Reconstruction", "Texture Mapping", "Texture Optimization" ], "authors": [ { "givenName": "Yanping", "surname": "Fu", "fullName": "Yanping Fu", "affiliation": "Anhui Provincial Key Laboratory of Multimodal Cognitive Computation, School of Computer Science and Technology, Anhui University, Hefei, Anhui, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qingan", "surname": "Yan", "fullName": "Qingan Yan", "affiliation": "InnoPeak Technology, Inc., Palo Alto, CA, USA", 
"__typename": "ArticleAuthorType" }, { "givenName": "Jie", "surname": "Liao", "fullName": "Jie Liao", "affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Huajian", "surname": "Zhou", "fullName": "Huajian Zhou", "affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jin", "surname": "Tang", "fullName": "Jin Tang", "affiliation": "Anhui Provincial Key Laboratory of Multimodal Cognitive Computation, School of Computer Science and Technology, Anhui University, Hefei, Anhui, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chunxia", "surname": "Xiao", "fullName": "Chunxia Xiao", "affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1845-1859", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2008/2174/0/04761913", "title": "Seamless image-based texture atlases using multi-band blending", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761913/12OmNwBjP1L", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isccs/2011/4443/0/4443a287", "title": "Fast Texture Synthesis Using Feature Matching", "doi": null, "abstractUrl": "/proceedings-article/isccs/2011/4443a287/12OmNyO8tOn", "parentPublication": { "id": "proceedings/isccs/2011/4443/0", "title": "Computer Science and Society, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a039", "title": "Towards Illumination-Invariant 3D Reconstruction Using ToF RGB-D Cameras", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a039/12OmNzT7OpK", "parentPublication": { "id": "3dv/2014/7000/1", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e645", "title": "Texture Mapping for 3D Reconstruction with RGB-D Sensor", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e645/17D45Wuc36V", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b413", "title": "3D Reconstruction and Texture Optimization Using a Sparse Set of RGB-D Cameras", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b413/18j8FdScGbe", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2018/8497/0/849700a001", "title": "Keyframe-Based Texture Mapping for RGBD Human Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2018/849700a001/1a3x6hGWsso", 
"parentPublication": { "id": "proceedings/icvrv/2018/8497/0", "title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b269", "title": "TextureFusion: High-Quality Texture Acquisition for Real-Time RGB-D Scanning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b269/1m3obd1zLG0", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f949", "title": "Joint Texture and Geometry Optimization for RGB-D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f949/1m3ogA88vw4", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b556", "title": "Adversarial Texture Optimization From RGB-D Scans", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b556/1m3onF36nBe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/03/09184935", "title": "Real-Time Globally Consistent Dense 3D Reconstruction With Online Texturing", "doi": null, "abstractUrl": "/journal/tp/2022/03/09184935/1mNmW14Jo5O", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09645242", "articleId": "1zc6DjegSGY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09645360", "articleId": "1zc6DFbD4wo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyPSvYvuM", "name": "ttg202303-09645189s1-supp1-3134105.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645189s1-supp1-3134105.pdf", "extension": "pdf", "size": "2.53 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1zc6DFbD4wo", "doi": "10.1109/TVCG.2021.3133592", "abstract": "Immersive virtual reality environments are gaining popularity for studying and exploring crowded three-dimensional structures. When reaching very high structural densities, the natural depiction of the scene produces impenetrable clutter and requires visibility and occlusion management strategies for exploration and orientation. Strategies developed to address the crowdedness in desktop applications, however, inhibit the feeling of immersion. They result in nonimmersive, desktop-style outside-in viewing in virtual reality. This article proposes <italic>Nanotilus</italic>&#x2014;a new visibility and guidance approach for very dense environments that generates an <italic>endoscopic</italic> inside-out experience instead of outside-in viewing, preserving the immersive aspect of virtual reality. The approach consists of two novel, tightly coupled mechanisms that control scene sparsification simultaneously with camera path planning. The sparsification strategy is localized around the camera and is realized as a multi-scale, multi-shell, variety-preserving technique. When Nanotilus dives into the structures to capture internal details residing on multiple scales, it guides the camera using depth-based path planning. In addition to sparsification and path planning, we complete the tour generation with an animation controller, textual annotation, and text-to-visualization conversion. We demonstrate the generated guided tours on mesoscopic biological models &#x2013; SARS-CoV-2 and HIV. We evaluate the Nanotilus experience with a baseline outside-in sparsification and navigational technique in a formal user study with 29 participants. While users can maintain a better overview using the outside-in sparsification, the study confirms our hypothesis that Nanotilus leads to stronger engagement and immersion.", "abstracts": [ { "abstractType": "Regular", "content": "Immersive virtual reality environments are gaining popularity for studying and exploring crowded three-dimensional structures. When reaching very high structural densities, the natural depiction of the scene produces impenetrable clutter and requires visibility and occlusion management strategies for exploration and orientation. Strategies developed to address the crowdedness in desktop applications, however, inhibit the feeling of immersion. They result in nonimmersive, desktop-style outside-in viewing in virtual reality. This article proposes <italic>Nanotilus</italic>&#x2014;a new visibility and guidance approach for very dense environments that generates an <italic>endoscopic</italic> inside-out experience instead of outside-in viewing, preserving the immersive aspect of virtual reality. The approach consists of two novel, tightly coupled mechanisms that control scene sparsification simultaneously with camera path planning. The sparsification strategy is localized around the camera and is realized as a multi-scale, multi-shell, variety-preserving technique. When Nanotilus dives into the structures to capture internal details residing on multiple scales, it guides the camera using depth-based path planning. 
In addition to sparsification and path planning, we complete the tour generation with an animation controller, textual annotation, and text-to-visualization conversion. We demonstrate the generated guided tours on mesoscopic biological models &#x2013; SARS-CoV-2 and HIV. We evaluate the Nanotilus experience with a baseline outside-in sparsification and navigational technique in a formal user study with 29 participants. While users can maintain a better overview using the outside-in sparsification, the study confirms our hypothesis that Nanotilus leads to stronger engagement and immersion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Immersive virtual reality environments are gaining popularity for studying and exploring crowded three-dimensional structures. When reaching very high structural densities, the natural depiction of the scene produces impenetrable clutter and requires visibility and occlusion management strategies for exploration and orientation. Strategies developed to address the crowdedness in desktop applications, however, inhibit the feeling of immersion. They result in nonimmersive, desktop-style outside-in viewing in virtual reality. This article proposes Nanotilus—a new visibility and guidance approach for very dense environments that generates an endoscopic inside-out experience instead of outside-in viewing, preserving the immersive aspect of virtual reality. The approach consists of two novel, tightly coupled mechanisms that control scene sparsification simultaneously with camera path planning. The sparsification strategy is localized around the camera and is realized as a multi-scale, multi-shell, variety-preserving technique. When Nanotilus dives into the structures to capture internal details residing on multiple scales, it guides the camera using depth-based path planning. In addition to sparsification and path planning, we complete the tour generation with an animation controller, textual annotation, and text-to-visualization conversion. We demonstrate the generated guided tours on mesoscopic biological models – SARS-CoV-2 and HIV. We evaluate the Nanotilus experience with a baseline outside-in sparsification and navigational technique in a formal user study with 29 participants. 
While users can maintain a better overview using the outside-in sparsification, the study confirms our hypothesis that Nanotilus leads to stronger engagement and immersion.", "title": "Nanotilus: Generator of Immersive Guided-Tours in Crowded 3D Environments", "normalizedTitle": "Nanotilus: Generator of Immersive Guided-Tours in Crowded 3D Environments", "fno": "09645360", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Data Visualisation", "Path Planning", "Virtual Reality", "Animation Controller", "Camera Path Planning", "Control Scene Sparsification", "Crowded 3 D Environments", "Dense Environments", "Depth Based Path Planning", "Desktop Applications", "Experience Instead", "Formal User Study", "Generated Guided Tours", "Guidance Approach", "High Structural Densities", "Immersive Aspect", "Immersive Guided Tours", "Immersive Virtual Reality Environments", "Impenetrable Clutter", "Mesoscopic Biological Models SARS Co V 2", "Multiscale", "Nanotilus Dives", "Nanotilus Experience", "Natural Depiction", "Occlusion Management Strategies", "Sparsification Strategy", "Stronger Engagement", "Studying Exploring Crowded Three Dimensional Structures", "Tightly Coupled Mechanisms", "Tour Generation", "Variety Preserving Technique", "Viewing", "Visibility", "Solid Modeling", "Nanobioscience", "Biological System Modeling", "Three Dimensional Displays", "Navigation", "Cameras", "Data Visualization", "VR Immersive", "Visibility Management", "Path Planning", "Storytelling", "Visualization" ], "authors": [ { "givenName": "Ruwayda", "surname": "Alharbi", "fullName": "Ruwayda Alharbi", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" }, { "givenName": "Ondřej", "surname": "Strnad", "fullName": "Ondřej Strnad", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" }, { "givenName": "Laura R.", "surname": "Luidolt", "fullName": "Laura R. 
Luidolt", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Manuela", "surname": "Waldner", "fullName": "Manuela Waldner", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Kouřil", "fullName": "David Kouřil", "affiliation": "Masaryk University, Brno, Czech Republic", "__typename": "ArticleAuthorType" }, { "givenName": "Ciril", "surname": "Bohak", "fullName": "Ciril Bohak", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" }, { "givenName": "Tobias", "surname": "Klein", "fullName": "Tobias Klein", "affiliation": "Nanographics, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Eduard", "surname": "Gröller", "fullName": "Eduard Gröller", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Ivan", "surname": "Viola", "fullName": "Ivan Viola", "affiliation": "King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1860-1875", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/kelvar/2016/2344/0/07563675", "title": "Neozoa: An immersive, interactive sandbox for the study of competing", "doi": null, "abstractUrl": "/proceedings-article/kelvar/2016/07563675/12OmNrAdsxl", "parentPublication": { "id": "proceedings/kelvar/2016/2344/0", "title": "2016 IEEE Virtual Reality Workshop on K-12 Embodied Learning through Virtual & Augmented Reality (KELVAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892331", "title": "Advertising perception with immersive virtual reality devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892291", "title": "An immersive approach to visualizing perceptual disturbances", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892291/12OmNzGlRCn", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446124", "title": "Virtual Immersion: Simulating Immersive Experiences in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446124/13bd1fZBGcL", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/05/mcg2014050014", "title": "Quo Vadis CAVE: Does Immersive Visualization Still Matter?", "doi": null, "abstractUrl": "/magazine/cg/2014/05/mcg2014050014/13rRUNvgzcu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798256", "title": 
"[DC] Joint Locomotion with Virtual Agents in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798256/1cJ0W1LjdW8", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797909", "title": "MOSIS: Immersive Virtual Field Environments for Earth Sciences", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797909/1cJ182qfnpK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09524465", "title": "Geometry-Guided Dense Perspective Network for Speech-Driven Facial Animation", "doi": null, "abstractUrl": "/journal/tg/2022/12/09524465/1wpqCsqBU6Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2022/06/09580681", "title": "Modeling and Defense of Social Virtual Reality Attacks Inducing Cybersickness", "doi": null, "abstractUrl": "/journal/tq/2022/06/09580681/1xPo5KfQN1K", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09664291", "title": "EHTask: Recognizing User Tasks From Eye and Head Movements in Immersive Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2023/04/09664291/1zHDIPIlNBe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09645189", "articleId": "1zc6CdFskcU", "__typename": "AdjacentArticleType" }, "next": { "fno": "09645173", "articleId": "1zc6CvdsNMc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyVqR6Sxa", "name": "ttg202303-09645360s1-supp2-3133592.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645360s1-supp2-3133592.mp4", "extension": "mp4", "size": "46.8 MB", "__typename": "WebExtraType" }, { "id": "1KmyVYNSw1y", "name": "ttg202303-09645360s1-supp1-3133592.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645360s1-supp1-3133592.mp4", "extension": "mp4", "size": "49.3 MB", "__typename": "WebExtraType" }, { "id": "1KmyVRP0eFa", "name": "ttg202303-09645360s1-supp5-3133592.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645360s1-supp5-3133592.pdf", "extension": "pdf", "size": "666 kB", "__typename": "WebExtraType" }, { "id": "1KmyVB4FyRa", "name": "ttg202303-09645360s1-supp3-3133592.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645360s1-supp3-3133592.mp4", "extension": "mp4", "size": "46.6 MB", "__typename": "WebExtraType" }, { "id": "1KmyVNIXffq", "name": "ttg202303-09645360s1-supp4-3133592.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645360s1-supp4-3133592.pdf", "extension": "pdf", "size": "5.52 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1zc6CvdsNMc", "doi": "10.1109/TVCG.2021.3134083", "abstract": "We present the framework <italic>GUCCI</italic> (Guided Cardiac Cohort Investigation), which provides a guided visual analytics workflow to analyze cohort-based measured blood flow data in the aorta. In the past, many specialized techniques have been developed for the visual exploration of such data sets for a better understanding of the influence of morphological and hemodynamic conditions on cardiovascular diseases. However, there is a lack of dedicated techniques that allow visual comparison of multiple data sets and defined cohorts, which is essential to characterize pathologies. <italic>GUCCI</italic> offers visual analytics techniques and novel visualization methods to guide the user through the comparison of predefined cohorts, such as healthy volunteers and patients with a pathologically altered aorta. The combination of overview and glyph-based depictions together with statistical cohort-specific information allows investigating differences and similarities of the time-dependent data. Our framework was evaluated in a qualitative user study with three radiologists specialized in cardiac imaging and two experts in medical blood flow visualization. They were able to discover cohort-specific characteristics, which supports the derivation of standard values as well as the assessment of pathology-related severity and the need for treatment.", "abstracts": [ { "abstractType": "Regular", "content": "We present the framework <italic>GUCCI</italic> (Guided Cardiac Cohort Investigation), which provides a guided visual analytics workflow to analyze cohort-based measured blood flow data in the aorta. In the past, many specialized techniques have been developed for the visual exploration of such data sets for a better understanding of the influence of morphological and hemodynamic conditions on cardiovascular diseases. However, there is a lack of dedicated techniques that allow visual comparison of multiple data sets and defined cohorts, which is essential to characterize pathologies. <italic>GUCCI</italic> offers visual analytics techniques and novel visualization methods to guide the user through the comparison of predefined cohorts, such as healthy volunteers and patients with a pathologically altered aorta. The combination of overview and glyph-based depictions together with statistical cohort-specific information allows investigating differences and similarities of the time-dependent data. Our framework was evaluated in a qualitative user study with three radiologists specialized in cardiac imaging and two experts in medical blood flow visualization. They were able to discover cohort-specific characteristics, which supports the derivation of standard values as well as the assessment of pathology-related severity and the need for treatment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present the framework GUCCI (Guided Cardiac Cohort Investigation), which provides a guided visual analytics workflow to analyze cohort-based measured blood flow data in the aorta. 
In the past, many specialized techniques have been developed for the visual exploration of such data sets for a better understanding of the influence of morphological and hemodynamic conditions on cardiovascular diseases. However, there is a lack of dedicated techniques that allow visual comparison of multiple data sets and defined cohorts, which is essential to characterize pathologies. GUCCI offers visual analytics techniques and novel visualization methods to guide the user through the comparison of predefined cohorts, such as healthy volunteers and patients with a pathologically altered aorta. The combination of overview and glyph-based depictions together with statistical cohort-specific information allows investigating differences and similarities of the time-dependent data. Our framework was evaluated in a qualitative user study with three radiologists specialized in cardiac imaging and two experts in medical blood flow visualization. They were able to discover cohort-specific characteristics, which supports the derivation of standard values as well as the assessment of pathology-related severity and the need for treatment.", "title": "<italic>GUCCI</italic> - Guided Cardiac Cohort Investigation of Blood Flow Data", "normalizedTitle": "GUCCI - Guided Cardiac Cohort Investigation of Blood Flow Data", "fno": "09645173", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Blood Vessels", "Cardiovascular System", "Data Analysis", "Data Visualisation", "Diseases", "Flow Visualisation", "Haemodynamics", "Medical Computing", "Medical Image Processing", "Blood Flow Data", "Cardiac Cohort Investigation", "Cardiac Imaging", "Cohort Specific Characteristics", "Dedicated Techniques", "Defined Cohorts", "Glyph Based Depictions", "Guided Visual Analytics Workflow", "Hemodynamic Conditions", "Medical Blood Flow Visualization", "Morphological Conditions", "Multiple Data Sets", "Novel Visualization Methods", "Overview", "Pathologically Altered Aorta", "Pathologies GUCCI Offers Visual Analytics Techniques", "Predefined Cohorts", "Specialized Techniques", "Statistical Cohort Specific Information", "Time Dependent Data", "Visual Comparison", "Visual Exploration", "Data Visualization", "Blood", "Biomedical Imaging", "Visual Analytics", "Heart", "Valves", "Pathology", "Medical Visualization", "Cohort Analysis", "Measured Blood Flow Data", "Cardiac Diseases" ], "authors": [ { "givenName": "Monique", "surname": "Meuschke", "fullName": "Monique Meuschke", "affiliation": "Department of Simulation and Graphics, University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Uli", "surname": "Niemann", "fullName": "Uli Niemann", "affiliation": "University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Benjamin", "surname": "Behrendt", "fullName": "Benjamin Behrendt", "affiliation": "University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Gutberlet", "fullName": "Matthias Gutberlet", "affiliation": "University of Leipzig – Heart Centre, Leipzig, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bernhard", "surname": "Preim", "fullName": "Bernhard Preim", "affiliation": "Department of Simulation and Graphics, University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Kai", "surname": "Lawonn", "fullName": "Kai Lawonn", "affiliation": "Department of Theoretical Computer Science, University of Jena, Jena, Germany", 
"__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1876-1892", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ichi/2016/6117/0/6117a517", "title": "CoRAD: Visual Analytics for Cohort Analysis", "doi": null, "abstractUrl": "/proceedings-article/ichi/2016/6117a517/12OmNBKmXrc", "parentPublication": { "id": "proceedings/ichi/2016/6117/0", "title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596137", "title": "Illustrative visualization of cardiac and aortic blood flow from 4D MRI data", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2013/06596137/12OmNzC5SHi", "parentPublication": { "id": "proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/06/ttg2013060900", "title": "Visual Analysis of Cardiac 4D MRI Blood Flow Using Line Predicates", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013060900/13rRUxAAST9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876009", "title": "Interactive Visual Analysis of Image-Centric Cohort Study Data", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876009/13rRUxASu0L", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122773", "title": "Semi-Automatic Vortex Extraction in 4D PC-MRI Cardiac Blood Flow Data using Line Predicates", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122773/13rRUygBw79", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a241", "title": "Augmented Reality Simulation of Cardiac Circulation Using APPLearn (Heart)", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a241/17D45Wt3ExJ", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmbs/2022/6770/0/677000a416", "title": "Classification of cardiac cohorts based on morphological and hemodynamic features derived from 4D PC-MRI data", "doi": null, "abstractUrl": "/proceedings-article/cmbs/2022/677000a416/1GhVUzGnaAo", "parentPublication": { "id": "proceedings/cmbs/2022/6770/0", "title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09912359", "title": "CohortVA: A Visual Analytic System for Interactive Exploration of Cohorts based on Historical Data", "doi": null, "abstractUrl": 
"/journal/tg/2023/01/09912359/1HeiT0PQBWg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807213", "title": "Selection Bias Tracking and Detailed Subset Comparison for High-Dimensional Data", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807213/1cG6uHFRwqI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09241732", "title": "Visual cohort comparison for spatial single-cell omics-data", "doi": null, "abstractUrl": "/journal/tg/2021/02/09241732/1oijQyHFwVa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09645360", "articleId": "1zc6DFbD4wo", "__typename": "AdjacentArticleType" }, "next": { "fno": "09928368", "articleId": "1HJuKaJzi36", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyRW475kc", "name": "ttg202303-09645173s1-supp1-3134083.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09645173s1-supp1-3134083.pdf", "extension": "pdf", "size": "507 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1KmyNRPfdXG", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1HJuKaJzi36", "doi": "10.1109/TVCG.2022.3217008", "abstract": "Simulating large scale expansion of thin structures, such as in growing leaves, is challenging. Solid-shells have a number of potential advantages over conventional thin-shell methods, but have thus far only been investigated for small plastic deformation cases. In response, we present a new general-purpose FEM growth framework for handling a wide range of challenging growth scenarios using the solid-shell element. Solid-shells are a middle-ground between traditional volume and thin-shell elements where volumetric characteristics are retained while being treatable as a 2D manifold much like thin-shells. These elements are adaptable to accommodate the many techniques that are required for simulating large and intricate plastic deformations, including morphogen diffusion, plastic embedding, strain-aware adaptive remeshing, and collision handling. We demonstrate the capabilities of growing solid-shells in reproducing buckling, rippling, curling, and collision deformations, relevant towards animating growing leaves, flowers, and other thin structures. Solid-shells are compared side-by-side with thin-shells to examine their bending behavior and runtime performance. The experiments demonstrate that solid-shells are a viable alternative to thin-shells for simulating large and intricate growth deformations.", "abstracts": [ { "abstractType": "Regular", "content": "Simulating large scale expansion of thin structures, such as in growing leaves, is challenging. Solid-shells have a number of potential advantages over conventional thin-shell methods, but have thus far only been investigated for small plastic deformation cases. In response, we present a new general-purpose FEM growth framework for handling a wide range of challenging growth scenarios using the solid-shell element. Solid-shells are a middle-ground between traditional volume and thin-shell elements where volumetric characteristics are retained while being treatable as a 2D manifold much like thin-shells. These elements are adaptable to accommodate the many techniques that are required for simulating large and intricate plastic deformations, including morphogen diffusion, plastic embedding, strain-aware adaptive remeshing, and collision handling. We demonstrate the capabilities of growing solid-shells in reproducing buckling, rippling, curling, and collision deformations, relevant towards animating growing leaves, flowers, and other thin structures. Solid-shells are compared side-by-side with thin-shells to examine their bending behavior and runtime performance. The experiments demonstrate that solid-shells are a viable alternative to thin-shells for simulating large and intricate growth deformations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Simulating large scale expansion of thin structures, such as in growing leaves, is challenging. Solid-shells have a number of potential advantages over conventional thin-shell methods, but have thus far only been investigated for small plastic deformation cases. In response, we present a new general-purpose FEM growth framework for handling a wide range of challenging growth scenarios using the solid-shell element. 
Solid-shells are a middle-ground between traditional volume and thin-shell elements where volumetric characteristics are retained while being treatable as a 2D manifold much like thin-shells. These elements are adaptable to accommodate the many techniques that are required for simulating large and intricate plastic deformations, including morphogen diffusion, plastic embedding, strain-aware adaptive remeshing, and collision handling. We demonstrate the capabilities of growing solid-shells in reproducing buckling, rippling, curling, and collision deformations, relevant towards animating growing leaves, flowers, and other thin structures. Solid-shells are compared side-by-side with thin-shells to examine their bending behavior and runtime performance. The experiments demonstrate that solid-shells are a viable alternative to thin-shells for simulating large and intricate growth deformations.", "title": "Large Growth Deformations of Thin Tissue Using Solid-Shells", "normalizedTitle": "Large Growth Deformations of Thin Tissue Using Solid-Shells", "fno": "09928368", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Bending", "Buckling", "Diffusion", "Finite Element Analysis", "Plastic Deformation", "Shells Structures", "Buckling", "Collision Handling", "Curling", "Finite Element Analysis", "Morphogen Diffusion", "Plastic Deformations", "Plastic Embedding", "Rippling", "Solid Shell Element", "Strain Aware Adaptive Remeshing", "Thin Tissue", "Thin Shells", "Finite Element Analysis", "Strain", "Plastics", "Solid Modeling", "Bending", "Adaptation Models", "Deformable Models", "Animation", "Applications", "Computer Graphics", "Finite Element Methods", "Physically Based Modelling" ], "authors": [ { "givenName": "Danny", "surname": "Huang", "fullName": "Danny Huang", "affiliation": "University of Saskatchewan, Saskatoon, SK, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Ian", "surname": "Stavness", "fullName": "Ian Stavness", "affiliation": "University of Saskatchewan, Saskatoon, SK, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1893-1909", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdma/2010/4286/2/4286b362", "title": "Simulation Modeling of Transient Temperatures and Stresses Fields During Quenching 7075 Aluminum Cone-Shaped Shells", "doi": null, "abstractUrl": "/proceedings-article/icdma/2010/4286b362/12OmNrkBwtT", "parentPublication": { "id": "proceedings/icdma/2010/4286/2", "title": "2010 International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/canet/2006/0543/0/04055205", "title": "Algorithmization of the decision of classes of multidimensional problems of magneto-elasticity of thin plates and shells", "doi": null, "abstractUrl": "/proceedings-article/canet/2006/04055205/12OmNvnwVoE", "parentPublication": { "id": "proceedings/canet/2006/0543/0", "title": "2006 2nd IEEE/IFIP International Conference in Central Asia on Internet. 
ICI 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2013/0820/0/06632621", "title": "Real-time rendering of burning solid objects in video games", "doi": null, "abstractUrl": "/proceedings-article/cgames/2013/06632621/12OmNyeECBH", "parentPublication": { "id": "proceedings/cgames/2013/0820/0", "title": "2013 18th International Conference on Computer Games: AI, Animation, Mobile, Interactive Multimedia, Educational & Serious Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cdciem/2011/4350/0/4350a868", "title": "Influence of Multiple Cutouts on the Buckling of Large-Scale Thin-Walled Cylindrical Shells of Desulphurizing Tower under Wind Loading", "doi": null, "abstractUrl": "/proceedings-article/cdciem/2011/4350a868/12OmNz4SOAV", "parentPublication": { "id": "proceedings/cdciem/2011/4350/0", "title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2010/3962/2/3962c042", "title": "Dynamic Characteristics of Elastic Circular Toroidal Shells", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962c042/12OmNz6iOrU", "parentPublication": { "id": "proceedings/icmtma/2010/3962/2", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisai/2021/0692/0/069200a178", "title": "Finite element modelling and failure behaviour analysis of Carbon Fibre-reinforced Plastic thin-walled tube with cutouts under quasi-static loading", "doi": null, "abstractUrl": "/proceedings-article/cisai/2021/069200a178/1BmOgEJWUFO", "parentPublication": { "id": "proceedings/cisai/2021/0692/0", "title": "2021 International Conference on Computer Information Science and Artificial Intelligence (CISAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10043789", "title": "A Parametric Design Method for Engraving Patterns on Thin Shells", "doi": null, "abstractUrl": "/journal/tg/5555/01/10043789/1KJsjepjFi8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2019/1003/0/08884456", "title": "Surface visualization of flexible elastic shells", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2019/08884456/1eEUXmGJ5jq", "parentPublication": { "id": "proceedings/ewdts/2019/1003/0", "title": "2019 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2020/4109/0/410900a217", "title": "Optimization of FHB5.10 Electrical Appliance Shell Injection Molding Process Based on Moldflow and BP Neural Network", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2020/410900a217/1t2mBlOLr1e", "parentPublication": { "id": "proceedings/wcmeim/2020/4109/0", "title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaa/2021/3730/0/373000a906", "title": "Buckling of a Toroidal Shell with Varying Wall Thickness Under Uniform External 
Pressure", "doi": null, "abstractUrl": "/proceedings-article/icaa/2021/373000a906/1zL1NMRzGdW", "parentPublication": { "id": "proceedings/icaa/2021/3730/0", "title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09645173", "articleId": "1zc6CvdsNMc", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1KmyTXx6aYg", "name": "ttg202303-09928368s1-supp1-3217008.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202303-09928368s1-supp1-3217008.mp4", "extension": "mp4", "size": "121 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1y11sTji3vO", "doi": "10.1109/TVCG.2021.3112912", "abstract": "Presents the introductory editorial for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the introductory editorial for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the introductory editorial for this issue of the publication.", "title": "Editor&#x0027;s Note", "normalizedTitle": "Editor's Note", "fno": "09586410", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Klaus", "surname": "Mueller", "fullName": "Klaus Mueller", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4342-4346", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "mags/an/2022/03/09875139", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2022/03/09875139/1GlbXTIEwaQ", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2022/04/09972860", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2022/04/09972860/1ISVNzFCZu8", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2020/02/08956009", "title": "Editor&#x0027;s Note", "doi": null, "abstractUrl": "/journal/td/2020/02/08956009/1gtJY06WATe", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/01/09031986", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2020/01/09031986/1i6VhktGnkc", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/06/09082801", "title": "Editor&#x0027;s Note", "doi": null, "abstractUrl": "/journal/tg/2020/06/09082801/1jrU0RsEpnG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/02/09103673", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2020/02/09103673/1keqEV28ioE", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/04/09257115", "title": "Editor&#x0027;s Note", "doi": null, "abstractUrl": "/journal/td/2021/04/09257115/1oFCKncAhqM", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "mags/an/2020/04/09263260", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2020/04/09263260/1oReM0ot75m", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2021/10/09408530", "title": "Editor&#x0027;s Note", "doi": null, "abstractUrl": "/journal/td/2021/10/09408530/1sVEVpV9zNK", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/an/2021/03/09546090", "title": "From the Editor&#x0027;s Desk", "doi": null, "abstractUrl": "/magazine/an/2021/03/09546090/1x6zEFuXbH2", "parentPublication": { "id": "mags/an", "title": "IEEE Annals of the History of Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09139395", "articleId": "1ls93653hoQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1ls93653hoQ", "doi": "10.1109/TVCG.2020.3006995", "abstract": "Dense flow visualization is a popular visualization paradigm. Traditionally, the various models and methods in this area use a continuous formulation, resting upon the solid foundation of functional analysis. In this work, we examine a discrete formulation of dense flow visualization. From probability theory, we derive a similarity matrix that measures the similarity between different points in the flow domain, leading to the discovery of a whole new class of visualization models. Using this matrix, we propose a novel visualization approach consisting of the computation of spectral embeddings, i.e., characteristic domain maps, defined by particle mixture probabilities. These embeddings are scalar fields that give insight into the mixing processes of the flow on different scales. The approach of spectral embeddings is already well studied in image segmentation, and we see that spectral embeddings are connected to Fourier expansions and frequencies. We showcase the utility of our method using different 2D and 3D flows.", "abstracts": [ { "abstractType": "Regular", "content": "Dense flow visualization is a popular visualization paradigm. Traditionally, the various models and methods in this area use a continuous formulation, resting upon the solid foundation of functional analysis. In this work, we examine a discrete formulation of dense flow visualization. From probability theory, we derive a similarity matrix that measures the similarity between different points in the flow domain, leading to the discovery of a whole new class of visualization models. Using this matrix, we propose a novel visualization approach consisting of the computation of spectral embeddings, i.e., characteristic domain maps, defined by particle mixture probabilities. These embeddings are scalar fields that give insight into the mixing processes of the flow on different scales. The approach of spectral embeddings is already well studied in image segmentation, and we see that spectral embeddings are connected to Fourier expansions and frequencies. We showcase the utility of our method using different 2D and 3D flows.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dense flow visualization is a popular visualization paradigm. Traditionally, the various models and methods in this area use a continuous formulation, resting upon the solid foundation of functional analysis. In this work, we examine a discrete formulation of dense flow visualization. From probability theory, we derive a similarity matrix that measures the similarity between different points in the flow domain, leading to the discovery of a whole new class of visualization models. Using this matrix, we propose a novel visualization approach consisting of the computation of spectral embeddings, i.e., characteristic domain maps, defined by particle mixture probabilities. These embeddings are scalar fields that give insight into the mixing processes of the flow on different scales. The approach of spectral embeddings is already well studied in image segmentation, and we see that spectral embeddings are connected to Fourier expansions and frequencies. 
We showcase the utility of our method using different 2D and 3D flows.", "title": "A Discrete Probabilistic Approach to Dense Flow Visualization", "normalizedTitle": "A Discrete Probabilistic Approach to Dense Flow Visualization", "fno": "09139395", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Flow Visualisation", "Fourier Analysis", "Image Segmentation", "Probability", "Discrete Probabilistic Approach", "Dense Flow Visualization", "Discrete Formulation", "Flow Domain", "Spectral Embeddings", "Functional Analysis", "Image Segmentation", "Fourier Expansions", "Complexity Theory", "Technological Innovation", "Organizations", "Collaboration", "Bibliographies", "Decision Making", "Industrial Engineering", "Flow Visualization", "Volume Visualization", "Spectral Methods" ], "authors": [ { "givenName": "Daniel", "surname": "Preuß", "fullName": "Daniel Preuß", "affiliation": "COVIDAG, the University of Duisburg-Essen, Duisburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Tino", "surname": "Weinkauf", "fullName": "Tino Weinkauf", "affiliation": "KTH Royal Institute of Technology, Stockholm, Sweden", "__typename": "ArticleAuthorType" }, { "givenName": "Jens", "surname": "Krüger", "fullName": "Jens Krüger", "affiliation": "COVIDAG, the University of Duisburg-Essen, Duisburg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4347-4358", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cimca/2005/2504/2/250420426", "title": "Using potential theory and dense texture-based visualization for external motion applications", "doi": null, "abstractUrl": "/proceedings-article/cimca/2005/250420426/12OmNAle6iP", "parentPublication": { "id": "proceedings/cimca/2005/2504/1", "title": "Computational Intelligence for Modelling, Control and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156352", "title": "Parallel unsteady flow line integral convolution for high-performance dense visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156352/12OmNAlvI6a", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2015/9785/0/07429490", "title": "Explicit frequency control for high-quality texture-based flow visualization", "doi": null, "abstractUrl": "/proceedings-article/scivis/2015/07429490/12OmNB836Ir", "parentPublication": { "id": "proceedings/scivis/2015/9785/0", "title": "2015 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118d406", "title": "DAISY Filter Flow: A Generalized Discrete Approach to Dense Correspondences", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d406/12OmNCd2rQ9", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/visapp/2014/8133/3/07295138", "title": "Cross-spectral stereo correspondence using dense flow fields", "doi": null, "abstractUrl": "/proceedings-article/visapp/2014/07295138/12OmNviZlyL", "parentPublication": { "id": "proceedings/visapp/2014/8133/2", "title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2010/6685/0/05429598", "title": "An advection-reaction model for flow visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429598/12OmNxaNGlD", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2012/0863/0/06183583", "title": "Dense flow visualization using wave interference", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2012/06183583/12OmNzBwGGw", "parentPublication": { "id": "proceedings/pacificvis/2012/0863/0", "title": "Visualization Symposium, IEEE Pacific", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/06/v0609", "title": "Comparative Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2004/06/v0609/13rRUwgQpqB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011121949", "title": "Flow Radar Glyphs—Static Visualization of Unsteady Flow with Uncertainty", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011121949/13rRUxC0SOU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i371", "title": "RAFT-3D: Scene Flow using Rigid-Motion Embeddings", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i371/1yeIxeg2RrO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09586410", "articleId": "1y11sTji3vO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09140426", "articleId": "1lsnHpufYqY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1lsnHpufYqY", "doi": "10.1109/TVCG.2020.3009003", "abstract": "We present exploratory research of virtual reality techniques and mnemonic devices to assist in retrieving knowledge from scholarly articles. We used abstracts of scientific publications to represent knowledge in scholarly articles; participants were asked to read, remember, and retrieve knowledge from a set of abstracts. We conducted an experiment to compare participants&#x2019; recall and recognition performance in three different conditions: a control condition without a pre-specified strategy to test baseline individual memory ability, a condition using an image-based variant of a mnemonic called a &#x201C;memory palace,&#x201D; and a condition using a virtual reality-based variant of a memory palace. Our analyses show that using a virtual reality-based memory palace variant greatly increased the amount of knowledge retrieved and retained over the baseline, and it shows a moderate improvement over the other image-based memory palace variant. Anecdotal feedback from participants suggested that personalizing a memory palace variant would be appreciated. Our results support the value of virtual reality for some high-level cognitive tasks and help improve future applications of virtual reality and visualization.", "abstracts": [ { "abstractType": "Regular", "content": "We present exploratory research of virtual reality techniques and mnemonic devices to assist in retrieving knowledge from scholarly articles. We used abstracts of scientific publications to represent knowledge in scholarly articles; participants were asked to read, remember, and retrieve knowledge from a set of abstracts. We conducted an experiment to compare participants&#x2019; recall and recognition performance in three different conditions: a control condition without a pre-specified strategy to test baseline individual memory ability, a condition using an image-based variant of a mnemonic called a &#x201C;memory palace,&#x201D; and a condition using a virtual reality-based variant of a memory palace. Our analyses show that using a virtual reality-based memory palace variant greatly increased the amount of knowledge retrieved and retained over the baseline, and it shows a moderate improvement over the other image-based memory palace variant. Anecdotal feedback from participants suggested that personalizing a memory palace variant would be appreciated. Our results support the value of virtual reality for some high-level cognitive tasks and help improve future applications of virtual reality and visualization.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present exploratory research of virtual reality techniques and mnemonic devices to assist in retrieving knowledge from scholarly articles. We used abstracts of scientific publications to represent knowledge in scholarly articles; participants were asked to read, remember, and retrieve knowledge from a set of abstracts. 
We conducted an experiment to compare participants’ recall and recognition performance in three different conditions: a control condition without a pre-specified strategy to test baseline individual memory ability, a condition using an image-based variant of a mnemonic called a “memory palace,” and a condition using a virtual reality-based variant of a memory palace. Our analyses show that using a virtual reality-based memory palace variant greatly increased the amount of knowledge retrieved and retained over the baseline, and it shows a moderate improvement over the other image-based memory palace variant. Anecdotal feedback from participants suggested that personalizing a memory palace variant would be appreciated. Our results support the value of virtual reality for some high-level cognitive tasks and help improve future applications of virtual reality and visualization.", "title": "A Virtual Reality Memory Palace Variant Aids Knowledge Retrieval from Scholarly Articles", "normalizedTitle": "A Virtual Reality Memory Palace Variant Aids Knowledge Retrieval from Scholarly Articles", "fno": "09140426", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Data Visualisation", "Document Handling", "Image Processing", "Information Retrieval", "Natural Language Processing", "Virtual Reality", "Scholarly Articles", "Mnemonic Devices", "Control Condition", "Image Based Memory Palace Variant", "Virtual Reality Memory Palace Variant Aids Knowledge Retrieval", "Anecdotal Feedback", "High Level Cognitive Tasks", "Natural Language Document", "Virtual Reality", "Natural Language Processing", "Memory Management", "Spatial Analysis", "Human Factors", "Virtual Reality", "Mnemonic Devices", "Natural Language Documents", "Human Memory", "Spatialization", "Spatial Memory" ], "authors": [ { "givenName": "Fumeng", "surname": "Yang", "fullName": "Fumeng Yang", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jing", "surname": "Qian", "fullName": "Jing Qian", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Johannes", "surname": "Novotny", "fullName": "Johannes Novotny", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Badre", "fullName": "David Badre", "affiliation": "Department of Cognitive, Linguistic & Psychological Sciences, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Cullen D.", "surname": "Jackson", "fullName": "Cullen D. Jackson", "affiliation": "Beth Israel Deaconess Medical Center, Harvard University, Boston, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "David H.", "surname": "Laidlaw", "fullName": "David H. Laidlaw", "affiliation": "Department of Computer Science, Brown University, Providence, RI, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4359-4373", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2012/4660/0/06402590", "title": "Why should my students use AR? 
A comparative review of the educational impacts of augmented-reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402590/12OmNxd4txi", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1995/7084/0/70840028", "title": "Realizing the full potential of virtual reality: human factors issues that could stand in the way", "doi": null, "abstractUrl": "/proceedings-article/vrais/1995/70840028/12OmNylKB6n", "parentPublication": { "id": "proceedings/vrais/1995/7084/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1995/7084/0/70840048", "title": "Virtual-reality monitoring", "doi": null, "abstractUrl": "/proceedings-article/vrais/1995/70840048/12OmNzUPpwc", "parentPublication": { "id": "proceedings/vrais/1995/7084/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446457", "title": "Memory Task Performance Across Augmented and Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446457/13bd1fph1yg", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/03/09790021", "title": "Situated VR: Toward a Congruent Hybrid Reality Without Experiential Artifacts", "doi": null, "abstractUrl": "/magazine/cg/2022/03/09790021/1E0Nh45Ca64", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2022/9956/0/995600a999", "title": "Evaluating the spread of Omicron COVID-19 variant in Spain", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2022/995600a999/1F8zifCZZTy", "parentPublication": { "id": "proceedings/ccgrid/2022/9956/0", "title": "2022 22nd International Symposium on Cluster, Cloud and Internet Computing (CCGrid)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a692", "title": "One day in a Roman Domus: Human Factors and Educational Properties Involved in a Virtual Heritage Application", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a692/1J7WxP5sjmg", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a157", "title": "Human Factors Assessment in VR-based Firefighting Training in Maritime: A Pilot Study", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a157/1olHyrrUZuU", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a159", "title": "VR-based Training on Handling LNG Related Emergency in the Maritime Industry", "doi": null, 
"abstractUrl": "/proceedings-article/cw/2021/406500a159/1yBF5Wqysak", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a106", "title": "Towards a Standard Approach for the Design of a both Physical and Virtual Museum", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a106/1zxLC7rtrYA", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09139395", "articleId": "1ls93653hoQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "09123589", "articleId": "1kTxwwg0epW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y11oiAuchy", "name": "ttg202112-09140426s1-supp1-3009003.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202112-09140426s1-supp1-3009003.pdf", "extension": "pdf", "size": "929 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1kTxwwg0epW", "doi": "10.1109/TVCG.2020.3004195", "abstract": "In augmented reality, it is important to achieve visual consistency between inserted virtual objects and the real scene. As specular and transparent objects can produce caustics, which affect the appearance of inserted virtual objects, we herein propose a framework for differential rendering beyond the Lambertian-world assumption. Our key idea is to jointly optimize illumination and parameters of specular and transparent objects. To estimate the parameters of transparent objects efficiently, the psychophysical scaling method is introduced while considering visual characteristics of the human eye to obtain the step size for estimating the refractive index. We verify our technique on multiple real scenes, and the experimental results show that the fusion effects are visually consistent.", "abstracts": [ { "abstractType": "Regular", "content": "In augmented reality, it is important to achieve visual consistency between inserted virtual objects and the real scene. As specular and transparent objects can produce caustics, which affect the appearance of inserted virtual objects, we herein propose a framework for differential rendering beyond the Lambertian-world assumption. Our key idea is to jointly optimize illumination and parameters of specular and transparent objects. To estimate the parameters of transparent objects efficiently, the psychophysical scaling method is introduced while considering visual characteristics of the human eye to obtain the step size for estimating the refractive index. We verify our technique on multiple real scenes, and the experimental results show that the fusion effects are visually consistent.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In augmented reality, it is important to achieve visual consistency between inserted virtual objects and the real scene. As specular and transparent objects can produce caustics, which affect the appearance of inserted virtual objects, we herein propose a framework for differential rendering beyond the Lambertian-world assumption. Our key idea is to jointly optimize illumination and parameters of specular and transparent objects. To estimate the parameters of transparent objects efficiently, the psychophysical scaling method is introduced while considering visual characteristics of the human eye to obtain the step size for estimating the refractive index. 
We verify our technique on multiple real scenes, and the experimental results show that the fusion effects are visually consistent.", "title": "An Improved Augmented-Reality Framework for Differential Rendering Beyond the Lambertian-World Assumption", "normalizedTitle": "An Improved Augmented-Reality Framework for Differential Rendering Beyond the Lambertian-World Assumption", "fno": "09123589", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Refractive Index", "Rendering Computer Graphics", "Virtual Reality", "Differential Rendering", "Lambertian World Assumption", "Transparent Objects", "Visual Characteristics", "Improved Augmented Reality Framework", "Augmented Reality", "Visual Consistency", "Inserted Virtual Objects", "Lighting", "Estimation", "Rendering Computer Graphics", "Light Sources", "Augmented Reality", "Visualization", "Cameras", "Augmented Reality", "Specular And Transparent Objects", "Global Illumination", "Light Estimation", "Material Estimation", "Joint Optimization" ], "authors": [ { "givenName": "Aijia", "surname": "Zhang", "fullName": "Aijia Zhang", "affiliation": "College of Communication Engineering, Jilin University, Changchun, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yan", "surname": "Zhao", "fullName": "Yan Zhao", "affiliation": "College of Communication Engineering, Jilin University, Changchun, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shigang", "surname": "Wang", "fullName": "Shigang Wang", "affiliation": "College of Communication Engineering, Jilin University, Changchun, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4374-4386", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cw/2004/2140/0/21400039", "title": "A Rapid Rendering Method for Caustics Arising from Refraction by Transparent Objects", "doi": null, "abstractUrl": "/proceedings-article/cw/2004/21400039/12OmNvTTc81", "parentPublication": { "id": "proceedings/cw/2004/2140/0", "title": "2004 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f844", "title": "Learning Non-Lambertian Object Intrinsics Across ShapeNet Categories", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f844/12OmNvq5jEO", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a913", "title": "Cartoon Rendering Illumination Model Based on Phong", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a037", "title": "Tiled Frustum Culling for Differential Rendering on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a037/12OmNwqx4aS", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International 
Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460071", "title": "Shading derivation from an unspecified object for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460071/12OmNzAohXY", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/05/ttg2013050749", "title": "Interactive Rendering of Acquired Materials on Dynamic Geometry Using Frequency Analysis", "doi": null, "abstractUrl": "/journal/tg/2013/05/ttg2013050749/13rRUyp7tWW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09904431", "title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights", "doi": null, "abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a711", "title": "GAN2X: Non-Lambertian Inverse Rendering of Image GANs", "doi": null, "abstractUrl": "/proceedings-article/3dv/2022/567000a711/1KYsuznLNpm", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10077440", "title": "NeRC: Rendering Planar Caustics by Learning Implicit Neural Representations", "doi": null, "abstractUrl": "/journal/tg/5555/01/10077440/1LFQ6PMpeik", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbase/2022/9639/0/963900a027", "title": "Interactive Image-Space Rendering of Dispersions", "doi": null, "abstractUrl": "/proceedings-article/cbase/2022/963900a027/1MBRh4FHK3m", "parentPublication": { "id": "proceedings/cbase/2022/9639/0", "title": "2022 International Conference on Cloud Computing, Big Data Applications and Software Engineering (CBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09140426", "articleId": "1lsnHpufYqY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09117062", "articleId": "1kGg69DDrFe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
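Each entry closes with a "recommendedArticles" list whose "doi" fields are often null and whose "abstractUrl" values are site-relative paths. Turning those paths into absolute links needs a site base; the base below is an assumption inferred from the webExtras locations elsewhere in this dump, not a documented API, so verify it before depending on it:

# Assumed base URL, inferred from the webExtras locations in this dump.
BASE = "https://www.computer.org/csdl"

def recommended_links(record: dict) -> list[tuple[str, str]]:
    # Pair each recommended article's title with an absolute abstract URL.
    return [(r["title"], BASE + r["abstractUrl"]) for r in record.get("recommendedArticles", [])]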
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1kGg69DDrFe", "doi": "10.1109/TVCG.2020.3002245", "abstract": "In this article, we investigate the effects of active transient vibration and visuo-haptic illusion to augment the perceived softness of haptic proxy objects. We introduce a system combining active transient vibration at the fingertip with visuo-haptic illusions. In our hand-held device, a voice coil actuator transmits active transient vibrations to the index fingertip, while a force sensor measures the force applied on passive proxy objects to create visuo-haptic illusions in virtual reality. We conducted three user studies to understand both the vibrotactile effect and its combined effect with visuo-haptic illusions. A preliminary study confirmed that active transient vibrations can intuitively alter the perceived softness of a proxy object. Our first study demonstrated that those same active transient vibrations can generate different perceptions of softness depending on the material of the proxy object used. In our second study, we evaluated the combination of active transient vibration and visuo-haptic illusion, and found that both significantly influence perceived softness, with with the visuo-haptic effect being dominant. Our third study further investigated the vibrotactile effect while controlling for the visuo-haptic illusion. The combination of these two methods allows users to effectively perceive various levels of softness when interacting with haptic proxy objects.", "abstracts": [ { "abstractType": "Regular", "content": "In this article, we investigate the effects of active transient vibration and visuo-haptic illusion to augment the perceived softness of haptic proxy objects. We introduce a system combining active transient vibration at the fingertip with visuo-haptic illusions. In our hand-held device, a voice coil actuator transmits active transient vibrations to the index fingertip, while a force sensor measures the force applied on passive proxy objects to create visuo-haptic illusions in virtual reality. We conducted three user studies to understand both the vibrotactile effect and its combined effect with visuo-haptic illusions. A preliminary study confirmed that active transient vibrations can intuitively alter the perceived softness of a proxy object. Our first study demonstrated that those same active transient vibrations can generate different perceptions of softness depending on the material of the proxy object used. In our second study, we evaluated the combination of active transient vibration and visuo-haptic illusion, and found that both significantly influence perceived softness, with with the visuo-haptic effect being dominant. Our third study further investigated the vibrotactile effect while controlling for the visuo-haptic illusion. The combination of these two methods allows users to effectively perceive various levels of softness when interacting with haptic proxy objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this article, we investigate the effects of active transient vibration and visuo-haptic illusion to augment the perceived softness of haptic proxy objects. 
We introduce a system combining active transient vibration at the fingertip with visuo-haptic illusions. In our hand-held device, a voice coil actuator transmits active transient vibrations to the index fingertip, while a force sensor measures the force applied on passive proxy objects to create visuo-haptic illusions in virtual reality. We conducted three user studies to understand both the vibrotactile effect and its combined effect with visuo-haptic illusions. A preliminary study confirmed that active transient vibrations can intuitively alter the perceived softness of a proxy object. Our first study demonstrated that those same active transient vibrations can generate different perceptions of softness depending on the material of the proxy object used. In our second study, we evaluated the combination of active transient vibration and visuo-haptic illusion, and found that both significantly influence perceived softness, with with the visuo-haptic effect being dominant. Our third study further investigated the vibrotactile effect while controlling for the visuo-haptic illusion. The combination of these two methods allows users to effectively perceive various levels of softness when interacting with haptic proxy objects.", "title": "Augmenting Perceived Softness of Haptic Proxy Objects Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality", "normalizedTitle": "Augmenting Perceived Softness of Haptic Proxy Objects Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality", "fno": "09117062", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Actuators", "Force Sensors", "Haptic Interfaces", "Vibrations", "Virtual Reality", "Visuo Haptic Illusion", "Perceived Softness", "Haptic Proxy Objects", "Active Transient Vibration", "Proxy Object", "Visuo Haptic Effect", "Haptic Interfaces", "Vibrations", "Transient Analysis", "Data Visualization", "Transient Analysis", "Rendering Computer Graphics", "Transient Vibration", "Visuo Haptic Illusion", "Virtual Reality", "Haptics", "Softness", "Softness Rendering", "Softness Perception" ], "authors": [ { "givenName": "Inrak", "surname": "Choi", "fullName": "Inrak Choi", "affiliation": "Department of Mechanical Engineering, Stanford University, Stanford, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yiwei", "surname": "Zhao", "fullName": "Yiwei Zhao", "affiliation": "Department of Mechanical Engineering, Stanford University, Stanford, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eric J.", "surname": "Gonzalez", "fullName": "Eric J. 
Gonzalez", "affiliation": "Department of Mechanical Engineering, Stanford University, Stanford, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Sean", "surname": "Follmer", "fullName": "Sean Follmer", "affiliation": "Department of Mechanical Engineering, Stanford University, Stanford, CA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4387-4400", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/whc/2009/3858/0/04810828", "title": "Haptic, visual and visuo-haptic softness judgments for objects with deformable surfaces", "doi": null, "abstractUrl": "/proceedings-article/whc/2009/04810828/12OmNAhOUOf", "parentPublication": { "id": "proceedings/whc/2009/3858/0", "title": "World Haptics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2007/3005/0/30050003", "title": "Visuo-Haptic Interface for Hair", "doi": null, "abstractUrl": "/proceedings-article/cw/2007/30050003/12OmNwEJ12y", "parentPublication": { "id": "proceedings/cw/2007/3005/0", "title": "2007 International Conference on Cyberworlds (CW'07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780233", "title": "Visuo-Haptic Display Using Head-Mounted Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948417", "title": "Comprehensive workspace calibration for visuo-haptic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948417/12OmNxIRxTh", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549404", "title": "HARP: A framework for visuo-haptic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549404/12OmNzBwGx8", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07124506", "title": "Direct Visuo-Haptic 4D Volume Rendering Using Respiratory Motion Models", "doi": null, "abstractUrl": "/journal/th/2015/04/07124506/13rRUwInvfi", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2010/02/tth2010020109", "title": "Rendering Softness: Integration of Kinesthetic and Cutaneous Information in a Haptic Device", "doi": null, "abstractUrl": "/journal/th/2010/02/tth2010020109/13rRUwwJWFX", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/04/tth2011040321", "title": "Collocation Accuracy of Visuo-Haptic System: Metrics and Calibration", "doi": null, "abstractUrl": 
"/journal/th/2011/04/tth2011040321/13rRUxASuhM", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2018/1174/0/08658439", "title": "Visuo-haptic Simulations to Improve Students&#x2019; Understanding of Friction Concepts", "doi": null, "abstractUrl": "/proceedings-article/fie/2018/08658439/18j986wHygM", "parentPublication": { "id": "proceedings/fie/2018/1174/0", "title": "2018 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09665216", "title": "HaptoMapping: Visuo-Haptic Augmented Reality by Embedding User-Imperceptible Tactile Display Control Signals in a Projected Image", "doi": null, "abstractUrl": "/journal/tg/2023/04/09665216/1zJiKwg69PO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09123589", "articleId": "1kTxwwg0epW", "__typename": "AdjacentArticleType" }, "next": { "fno": "09128014", "articleId": "1l3uoODPxAI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1l3uoODPxAI", "doi": "10.1109/TVCG.2020.3002166", "abstract": "Biologists often perform clustering analysis to derive meaningful patterns, relationships, and structures from data instances and attributes. Though clustering plays a pivotal role in biologists&#x2019; data exploration, it takes non-trivial efforts for biologists to find the best grouping in their data using existing tools. Visual cluster analysis is currently performed either programmatically or through menus and dialogues in many tools, which require parameter adjustments over several steps of trial-and-error. In this article, we introduce Geono-Cluster, a novel visual analysis tool designed to support cluster analysis for biologists who do not have formal data science training. Geono-Cluster enables biologists to apply their domain expertise into clustering results by visually demonstrating how their expected clustering outputs should look like with a small sample of data instances. The system then predicts users&#x2019; intentions and generates potential clustering results. Our study follows the design study protocol to derive biologists&#x2019; tasks and requirements, design the system, and evaluate the system with experts on their own dataset. Results of our study with six biologists provide initial evidence that Geono-Cluster enables biologists to create, refine, and evaluate clustering results to effectively analyze their data and gain data-driven insights. At the end, we discuss lessons learned and implications of our study.", "abstracts": [ { "abstractType": "Regular", "content": "Biologists often perform clustering analysis to derive meaningful patterns, relationships, and structures from data instances and attributes. Though clustering plays a pivotal role in biologists&#x2019; data exploration, it takes non-trivial efforts for biologists to find the best grouping in their data using existing tools. Visual cluster analysis is currently performed either programmatically or through menus and dialogues in many tools, which require parameter adjustments over several steps of trial-and-error. In this article, we introduce Geono-Cluster, a novel visual analysis tool designed to support cluster analysis for biologists who do not have formal data science training. Geono-Cluster enables biologists to apply their domain expertise into clustering results by visually demonstrating how their expected clustering outputs should look like with a small sample of data instances. The system then predicts users&#x2019; intentions and generates potential clustering results. Our study follows the design study protocol to derive biologists&#x2019; tasks and requirements, design the system, and evaluate the system with experts on their own dataset. Results of our study with six biologists provide initial evidence that Geono-Cluster enables biologists to create, refine, and evaluate clustering results to effectively analyze their data and gain data-driven insights. At the end, we discuss lessons learned and implications of our study.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Biologists often perform clustering analysis to derive meaningful patterns, relationships, and structures from data instances and attributes. 
Though clustering plays a pivotal role in biologists’ data exploration, it takes non-trivial efforts for biologists to find the best grouping in their data using existing tools. Visual cluster analysis is currently performed either programmatically or through menus and dialogues in many tools, which require parameter adjustments over several steps of trial-and-error. In this article, we introduce Geono-Cluster, a novel visual analysis tool designed to support cluster analysis for biologists who do not have formal data science training. Geono-Cluster enables biologists to apply their domain expertise into clustering results by visually demonstrating how their expected clustering outputs should look like with a small sample of data instances. The system then predicts users’ intentions and generates potential clustering results. Our study follows the design study protocol to derive biologists’ tasks and requirements, design the system, and evaluate the system with experts on their own dataset. Results of our study with six biologists provide initial evidence that Geono-Cluster enables biologists to create, refine, and evaluate clustering results to effectively analyze their data and gain data-driven insights. At the end, we discuss lessons learned and implications of our study.", "title": "Geono-Cluster: Interactive Visual Cluster Analysis for Biologists", "normalizedTitle": "Geono-Cluster: Interactive Visual Cluster Analysis for Biologists", "fno": "09128014", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Bioinformatics", "Data Analysis", "Data Visualisation", "Interactive Systems", "Pattern Clustering", "Statistical Analysis", "Geono Cluster", "Interactive Visual Cluster Analysis", "Biologists", "Clustering Analysis", "Visual Analysis Tool", "Formal Data Science Training", "Trial And Error", "Clustering Methods", "Human Factors", "Task Analysis", "Data Visualization", "Biological System Modeling", "Data Models", "Interactive Clustering", "Visual Analytics", "Human In The Loop", "Model Selection", "Demonstration Based Paradigm" ], "authors": [ { "givenName": "Subhajit", "surname": "Das", "fullName": "Subhajit Das", "affiliation": "Georgia Institute of Technology, Atlanta, GA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Bahador", "surname": "Saket", "fullName": "Bahador Saket", "affiliation": "Georgia Institute of Technology, Atlanta, GA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Bum Chul", "surname": "Kwon", "fullName": "Bum Chul Kwon", "affiliation": "IBM Research, Yorktown Heights, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Alex", "surname": "Endert", "fullName": "Alex Endert", "affiliation": "Georgia Institute of Technology, Atlanta, GA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4401-4412", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icee/2010/3997/0/3997e842", "title": "The Industrial Pollution Management Based on Cluster Analysis", "doi": null, "abstractUrl": "/proceedings-article/icee/2010/3997e842/12OmNCmpcRZ", "parentPublication": { "id": "proceedings/icee/2010/3997/0", "title": "International Conference on E-Business and E-Government", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/pacificvis/2016/1451/0/07465248", "title": "Interactive visual co-cluster analysis of bipartite graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2016/07465248/12OmNwtn3yl", "parentPublication": { "id": "proceedings/pacificvis/2016/1451/0", "title": "2016 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a243", "title": "Interactive Visual Analysis on Large Attributed Networks", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a243/12OmNx8fi7B", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibmw/2009/5121/0/05332101", "title": "Cluster validation: An integrative method for cluster analysis", "doi": null, "abstractUrl": "/proceedings-article/bibmw/2009/05332101/12OmNxX3urt", "parentPublication": { "id": "proceedings/bibmw/2009/5121/0", "title": "2009 IEEE International Conference on Bioinformatics and Biomedicine Workshop", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08047300", "title": "Cluster-Based Visual Abstraction for Multivariate Scatterplots", "doi": null, "abstractUrl": "/journal/tg/2018/09/08047300/13rRUILLkvy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg2011122581", "title": "DICON: Interactive Visual Analysis of Multidimensional Clusters", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg2011122581/13rRUxcbnH8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2004/11/k1370", "title": "Cluster Analysis for Gene Expression Data: A Survey", "doi": null, "abstractUrl": "/journal/tk/2004/11/k1370/13rRUy2YLTf", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2004/04/k0448", "title": "A Human-Computer Interactive Method for Projected Clustering", "doi": null, "abstractUrl": "/journal/tk/2004/04/k0448/13rRUyYSWli", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2018/8481/0/848100a659", "title": "Stable Hierarchical Clustering Analysis Based on New Designed Cluster Validity Index", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2018/848100a659/17D45VsBU6s", "parentPublication": { "id": "proceedings/icmcce/2018/8481/0", "title": "2018 3rd International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09904480", "title": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction", "doi": null, "abstractUrl": "/journal/tg/2023/01/09904480/1H0GkV5P1qo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09117062", "articleId": "1kGg69DDrFe", "__typename": "AdjacentArticleType" }, "next": { "fno": "09128027", "articleId": "1l3unTAaNuE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1l3unTAaNuE", "doi": "10.1109/TVCG.2020.3005680", "abstract": "We present a system for designing indoor scenes with convertible furniture layouts. Such layouts are useful for scenarios where an indoor scene has multiple purposes and requires layout conversion, such as merging multiple small furniture objects into a larger one or changing the locus of the furniture. We aim at planning the motion for the convertible layouts of a scene with the most efficient conversion process. To achieve this, our system first establishes object-level correspondences between the layout of a given source and that of a reference to compute a target layout, where the objects are re-arranged in the source layout with respect to the reference layout. After that, our system initializes the movement paths of objects between the source and target layouts based on various mechanical constraints. A joint space-time optimization is then performed to program a control stream of object translations, rotations, and stops, under which the movements of all objects are efficient and the potential object collisions are avoided. We demonstrate the effectiveness of our system through various design examples of multi-purpose, indoor scenes with convertible layouts.", "abstracts": [ { "abstractType": "Regular", "content": "We present a system for designing indoor scenes with convertible furniture layouts. Such layouts are useful for scenarios where an indoor scene has multiple purposes and requires layout conversion, such as merging multiple small furniture objects into a larger one or changing the locus of the furniture. We aim at planning the motion for the convertible layouts of a scene with the most efficient conversion process. To achieve this, our system first establishes object-level correspondences between the layout of a given source and that of a reference to compute a target layout, where the objects are re-arranged in the source layout with respect to the reference layout. After that, our system initializes the movement paths of objects between the source and target layouts based on various mechanical constraints. A joint space-time optimization is then performed to program a control stream of object translations, rotations, and stops, under which the movements of all objects are efficient and the potential object collisions are avoided. We demonstrate the effectiveness of our system through various design examples of multi-purpose, indoor scenes with convertible layouts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a system for designing indoor scenes with convertible furniture layouts. Such layouts are useful for scenarios where an indoor scene has multiple purposes and requires layout conversion, such as merging multiple small furniture objects into a larger one or changing the locus of the furniture. We aim at planning the motion for the convertible layouts of a scene with the most efficient conversion process. To achieve this, our system first establishes object-level correspondences between the layout of a given source and that of a reference to compute a target layout, where the objects are re-arranged in the source layout with respect to the reference layout. 
After that, our system initializes the movement paths of objects between the source and target layouts based on various mechanical constraints. A joint space-time optimization is then performed to program a control stream of object translations, rotations, and stops, under which the movements of all objects are efficient and the potential object collisions are avoided. We demonstrate the effectiveness of our system through various design examples of multi-purpose, indoor scenes with convertible layouts.", "title": "Motion Planning for Convertible Indoor Scene Layout Design", "normalizedTitle": "Motion Planning for Convertible Indoor Scene Layout Design", "fno": "09128027", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Furniture", "Object Detection", "Optimisation", "Path Planning", "Motion Planning", "Convertible Indoor Scene Layout Design", "Convertible Furniture Layouts", "Layout Conversion", "Object Level Correspondences", "Target Layout", "Source Layout", "Reference Layout", "Object Translations", "Potential Object Collisions", "Joint Space Time Optimization", "Image Analysis", "Motion Planning", "Planning", "Optimization", "Layout", "Indoor Scene Synthesis", "Motion Planning", "Convertible Layout" ], "authors": [ { "givenName": "Guoming", "surname": "Xiong", "fullName": "Guoming Xiong", "affiliation": "Virtual Reality and Interactive Techniques Institute, East China Jiaotong University, Nanchang, Jiangxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qiang", "surname": "Fu", "fullName": "Qiang Fu", "affiliation": "School of Digital Media and Design Arts, Beijing University of Posts and Telecommunications, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongbo", "surname": "Fu", "fullName": "Hongbo Fu", "affiliation": "School of Creative Media, City University of Hong Kong, Hong Kong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Bin", "surname": "Zhou", "fullName": "Bin Zhou", "affiliation": "School of Computer Science, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guoliang", "surname": "Luo", "fullName": "Guoliang Luo", "affiliation": "Virtual Reality and Interactive Techniques Institute, East China Jiaotong University, Nanchang, Jiangxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhigang", "surname": "Deng", "fullName": "Zhigang Deng", "affiliation": "Department of Computer Science, University of Houston, Houston, TX, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4413-4424", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391a936", "title": "Learning Informative Edge Maps for Indoor Scene Layout Prediction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a936/12OmNvSKNTq", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733a041", "title": "Automated Layout Synthesis and Visualization from Images of Interior or Exterior Spaces", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733a041/12OmNyfdOKF", "parentPublication": { "id": 
"proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08039524", "title": "A Data-Driven Approach for Furniture and Indoor Scene Colorization", "doi": null, "abstractUrl": "/journal/tg/2018/09/08039524/13rRUy3gn7D", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000d926", "title": "Automatic 3D Indoor Scene Modeling from Single Panorama", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d926/17D45VtKiys", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f899", "title": "Human-Centric Indoor Scene Synthesis Using Stochastic Grammar", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f899/17D45W9KVIW", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08546278", "title": "Indoor Scene Layout Estimation from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08546278/17D45XvMcb4", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h783", "title": "iPLAN: Interactive and Procedural Layout Planning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h783/1H0N7lmNc1q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10018465", "title": "<sc>SceneHGN</sc>: Hierarchical Graph Networks for 3D Indoor Scene Generation with Fine-Grained Geometry", "doi": null, "abstractUrl": "/journal/tp/5555/01/10018465/1K0DC1ki5P2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a029", "title": "Flexible Indoor Scene Synthesis via a Multi-object Particle Swarm Intelligence Optimization Algorithm and User Intentions", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a029/1fHkncxu9Hi", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2021/1865/0/186500a323", "title": "Layout Structure Assisted Indoor Image Generation", "doi": null, "abstractUrl": "/proceedings-article/mipr/2021/186500a323/1xPso7x537W", "parentPublication": { "id": "proceedings/mipr/2021/1865/0", "title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09128014", "articleId": "1l3uoODPxAI", "__typename": "AdjacentArticleType" }, "next": { "fno": "09507320", "articleId": "1vNfMheqZ2w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y11nrV2LVm", "name": "ttg202112-09128027s1-supp1-3005680.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202112-09128027s1-supp1-3005680.mp4", "extension": "mp4", "size": "92.7 MB", "__typename": "WebExtraType" }, { "id": "1y11n8Dsk3C", "name": "ttg202112-09128027s1-supp2-3005680.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202112-09128027s1-supp2-3005680.pdf", "extension": "pdf", "size": "442 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1y11mYZWHfO", "title": "Dec.", "year": "2021", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1vNfMheqZ2w", "doi": "10.1109/TVCG.2021.3101854", "abstract": "Bau and Mackays OctoPocus dynamic guide helps novices learn, execute, and remember 2D surface gestures. We adapt OctoPocus to 3D mid-air gestures in Virtual Reality (VR) using an optimization-based recognizer, and by introducing an optional exploration mode to help visualize the spatial complexity of guides in a 3D gesture set. A replication of the original experiment protocol is used to compare OctoPocus in VR with a VR implementation of a crib-sheet. Results show that despite requiring 0.9s more reaction time than crib-sheet, OctoPocus enables participants to execute gestures 1.8s faster with 13.8 percent more accuracy during training, while remembering a comparable number of gestures. Subjective ratings support these results, 75 percent of participants found OctoPocus easier to learn and 83 percent found it more accurate. We contribute an implementation and empirical evidence demonstrating that an adaptation of the OctoPocus guide to VR is feasible and beneficial.", "abstracts": [ { "abstractType": "Regular", "content": "Bau and Mackays OctoPocus dynamic guide helps novices learn, execute, and remember 2D surface gestures. We adapt OctoPocus to 3D mid-air gestures in Virtual Reality (VR) using an optimization-based recognizer, and by introducing an optional exploration mode to help visualize the spatial complexity of guides in a 3D gesture set. A replication of the original experiment protocol is used to compare OctoPocus in VR with a VR implementation of a crib-sheet. Results show that despite requiring 0.9s more reaction time than crib-sheet, OctoPocus enables participants to execute gestures 1.8s faster with 13.8 percent more accuracy during training, while remembering a comparable number of gestures. Subjective ratings support these results, 75 percent of participants found OctoPocus easier to learn and 83 percent found it more accurate. We contribute an implementation and empirical evidence demonstrating that an adaptation of the OctoPocus guide to VR is feasible and beneficial.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Bau and Mackays OctoPocus dynamic guide helps novices learn, execute, and remember 2D surface gestures. We adapt OctoPocus to 3D mid-air gestures in Virtual Reality (VR) using an optimization-based recognizer, and by introducing an optional exploration mode to help visualize the spatial complexity of guides in a 3D gesture set. A replication of the original experiment protocol is used to compare OctoPocus in VR with a VR implementation of a crib-sheet. Results show that despite requiring 0.9s more reaction time than crib-sheet, OctoPocus enables participants to execute gestures 1.8s faster with 13.8 percent more accuracy during training, while remembering a comparable number of gestures. Subjective ratings support these results, 75 percent of participants found OctoPocus easier to learn and 83 percent found it more accurate. 
We contribute an implementation and empirical evidence demonstrating that an adaptation of the OctoPocus guide to VR is feasible and beneficial.", "title": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality", "normalizedTitle": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality", "fno": "09507320", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Gesture Recognition", "Human-Computer Interaction", "Learning (Artificial Intelligence)", "Solid Modeling", "Virtual Reality", "Optimization-Based Recognizer", "Optional Exploration Mode", "VR Implementation", "Crib Sheet", "OctoPocus Guide", "Dynamic Guide", "2D Surface Gestures", "3D Mid-Air Gestures", "Three-Dimensional Displays", "Feedforward Systems", "User Interfaces", "Rendering (Computer Graphics)", "Evaluation Methodology", "Artificial, Augmented, and Virtual Realities" ], "authors": [ { "givenName": "Katherine", "surname": "Fennedy", "fullName": "Katherine Fennedy", "affiliation": "Information Systems Technology and Design Pillar, Singapore University of Technology and Design, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Jeremy", "surname": "Hartmann", "fullName": "Jeremy Hartmann", "affiliation": "Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Quentin", "surname": "Roy", "fullName": "Quentin Roy", "affiliation": "Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Simon Tangi", "surname": "Perrault", "fullName": "Simon Tangi Perrault", "affiliation": "Information Systems Technology and Design Pillar, Singapore University of Technology and Design, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Vogel", "fullName": "Daniel Vogel", "affiliation": "Cheriton School of Computer Science, University of Waterloo, Waterloo, ON, Canada", "__typename": "ArticleAuthorType" } ], "replicability": { "isEnabled": true, "codeDownloadUrl": "https://github.com/kfennedy/OctoPocus-in-VR.git", "codeRepositoryUrl": "https://github.com/kfennedy/OctoPocus-in-VR", "__typename": "ArticleReplicabilityType" }, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2021-12-01 00:00:00", "pubType": "trans", "pages": "4425-4438", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2017/6716/0/07893332", "title": "Mid-air modeling with Boolean operations in VR", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893332/12OmNyGbI5i", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699224", "title": "Mid-Air Fingertip-Based User Interaction in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699224/19F1UrKVyhy", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/vrw/2022/8402/0/840200a582", "title": "Multi-Touch Smartphone-Based Progressive Refinement VR Selection", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a582/1CJcBfmyX5K", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a964", "title": "Mid-air Haptic Texture Exploration in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a964/1CJeOwwf1Nm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a637", "title": "Blending On-Body and Mid-Air Interaction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a637/1JrRmvhGko0", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798106", "title": "VR-MOOCs: A Learning Management System for VR Education", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798106/1cJ0Pvi3gwo", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vl-hcc/2020/6901/0/09127275", "title": "User Elicited Hand Gestures for VR-based Navigation of Architectural Designs", "doi": null, "abstractUrl": "/proceedings-article/vl-hcc/2020/09127275/1lvQ0XY7lza", "parentPublication": { "id": "proceedings/vl-hcc/2020/6901/0", "title": "2020 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a713", "title": "Exploring Body Gestures for Small Object Selection in Dense Environment in HMD VR for Data Visualization Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a713/1tnX59fALbG", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a556", "title": "Personal Identifiability of User Tracking Data During VR Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a556/1tnXbEAaBdm", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09128027", "articleId": "1l3unTAaNuE", "__typename": "AdjacentArticleType" }, "next": { "fno": "09130956", "articleId": "1l6OgqspUL6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1y11p6PudHy", "name": "ttg202112-09507320s1-supp1-3101854.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202112-09507320s1-supp1-3101854.mp4", "extension": "mp4", "size": "58.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }