<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JDERM</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Dermatol</journal-id>
      <journal-title>JMIR Dermatology</journal-title>
      <issn pub-type="epub">2562-0959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta><article-id pub-id-type="pmid">39475766</article-id>
      <article-id pub-id-type="publisher-id">v5i2e35497</article-id>
      <article-id pub-id-type="doi">10.2196/35497</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Review</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Review</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Current Landscape of Generative Adversarial Networks for Facial Deidentification in Dermatology: Systematic Review and Evaluation</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Dellavalle</surname>
            <given-names>Robert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Sivesind</surname>
            <given-names>Torunn</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mars</surname>
            <given-names>Maurice</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Parker</surname>
            <given-names>Eva</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Park</surname>
            <given-names>Christine</given-names>
          </name>
          <degrees>BA</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Dermatology</institution>
            <institution>Duke University Medical Center</institution>
            <addr-line>40 Duke Medicine Cir.</addr-line>
            <addr-line>Durham, NC, 27710</addr-line>
            <country>United States</country>
            <phone>1 7757728063</phone>
            <email>cp268@duke.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0066-366X</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Jeong</surname>
            <given-names>Hyeon Ki</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6680-2012</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Henao</surname>
            <given-names>Ricardo</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4980-845X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Kheterpal</surname>
            <given-names>Meenal</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0460-6400</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Dermatology</institution>
        <institution>Duke University Medical Center</institution>
        <addr-line>Durham, NC</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Biostatistics and Bioinformatics</institution>
        <institution>Duke University</institution>
        <addr-line>Durham, NC</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Christine Park <email>cp268@duke.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Apr-Jun</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>27</day>
        <month>5</month>
        <year>2022</year>
      </pub-date>
      <volume>5</volume>
      <issue>2</issue>
      <elocation-id>e35497</elocation-id>
      <history>
        <date date-type="received">
          <day>8</day>
          <month>12</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>2</day>
          <month>3</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>27</day>
          <month>3</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>16</day>
          <month>4</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Christine Park, Hyeon Ki Jeong, Ricardo Henao, Meenal Kheterpal. Originally published in JMIR Dermatology (http://derma.jmir.org), 27.05.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Dermatology, is properly cited. The complete bibliographic information, a link to the original publication on http://derma.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://derma.jmir.org/2022/2/e35497" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Deidentifying facial images is critical for protecting patient anonymity in the era of increasing tools for automatic image analysis in dermatology.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this paper was to review the current literature in the field of automatic facial deidentification algorithms.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We conducted a systematic search using a combination of headings and keywords to encompass the concepts of facial deidentification and privacy preservation. The MEDLINE (via PubMed), Embase (via Elsevier), and Web of Science (via Clarivate) databases were queried from inception to May 1, 2021. Studies of incorrect design and outcomes were excluded during the screening and review process.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 18 studies reporting on various methodologies of facial deidentification algorithms were included in the final review. The study methods were rated individually regarding their utility for use cases in dermatology pertaining to skin color and pigmentation preservation, texture preservation, data utility, and human detection. Most studies that were notable in the literature addressed feature preservation while sacrificing skin color and texture.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Facial deidentification algorithms are sparse and inadequate for preserving both facial features and skin pigmentation and texture quality in facial photographs. A novel approach is needed to ensure greater patient anonymity, while increasing data access for automated image analysis in dermatology for improved patient care.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>facial recognition</kwd>
        <kwd>deidentification</kwd>
        <kwd>facial photographs</kwd>
        <kwd>HIPAA</kwd>
        <kwd>dermatology</kwd>
        <kwd>guidelines</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Facial Deidentification in Dermatology</title>
        <p>Over the last several years, there has been an explosion of artificial intelligence (AI) and deep learning for dermatological image analysis. These tools have demonstrated efficacy in the diagnosis and quantification of skin conditions at par with or surpassing human performance [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Additionally, there have been use cases in dermatology where the human eye is unable to precisely quantify the burden of disease, while AI can be used to support the clinical decision-making process with better consistency [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>].</p>
        <p>Facial image data are needed for developing models that evaluate attributes such as redness (ie, acne and rosacea models), texture (ie, wrinkles and aging models), pigmentation (ie, melasma, seborrheic keratoses, aging, and postinflammatory hyperpigmentation models), and skin lesions. To advance AI in dermatology, image data are needed at scale. For patient data to be used for research, consent may be obtained; however, for data at scale where this is not possible, adequate deidentification must be applied to images. Traditionally, journals have required facial feature concealment that typically covers the eyes, but these guidelines are largely insufficient to meet the ethical and legal guidelines from the Health Insurance Portability and Accountability Act for patient privacy and identity protection [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Facial features, tattoos, jewelry, birthmarks, and other identity-informative background features are additional features that are considered identifying; facial feature deidentification is considered the most challenging task, given a lack of expert consensus and a lack of testing infrastructure and quantitative metrics for adequacy of automatic and manual facial image deidentification algorithms.</p>
        <p>Identity protection challenges extend to other industries involved with facial images as well as video privacy. Hence, there have been increasing efforts to propose facial deidentification algorithms in the literature with corresponding use cases. Ideally, the methods should both hide the original identity of participants and preserve data reusability. We hypothesize that automated facial deidentification algorithms currently proposed in the literature may be useful for dermatological research use. To this end, we conducted a systematic review to search for studies reporting facial deidentification and summarized their proposed methodology and application to image analysis in dermatology.</p>
      </sec>
      <sec>
        <title>Comparison of Different Facial Deidentification Algorithms</title>
        <p>Conventional methods of ad hoc facial deidentification use blur [<xref ref-type="bibr" rid="ref7">7</xref>], pixelation [<xref ref-type="bibr" rid="ref8">8</xref>], masking, random swapping, perturbation, and face region replacement [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref18">18</xref>] to obfuscate parts or entire images to protect visual privacy. This set of obfuscating techniques prevents the rendering of the original image, but they do not necessarily guarantee preservation of privacy (ie, masks and blur can be removed) and often compromise data utility (ie, preservation of dermatological characteristics with diagnostic value) [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. To test if these techniques protect privacy, studies have explored whether these methods can fool computer and human detection. Many studies have successfully avoided detection by use of computer algorithms but have found that human eyes can easily notice the alteration [<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref24">24</xref>]. Furthermore, simply applying distorting filters to images risks identity revelation after reconstruction [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
        <p>The <italic>k</italic>-anonymity–based algorithms were proposed as one of the original feasible approaches in solving this issue of data utility after deidentification [<xref ref-type="bibr" rid="ref25">25</xref>]. Briefly, the <italic>k</italic>-anonymity–based methods and their variations deidentify an image by replacing the face with the average of <italic>k</italic> images from a given collection of images, and they achieve privacy protection with a rate lower than 1/<italic>k</italic>. The most commonly used <italic>k</italic>-algorithm is from the <italic>k</italic>-Same family [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. However, one of the key issues with the variations of the <italic>k</italic>-Same algorithm is the introduction of ghosting artifacts caused by the misalignment of images. The ghosting artifacts compromise privacy protection by making the images appear unnatural. The ghosting effect can be overcome by employing a large <italic>k</italic> in the algorithms, but this requires a large image collection, otherwise it results in a lack of distinction among the deidentified faces; this is because the number of discriminative faces in the deidentified face set is limited by the total number of images divided by <italic>k</italic>. This is problematic for applications in skin image analysis in dermatology because adequate privacy protection is achieved with averaging a greater number of images, which, in turn, will dilute redness, pigmentation, and other image attributes that are critical to dermatologic data utility. In other words, there is an intrinsic trade-off when choosing <italic>k</italic> between identifiability and preservation of dermatological features.</p>
        <p>The <italic>k</italic>-Same-M algorithm was developed to eliminate the ghosting effects in order to enhance privacy protection with minimal loss of data utility [<xref ref-type="bibr" rid="ref26">26</xref>]. This algorithm uses an active appearance model (AAM), which is an algorithm that can reconstruct an image representation based on its shape and texture [<xref ref-type="bibr" rid="ref26">26</xref>]. In this way, an AAM coupled with the <italic>k</italic>-based algorithms can help reduce the ghosting effect in the deidentified images by ensuring a better alignment of the synthesized identity onto the original images. However, the reconstructed images from an AAM are still averaged images from the respective data set and, hence, some important aspect of data utility, such as facial expression, could be compromised.</p>
        <p>Another technique for achieving facial deidentification is through the use of machine learning methods involving deep neural networks [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. Convolutional neural networks (CNNs) are effective in extracting features from raw faces and, hence, facilitate image transformation into target outcomes. Limitations associated with methods involving CNNs and convolutional autoencoders are that they are time costly because they require a large sample size to be trained and optimized. Specifically for CNNs, these are supervised algorithms that also need labels for ground-truth classifications. Furthermore, the output images are still not natural enough to effectively preserve privacy.</p>
        <p>Generative neural networks (GNNs) constitute a novel method to generate realistic face surrogates that can be used for deidentification. This quality can be exploited to retain skin attribute quality from a source image of interest. These also allow for retaining certain aspects of the original data, such as age, gender, and facial expressions, while replacing sensitive personal attributes with artificial objects, such as facial features. GNNs are originally based on generative adversarial networks (GAN), which combine a generative model that produces a synthetic image and a discriminator (ie, critic) network that classifies the synthetic image as either real or artificial. This method works by training the discriminator network as a standard classifier to distinguish between the two image sources as real or artificial and training the generative network as an image-generating model that can fool the discriminator network, with the goal of generating the most realistic-appearing synthetic images [<xref ref-type="bibr" rid="ref32">32</xref>]. The model is improved in an adversarial manner via back-propagation with both generative and discriminator networks to identify the generator’s parameters that should be optimized to make the generated images increasingly challenging for the discriminator. After completion of training, the output images from the generator network should be indistinguishable from the real images for the discriminator as well as look visually convincing for humans [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref35">35</xref>].</p>
        <p>The use of GANs in facial deidentification is intriguing due to their potential for disentanglement of facial features and skin attributes. Theoretically, facial images can be deidentified by a GAN that recognizes facial features, such as eyes, nose, and lips, and then replaces them with features from another facial image, while continuing to preserve the realistic-appearing facial image as well as features of interest, such as redness, pigmentation, texture, and skin lesions. Hence, based on their high data utility, GANs hold the promise of privacy protection by completely changing image identification by human and automated detection. This study focused on reviewing the GAN-based models published to date for facial deidentification for dermatologic use cases. We also evaluated the performance of top-performing GANs in deidentifying dermatological images while preserving the important facial and skin quality features in these images.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Search Strategy</title>
        <p>We conducted a systematic search using a combination of headings and keywords to encompass the concepts of facial deidentification and privacy preservation. The MEDLINE (via PubMed), Embase (via Elsevier), and Web of Science (via Clarivate) databases were queried from inception to May 1, 2021. We also performed referential backtracking on the most recent studies to ensure inclusion of all relevant articles. Studies of incorrect design and outcomes were excluded during the screening and review process. The search strategies are outlined in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
      </sec>
      <sec>
        <title>Definitions and Inclusion and Exclusion Criteria</title>
        <p>Facial features were defined as identifying features associated with an individual, including the eyebrows, eyes, nose, mouth, and ears. For deidentification in dermatologic use cases, these features are important to remove and replace. The skin was then defined as the remaining facial area bounded by the hairline. Preservation of skin quality by algorithms was evaluated as to how well the algorithms preserved the quality of the skin tone and texture from the input images. We included studies that focused on variations of the GAN algorithm for the purpose of facial deidentification in images, video, or both. Studies were excluded if they focused on any other facial deidentification algorithms due to low preservation of pixel-level skin quality based on the methodology.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>This study was approved by the Institutional Review Board (Retrospective cutaneous dermato-oncological conditions treated by dermatology service) for protocol No. Pro00100765. Patient consent was not required due to the nature of this study.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>A total of 18 studies using GAN methodology were included in the final review (<xref rid="figure1" ref-type="fig">Figure 1</xref>). <xref ref-type="table" rid="table1">Table 1</xref> [<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref53">53</xref>] summarizes the different types of GAN algorithms and the goals of all the studies as well as an evaluation of their ability to preserve skin quality (ie, color and texture), capacity for data utility, and demonstration of adequate facial deidentification with human eyes based on the results illustrated in the studies. We then applied two of the best GAN-based algorithms that were publicly available to the SD-260 (260 classes of skin diseases) data set [<xref ref-type="bibr" rid="ref54">54</xref>], a public data set of images of dermatological conditions, to assess whether the output images appropriately preserved skin quality.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) diagram.</p>
          </caption>
          <graphic xlink:href="derma_v5i2e35497_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Overview of included GAN-based studies.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="150"/>
            <col width="160"/>
            <col width="290"/>
            <col width="100"/>
            <col width="0"/>
            <col width="90"/>
            <col width="0"/>
            <col width="90"/>
            <col width="0"/>
            <col width="120"/>
            <thead>
              <tr valign="top">
                <td>Author, year</td>
                <td>Method of facial deidentification</td>
                <td>Novelty in proposed method of facial deidentification</td>
                <td colspan="4">Skin attribute preservation</td>
                <td colspan="2">Data utility</td>
                <td>Facial deidentification (human)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>Color</td>
                <td colspan="2">Texture</td>
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Pan et al, 2019 [<xref ref-type="bibr" rid="ref36">36</xref>]</td>
                <td><italic>k</italic>-Same-Siamese-GAN<sup>a</sup></td>
                <td>Maintenance of high resolution of images to preserve their utility</td>
                <td colspan="2">Partial</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Song et al, 2019 [<xref ref-type="bibr" rid="ref37">37</xref>]</td>
                <td>Evolutionary GAN</td>
                <td>Structural similarity index and the distance between the original face and the deidentified face</td>
                <td colspan="2">Partial</td>
                <td>Partial</td>
                <td colspan="2">Low</td>
                <td colspan="2">No</td>
              </tr>
              <tr valign="top">
                <td>Agarwal et al, 2021 [<xref ref-type="bibr" rid="ref38">38</xref>]</td>
                <td>StyleGAN and GAN</td>
                <td>Preservation of emotion and nonbiometric facial attributes of a target face</td>
                <td colspan="2">N/A<sup>b</sup></td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Nitzan et al, 2020 [<xref ref-type="bibr" rid="ref39">39</xref>]</td>
                <td>Disentanglement coupled with GAN</td>
                <td>Disentanglement of identity from other facial attributes with minimal training</td>
                <td colspan="2">Yes</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">No</td>
              </tr>
              <tr valign="top">
                <td>Lin et al, 2021 [<xref ref-type="bibr" rid="ref40">40</xref>]</td>
                <td>Facial privacy GAN for social robots</td>
                <td>Strengthened feature-extraction ability to improve the discriminatory accuracy</td>
                <td colspan="2">Partial</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Partial</td>
              </tr>
              <tr valign="top">
                <td>Maximov et al, 2020 [<xref ref-type="bibr" rid="ref41">41</xref>]</td>
                <td>Conditional identity anonymization GAN</td>
                <td>Development of a model for image and video anonymization with removal of identifying characteristics of faces and bodies</td>
                <td colspan="2">Yes</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Brkic et al, 2017 [<xref ref-type="bibr" rid="ref42">42</xref>]</td>
                <td>Conditional GAN</td>
                <td>Production of realistic deidentified human images that avoid human- and machine-based recognition</td>
                <td colspan="2">N/A</td>
                <td>N/A</td>
                <td colspan="2">Low</td>
                <td colspan="2">N/A</td>
              </tr>
              <tr valign="top">
                <td>Meden et al, 2017 [<xref ref-type="bibr" rid="ref43">43</xref>]</td>
                <td>Generative neural network</td>
                <td>Synthesis of artificial surrogate faces with preservation of nonidentity-related aspects of the data for data use</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Mirjalili et al, 2017 [<xref ref-type="bibr" rid="ref44">44</xref>]</td>
                <td>Convolutional autoencoder using semiadversarial network</td>
                <td>Autoencoder-based transformation of an input face image</td>
                <td colspan="2">N/A</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">No</td>
              </tr>
              <tr valign="top">
                <td>Radford et al, 2016 [<xref ref-type="bibr" rid="ref45">45</xref>]</td>
                <td>DCGAN<sup>c</sup></td>
                <td>Unsupervised GAN</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">No</td>
              </tr>
              <tr valign="top">
                <td>Wu et al, 2019 [<xref ref-type="bibr" rid="ref46">46</xref>]</td>
                <td>Privacy-protective GAN</td>
                <td>Privacy protection, utility preservation, and structure similarity</td>
                <td colspan="2">N/A</td>
                <td>Partial</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Hukkelås et al, 2019 [<xref ref-type="bibr" rid="ref47">47</xref>]</td>
                <td>Conditional GAN</td>
                <td>Novel generator architecture for face anonymization via synthesis of realistic faces</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Ren et al, 2018 [<xref ref-type="bibr" rid="ref48">48</xref>]</td>
                <td>Multitask extension of GAN</td>
                <td>Deidentification in video with preservation of action</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Sun et al, 2018 [<xref ref-type="bibr" rid="ref49">49</xref>]</td>
                <td>DCGAN</td>
                <td>Novel head inpainting obfuscation technique</td>
                <td colspan="2">Partial</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Sun et al, 2018 [<xref ref-type="bibr" rid="ref50">50</xref>]</td>
                <td>GAN</td>
                <td>New hybrid approach for identity obfuscation in photos via head replacement</td>
                <td colspan="2">Partial</td>
                <td>No</td>
                <td colspan="2">Low</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Bao et al, 2018 [<xref ref-type="bibr" rid="ref51">51</xref>]</td>
                <td>GAN</td>
                <td>Disentanglement of identity and attributes from faces for recombination into different identities and attributes for identity-preserving face synthesis in open domains</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">No</td>
              </tr>
              <tr valign="top">
                <td>Li et al, 2019 [<xref ref-type="bibr" rid="ref52">52</xref>]</td>
                <td>Adaptive embedding integration network</td>
                <td>High-fidelity face swapping</td>
                <td colspan="2">Yes</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">Yes</td>
              </tr>
              <tr valign="top">
                <td>Nirkin et al, 2019 [<xref ref-type="bibr" rid="ref53">53</xref>]</td>
                <td>Face-swapping GAN</td>
                <td>Face re-enactment with adjustment for pose and expression variations</td>
                <td colspan="2">No</td>
                <td>No</td>
                <td colspan="2">High</td>
                <td colspan="2">Yes</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>GAN: generative adversarial network.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>N/A: not applicable; this information was not reported in this study.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>DCGAN: deep convolutional generative adversarial network.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Disentanglement-Coupled GAN</title>
        <p>One of the algorithms we chose was the disentanglement-coupled GAN presented by Nitzan et al [<xref ref-type="bibr" rid="ref39">39</xref>]. The goal of this model is to generate an image by combining the identity of a given identity image with the attributes extracted from an attribute image. The authors generate 70,000 images using StyleGAN [<xref ref-type="bibr" rid="ref55">55</xref>], which are then used as the training data set. Identity is preserved by penalizing the identity difference between the identity image and the output image. Attribute preservation is achieved by penalizing the pixel-level and facial landmark differences between the attribute image and the output image. The network architecture is illustrated in <xref rid="figure2" ref-type="fig">Figure 2</xref>.</p>
        <p>The performance of this method was compared against previously published methods, such as latent optimization for representation disentanglement [<xref ref-type="bibr" rid="ref56">56</xref>], FaceShifter [<xref ref-type="bibr" rid="ref52">52</xref>], and face-swapping GAN [<xref ref-type="bibr" rid="ref53">53</xref>], for qualitative assessment; the performance was also compared against the adversarial latent autoencoder (ALAE) method [<xref ref-type="bibr" rid="ref57">57</xref>] and the pixel2style2pixel (pSp) method [<xref ref-type="bibr" rid="ref58">58</xref>] for quantitative assessment. Qualitatively, the authors demonstrated that their method showed better preservation for facial expression (ie, attribute image), head shape, and hair (ie, identity image) compared to the other models noted above. Quantitatively, the reconstruction performance was assessed by measuring pixel-wise reconstruction and preservation of semantic features, followed by comparison of the outcome to that of the ALAE and pSp methods. This evaluation indicated that the pSp method showed better performance, but the authors emphasized that their method was mainly for disentanglement and was not necessarily designed to reconstruct pixel-level information. This indicates that the model was able to replace and preserve realistic facial features, head shape, hair, and expressions due to superior performance of the disentanglement component while compromising pixel-level detail.</p>
        <p>When applying the disentanglement-coupled GAN to the SD-260 data set, there were two sources for the input data: one for <italic>identity</italic> and another for <italic>attribute</italic>. For this model, we experimented with whether the attributes, such as redness and pigmentation, of the faces from the dermatological images could be encoded in a new identity. <xref rid="figure3" ref-type="fig">Figure 3</xref>A shows the qualitative results derived from the model: in the data set where the images of interest, with redness and pigmentation, are the <italic>attribute</italic> images, there is no transfer of skin features of interest, only transfer of facial positions and expressions. <xref rid="figure3" ref-type="fig">Figure 3</xref>B shows that when the images of interest are the <italic>identity</italic> images, features are transferred without pixel-level accuracy to preserve high data utility for dermatology use. Overall, we can see that while the model generates realistic faces, it is unable to preserve pixel-level details of the faces.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Disentanglement scheme. Solid lines indicate data flow and dashed lines indicate data loss. The identity and attribute codes are first extracted from two input images using encoders <italic>E<sub>id</sub></italic> and <italic>E<sub>attr</sub></italic>, respectively. Through the mapping network <italic>M</italic>, the concatenated codes are mapped to <italic>W</italic>, the latent space of the pretrained generator <italic>G</italic>, which, in turn, generates the resulting image. An adversarial loss <italic>L<sub>adv</sub></italic> ensures proper mapping to the <italic>W</italic> space. Identity preservation is encouraged using <italic>L<sub>id</sub></italic>, which penalizes differences in identity between <italic>I<sub>id</sub></italic> and <italic>I<sub>out</sub></italic>. Attribute preservation is encouraged using <italic>L<sub>rec</sub></italic> and <italic>L<sub>lnd</sub></italic>, which penalize pixel-level and facial landmark differences, respectively, between <italic>I<sub>attr</sub></italic> and <italic>I<sub>out</sub></italic> (reproduced from Nitzan et al [<xref ref-type="bibr" rid="ref39">39</xref>], with permission from Yotam Nitzan). D<sub>w</sub>: discriminator; Elnd: landmark encoder; z: latent code.</p>
          </caption>
          <graphic xlink:href="derma_v5i2e35497_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Output using the disentanglement-coupled GAN on dermatological images derived from the SD-260 data set. (A) Identity images assuming the facial pose and alteration of facial features from the attribute images. The attribute images fail to transfer the features of interest (ie, redness and pigmentation). (B) When switching the identity images to the images with features of interest, the model fails to preserve the dermatological features. GAN: generative adversarial network; SD-260: 260 classes of skin diseases.</p>
          </caption>
          <graphic xlink:href="derma_v5i2e35497_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Conditional Identity Anonymization GAN</title>
        <p>The goal of this paper was to develop a model that can deidentify images and videos while preserving features for other computer vision tasks, such as detection, tracking, or recognition [<xref ref-type="bibr" rid="ref41">41</xref>]. The overview of the methodology is as follows. The method first extracted the landmarks of a given image that contained a sparse representation of the face with limited information on the identity. This allowed the generator to adjust to the face shape, which enabled better preservation of the input pose. The authors used only the face silhouette, the mouth, and the bridge of the nose instead of using all 68 landmarks in order to allow the network to freely choose the facial features. The method also extracted masked background images to allow the model to learn to generate faces and not the background. Once the landmark and the background were extracted, the method used a conditional GAN (CGAN) [<xref ref-type="bibr" rid="ref59">59</xref>] to generate realistic images by encoding the landmark and masked image and combining them with the identity images to feed into the decoder. The generated output image was then fed into the identity discriminator network to prevent the network from generating faces similar to the training data set and to ensure facial anonymization. The model architecture is shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>.</p>
        <p>The model was trained and evaluated on three public data sets: CelebA (CelebFaces Attributes), MOTS (Multi-Object Tracking and Segmentation), and Labeled Faces in the Wild. The performance of the model was assessed by using face detection and reidentification metrics with other existing methods, such as blurring and pixelization. When compared with a state-of-the-art facial deidentification method by Gafni et al [<xref ref-type="bibr" rid="ref60">60</xref>], conditional identity anonymization GAN (CIAGAN) showed better deidentification rates by computer detection on two different data sets. The authors concluded that their method can both deidentify the source images better and generate much more diverse images compared to Gafni et al’s method.</p>
        <p>When we applied the CIAGAN to the SD-260 data set, we first processed the landmarks of the dermatological images. Then, we allowed the model to deidentify each individual’s face from the processed landmark and background images. The model was pretrained using 1200 identities from the CelebA data set. <xref rid="figure5" ref-type="fig">Figure 5</xref> shows the result from this model. The qualitative results show a reduction in pixel-level resolution as well as poor preservation of the dermatological attributes of interest in the mid to lower part of the face, while preserving the skin features of interest (ie, redness and pigmentation) in the forehead area. While this is a good method for facial swapping, CGAN at this level fails to preserve significant areas of interest with high-utility pixel-level detail.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>CIAGAN model scheme. The model takes the image and its landmarks, the masked face, and the desired identity as input. The generator is an encoder-decoder model where the encoder embeds the image information into a low-dimensional space. The identity given as a one-hot label is encoded via a transposed convolutional neural network and is fed into the bottleneck of the generator. Then, the decoder decodes the combined information of source images and the identities into a generated image. The generator plays an adversarial game with a discriminator in a standard GAN setting. Finally, the identity discriminator network is introduced, whose goal is to provide a guiding signal to the generator about the desired identity of the generated face (reproduced from Maximov et al [<xref ref-type="bibr" rid="ref41">41</xref>], with permission from Laura Leal-Taixe). CIAGAN: conditional identity anonymization generative adversarial network; GAN: generative adversarial network.</p>
          </caption>
          <graphic xlink:href="derma_v5i2e35497_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Output using CIAGAN on dermatological images derived from the SD-260 data set. Images on the left serve as source images, and a facial swap is done on the mid and lower part of the face for the images on the right. Generated images are of poor quality and only partially preserve facial attributes. CIAGAN: conditional identity anonymization generative adversarial network; SD-260: 260 classes of skin diseases.</p>
          </caption>
          <graphic xlink:href="derma_v5i2e35497_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>Apart from the conventional facial deidentification methods, many of the advanced algorithms aim to preserve key facial features and expressions while maintaining privacy protection for the input images. Specifically, for GANs, there exist three major general limitations with these algorithms. Firstly, the outputs from these models that use face synthesis exhibit significant similarities between the synthetic and original images [<xref ref-type="bibr" rid="ref61">61</xref>], which can be detected via human evaluation. Many of the currently existing algorithms are effective at modifying the images to avoid identification by face recognition software [<xref ref-type="bibr" rid="ref17">17</xref>] but are not good enough to pass deidentification by humans. Thus, additional effort needs to be focused on addressing human detection, such as facial feature swap. Secondly, it is difficult to integrate the synthesized faces smoothly into the original image without making the images look unnatural, which compromises privacy protection [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref62">62</xref>]. Finally, synthetic faces can decrease data usability due to changes in skin attributes, such as tone and texture, and due to changes in patient identity, such as age, gender, and race [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref63">63</xref>-<xref ref-type="bibr" rid="ref65">65</xref>]. Particularly for medical applications, even with the recently developed, well-intentioned algorithms, such as disentanglement and CIAGAN, the existing facial deidentification models fail to precisely and accurately preserve the color and texture of the facial skin in their attempt to protect the identity of individuals with dermatological conditions, such as rosacea and melasma, among others, included in the data sets. Hence, the challenge involved with sharing large data sets that include facial images of patients with dermatological conditions, while adequately protecting their identity, remains unresolved.</p>
        <p>The current standards for deidentifying patient images involve blurring, pixelating, and masking out important identifying facial features, such as the eyes and eyebrows [<xref ref-type="bibr" rid="ref6">6</xref>]. Kuang et al [<xref ref-type="bibr" rid="ref66">66</xref>] showed that pixelation and blurring demonstrate high deidentification performance on computer detection compared to other advanced methods, such as privacy-protective GAN [<xref ref-type="bibr" rid="ref67">67</xref>], natural and effective obfuscation [<xref ref-type="bibr" rid="ref49">49</xref>], and AnonymousNet [<xref ref-type="bibr" rid="ref63">63</xref>], which is one of the reasons that they remain as popular methods of facial deidentification. However, these conventional methods are at risk of identity restoration via decoding and reconstruction.</p>
        <p>We propose that an ideal facial deidentification algorithm for dermatological application needs to (1) preserve facial architectural (ie, shape and gender) and skin features (ie, color and texture) to maintain data utility, while achieving adequate deidentification, and (2) avoid detection by computer and human analysis. To optimally protect the privacy of individuals in the images, the algorithm must be able to modify the image in a way that will be perceived as unaltered. In other words, the replacement identity will need to fuse well with the original content of the image. However, while altering the original content of the image, the skin attributes have to be preserved well enough so that the data utility of the data set involving the dermatological condition is not lost.</p>
        <p>Herein, we demonstrate the utility of GAN-based facial deidentification methods to serve as use cases for AI development in dermatology, such as models quantifying redness (acne, rosacea, dermatitis, etc), pigmentation (melasma, postinflammatory hyperpigmentation, lentigines, etc), and texture (aging-related changes, volumetric assessment for neurotoxins or fillers, etc). While GAN development efforts for facial deidentification are not currently focused on skin-based use cases, focusing future efforts to achieve these goals can lead to an optimal facial deidentification model for dermatology.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Although facial deidentification is a rapidly evolving field with several advanced algorithms for achieving facial deidentification by computer-level recognition, their application to dermatology use cases is currently suboptimal. However, GAN-based models have the potential to preserve skin attributes while replacing facial features that risk detection, holding promise to solve the dilemma of data sharing while preserving patient privacy and identity. Future work should focus on developing a model that can achieve both skin attribute preservation as well as detection avoidance by both computers and humans.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Search strategy.</p>
        <media xlink:href="derma_v5i2e35497_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 40 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AAM</term>
          <def>
            <p>active appearance model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ALAE</term>
          <def>
            <p>adversarial latent autoencoder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CelebA</term>
          <def>
            <p>CelebFaces Attributes</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">CGAN</term>
          <def>
            <p>conditional generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">CIAGAN</term>
          <def>
            <p>conditional identity anonymization generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">GAN</term>
          <def>
            <p>generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">GNN</term>
          <def>
            <p>generative neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">LORD</term>
          <def>
            <p>latent optimization for representation disentanglement</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">MOTS</term>
          <def>
            <p>Multi-Object Tracking and Segmentation</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">pSp</term>
          <def>
            <p>pixel2style2pixel</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">SD-260</term>
          <def>
            <p>260 classes of skin diseases</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Du-Harpur</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Watt</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Luscombe</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Lynch</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>What is AI? Applications of artificial intelligence to dermatology</article-title>
          <source>Br J Dermatol</source>
          <year>2020</year>
          <month>09</month>
          <volume>183</volume>
          <issue>3</issue>
          <fpage>423</fpage>
          <lpage>430</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31960407"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/bjd.18880</pub-id>
          <pub-id pub-id-type="medline">31960407</pub-id>
          <pub-id pub-id-type="pmcid">PMC7497072</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esteva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kuprel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Novoa</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Swetter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Blau</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Thrun</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>
          <source>Nature</source>
          <year>2017</year>
          <month>02</month>
          <day>02</day>
          <volume>542</volume>
          <issue>7639</issue>
          <fpage>115</fpage>
          <lpage>118</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28117445"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
          <pub-id pub-id-type="medline">28117445</pub-id>
          <pub-id pub-id-type="pii">nature21056</pub-id>
          <pub-id pub-id-type="pmcid">PMC8382232</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sarda</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Use of artificial intelligence in dermatology</article-title>
          <source>Indian J Dermatol</source>
          <year>2020</year>
          <volume>65</volume>
          <issue>5</issue>
          <fpage>352</fpage>
          <pub-id pub-id-type="doi">10.4103/ijd.ijd_418_20</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Way</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>de Oliveira Marinho</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hartford</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sayres</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kanada</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Eng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Nagpal</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>DeSalvo</surname>
              <given-names>KB</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Dunn</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Coz</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bui</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Development and assessment of an artificial intelligence-based tool for skin condition diagnosis by primary care physicians and nurse practitioners in teledermatology practices</article-title>
          <source>JAMA Netw Open</source>
          <year>2021</year>
          <month>04</month>
          <day>01</day>
          <volume>4</volume>
          <issue>4</issue>
          <fpage>e217249</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jamanetwork.com/journals/jamanetworkopen/fullarticle/10.1001/jamanetworkopen.2021.7249"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2021.7249</pub-id>
          <pub-id pub-id-type="medline">33909055</pub-id>
          <pub-id pub-id-type="pii">2779250</pub-id>
          <pub-id pub-id-type="pmcid">PMC8082316</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>International Committee of Medical Journal Editors</collab>
          </person-group>
          <article-title>Protection of patients' rights to privacy</article-title>
          <source>BMJ</source>
          <year>1995</year>
          <month>11</month>
          <day>11</day>
          <volume>311</volume>
          <issue>7015</issue>
          <fpage>1272</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/11644736"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bmj.311.7015.1272</pub-id>
          <pub-id pub-id-type="medline">11644736</pub-id>
          <pub-id pub-id-type="pmcid">PMC2551184</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roberts</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Troiano</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Spiegel</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>Standardization of guidelines for patient photograph deidentification</article-title>
          <source>Ann Plast Surg</source>
          <year>2016</year>
          <month>06</month>
          <volume>76</volume>
          <issue>6</issue>
          <fpage>611</fpage>
          <lpage>614</lpage>
          <pub-id pub-id-type="doi">10.1097/SAP.0000000000000817</pub-id>
          <pub-id pub-id-type="medline">27015333</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Neustaedter</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Greenberg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Boyle</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Blur filtration fails to preserve privacy for home-based video conferencing</article-title>
          <source>ACM Trans Comput Hum Interact</source>
          <year>2006</year>
          <month>03</month>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>36</lpage>
          <pub-id pub-id-type="doi">10.1145/1143518.1143519</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boult</surname>
              <given-names>TE</given-names>
            </name>
          </person-group>
          <article-title>PICO: Privacy through invertible cryptographic obscuration</article-title>
          <source>Proceedings of the Computer Vision for Interactive and Intelligent Environment Conference</source>
          <year>2005</year>
          <conf-name>The Computer Vision for Interactive and Intelligent Environment Conference</conf-name>
          <conf-date>November 17-18, 2005</conf-date>
          <conf-loc>Lexington, KY</conf-loc>
          <fpage>27</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1109/cviie.2005.16</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bitouk</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dhillon</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Belhumeur</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nayar</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Face swapping: Automatically replacing faces in photographs</article-title>
          <source>Proceedings of the Special Interest Group on Computer Graphics and Interactive Techniques Conference</source>
          <year>2008</year>
          <conf-name>The Special Interest Group on Computer Graphics and Interactive Techniques Conference</conf-name>
          <conf-date>August 11-15, 2008</conf-date>
          <conf-loc>Los Angeles, CA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1145/1399504.1360638</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boyle</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Greenberg</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The effects of filtered video on awareness and privacy</article-title>
          <source>Proceedings of the 2000 ACM Conference on Computer Supported Cooperative Work</source>
          <year>2000</year>
          <conf-name>2000 ACM Conference on Computer Supported Cooperative Work</conf-name>
          <conf-date>December 2-6, 2000</conf-date>
          <conf-loc>Philadelphia, PA</conf-loc>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <pub-id pub-id-type="doi">10.1145/358916.358935</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Crowley</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Coutaz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bérard</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Perceptual user interfaces: Things that see</article-title>
          <source>Commun ACM</source>
          <year>2000</year>
          <month>03</month>
          <volume>43</volume>
          <issue>3</issue>
          <fpage>54</fpage>
          <pub-id pub-id-type="doi">10.1145/330534.330540</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greenberg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kuzuoka</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Using digital but physical surrogates to mediate awareness, communication and privacy in media spaces</article-title>
          <source>Pers Technol</source>
          <year>1999</year>
          <month>12</month>
          <volume>3</volume>
          <issue>4</issue>
          <fpage>182</fpage>
          <lpage>198</lpage>
          <pub-id pub-id-type="doi">10.1007/bf01540552</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Airoldi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Malin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sweeney</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Integrating utility into face de-identification</article-title>
          <source>Proceedings of the 5th International Workshop on Privacy Enhancing Technologies</source>
          <year>2005</year>
          <conf-name>The 5th International Workshop on Privacy Enhancing Technologies</conf-name>
          <conf-date>May 30-June 1, 2005</conf-date>
          <conf-loc>Cavtat, Croatia</conf-loc>
          <publisher-name>International Workshop on Privacy Enhancing Technologies</publisher-name>
          <fpage>227</fpage>
          <lpage>242</lpage>
          <pub-id pub-id-type="doi">10.1007/11767831_15</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sweeney</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Torre</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Semi-supervised learning of multi-factor models for face de-identification</article-title>
          <source>Proceedings of the 26th IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2008</year>
          <conf-name>The 26th IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 23-28, 2008</conf-date>
          <conf-loc>Anchorage, AK</conf-loc>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2008.4587369</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hudson</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Techniques for addressing fundamental privacy and disruption tradeoffs in awareness support systems</article-title>
          <source>Proceedings of the 1996 ACM Conference on Computer Supported Cooperative Work</source>
          <year>1996</year>
          <conf-name>The 1996 ACM Conference on Computer Supported Cooperative Work</conf-name>
          <conf-date>November 16-20, 1996</conf-date>
          <conf-loc>Boston, MA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>248</fpage>
          <lpage>257</lpage>
          <pub-id pub-id-type="doi">10.1145/240080.240295</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Neustaedter</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Greenberg</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Balancing privacy and awareness in home media spaces</article-title>
          <source>Proceedings of the 5th International Conference on Ubiquitous Computing. Workshop on Ubicomp Communities: Privacy as Boundary Negotiation</source>
          <year>2003</year>
          <conf-name>The 5th International Conference on Ubiquitous Computing. Workshop on Ubicomp Communities: Privacy as Boundary Negotiation</conf-name>
          <conf-date>October 12, 2003</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
          <fpage>1</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.217.1778&#38;rep=rep1&#38;type=pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Newton</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sweeney</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Malin</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Preserving privacy by de-identifying face images</article-title>
          <source>IEEE Trans Knowl Data Eng</source>
          <year>2005</year>
          <month>02</month>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>232</fpage>
          <lpage>243</lpage>
          <pub-id pub-id-type="doi">10.1109/tkde.2005.32</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Stasko</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Evaluating image filtering based techniques in media space applications</article-title>
          <source>Proceedings of the 1998 ACM Conference on Computer Supported Cooperative Work</source>
          <year>1998</year>
          <conf-name>The 1998 ACM Conference on Computer Supported Cooperative Work</conf-name>
          <conf-date>November 14-18, 1998</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Association for Computing Machinery</publisher-name>
          <fpage>11</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1145/289444.289450</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nousi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Papadopoulos</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tefas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pitas</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Deep autoencoders for attribute preserving face de-identification</article-title>
          <source>Signal Process Image Commun</source>
          <year>2020</year>
          <month>02</month>
          <volume>81</volume>
          <fpage>115699</fpage>
          <pub-id pub-id-type="doi">10.1016/j.image.2019.115699</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Benenson</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schiele</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Faceless person recognition: Privacy implications in social media</article-title>
          <source>Proceedings of the 14th European Conference on Computer Vision</source>
          <year>2016</year>
          <conf-name>The 14th European Conference on Computer Vision</conf-name>
          <conf-date>October 8-16, 2016</conf-date>
          <conf-loc>Amsterdam, the Netherlands</conf-loc>
          <fpage>19</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-46487-9_2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Protecting privacy in shared photos via adversarial examples based stealth</article-title>
          <source>Secur Commun Netw</source>
          <year>2017</year>
          <volume>2017</volume>
          <fpage>1</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1155/2017/1897438</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schiele</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Adversarial image perturbation for privacy protection -- A game theory perspective</article-title>
          <source>Proceedings of the 2017 IEEE International Conference on Computer Vision</source>
          <year>2017</year>
          <conf-name>The 2017 IEEE International Conference on Computer Vision</conf-name>
          <conf-date>October 22-29, 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <fpage>1491</fpage>
          <lpage>1500</lpage>
          <pub-id pub-id-type="doi">10.1109/iccv.2017.165</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sim</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Controllable face privacy</article-title>
          <source>Proceedings of the 2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition</source>
          <year>2015</year>
          <conf-name>2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition</conf-name>
          <conf-date>May 4-8, 2015</conf-date>
          <conf-loc>Ljubljana, Slovenia</conf-loc>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/fg.2015.7285018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moosavi-Dezfooli</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Fawzi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fawzi</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Frossard</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Universal adversarial perturbations</article-title>
          <source>Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2017</year>
          <conf-name>The 2017 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>July 21-26, 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>86</fpage>
          <lpage>94</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2017.17</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sweeney</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>k-anonymity: A model for protecting privacy</article-title>
          <source>Int J Uncertain Fuzziness Knowl Based Syst</source>
          <year>2002</year>
          <month>05</month>
          <day>02</day>
          <volume>10</volume>
          <issue>05</issue>
          <fpage>557</fpage>
          <lpage>570</lpage>
          <pub-id pub-id-type="doi">10.1142/S0218488502001648</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sweeney</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Torre</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Model-based face de-identification</article-title>
          <source>Proceedings of the 2006 Conference on Computer Vision and Pattern Recognition Workshop</source>
          <year>2006</year>
          <conf-name>The 2006 Conference on Computer Vision and Pattern Recognition Workshop</conf-name>
          <conf-date>June 17-22, 2006</conf-date>
          <conf-loc>New York, NY</conf-loc>
          <fpage>161</fpage>
          <pub-id pub-id-type="doi">10.1109/cvprw.2006.125</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taigman</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ranzato</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>DeepFace: Closing the gap to human-level performance in face verification</article-title>
          <source>Proceedings of the 2014 IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2014</year>
          <conf-name>The 2014 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 23-28, 2014</conf-date>
          <conf-loc>Columbus, OH</conf-loc>
          <fpage>1701</fpage>
          <lpage>1708</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2014.220</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Tang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Deep learning face representation from predicting 10,000 classes</article-title>
          <source>Proceedings of the 2014 IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2014</year>
          <conf-name>The 2014 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 23-28, 2014</conf-date>
          <conf-loc>Columbus, OH</conf-loc>
          <fpage>1891</fpage>
          <lpage>1898</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2014.244</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schroff</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kalenichenko</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Philbin</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>FaceNet: A unified embedding for face recognition and clustering</article-title>
          <source>Proceedings of the 2015 IEEE Conference on Computer Vision and Pattern Recognition</source>
          <year>2015</year>
          <conf-name>The 2015 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 7-12, 2015</conf-date>
          <conf-loc>Boston, MA</conf-loc>
          <fpage>815</fpage>
          <lpage>823</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2015.7298682</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amos</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ludwiczuk</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Satyanarayanan</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>OpenFace: A general-purpose face recognition library with mobile applications</article-title>
          <source>CMU School of Computer Science</source>
          <year>2016</year>
          <month>06</month>
          <access-date>2022-05-12</access-date>
          <publisher-loc>Pittsburgh, PA</publisher-loc>
          <publisher-name>CMU School of Computer Science</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cs.cmu.edu/~satya/docdir/CMU-CS-16-118.pdf">https://www.cs.cmu.edu/~satya/docdir/CMU-CS-16-118.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Recursive Spatial Transformer (ReST) for alignment-free face recognition</article-title>
          <source>Proceedings of the 2017 IEEE International Conference on Computer Vision</source>
          <year>2017</year>
          <conf-name>2017 IEEE International Conference on Computer Vision</conf-name>
          <conf-date>October 22-29, 2017</conf-date>
          <conf-loc>Venice, Italy</conf-loc>
          <fpage>3792</fpage>
          <lpage>3800</lpage>
          <pub-id pub-id-type="doi">10.1109/iccv.2017.407</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodfellow</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Pouget-Abadie</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mirza</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Warde-Farley</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ozair</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Courville</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bengio</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Generative adversarial networks</article-title>
          <source>Commun ACM</source>
          <year>2020</year>
          <month>10</month>
          <day>22</day>
          <volume>63</volume>
          <issue>11</issue>
          <fpage>139</fpage>
          <lpage>144</lpage>
          <pub-id pub-id-type="doi">10.1145/3422622</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lyu</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Exposing deep fakes using inconsistent head poses</article-title>
          <source>Proceedings of the 2019 IEEE International Conference on Acoustics, Speech and Signal Processing</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE International Conference on Acoustics, Speech and Signal Processing</conf-name>
          <conf-date>May 12-17, 2019</conf-date>
          <conf-loc>Brighton, UK</conf-loc>
          <fpage>8261</fpage>
          <lpage>8265</lpage>
          <pub-id pub-id-type="doi">10.1109/icassp.2019.8683164</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Meng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ariyaeeinia</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Distinguishable de-identified faces</article-title>
          <source>Proceedings of the 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition</source>
          <year>2015</year>
          <conf-name>11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition</conf-name>
          <conf-date>May 4-8, 2015</conf-date>
          <conf-loc>Ljubljana, Slovenia</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/fg.2015.7285019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Dai</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Recent advances of generative adversarial networks in computer vision</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>14985</fpage>
          <lpage>15006</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2018.2886814</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Haung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ding</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>k-Same-Siamese-GAN: k-Same algorithm with generative adversarial network for facial image de-identification with hyperparameter tuning and mixed precision training</article-title>
          <source>Proceedings of the 16th IEEE International Conference on Advanced Video and Signal Based Surveillance</source>
          <year>2019</year>
          <conf-name>The 16th IEEE International Conference on Advanced Video and Signal Based Surveillance</conf-name>
          <conf-date>September 18-21, 2019</conf-date>
          <conf-loc>Taipei, Taiwan</conf-loc>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/avss.2019.8909866</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Song</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Learning structural similarity with evolutionary-GAN: A new face de-identification method</article-title>
          <source>Proceedings of the 6th International Conference on Behavioral, Economic and Socio-Cultural Computing</source>
          <year>2019</year>
          <conf-name>The 6th International Conference on Behavioral, Economic and Socio-Cultural Computing</conf-name>
          <conf-date>October 28-30, 2019</conf-date>
          <conf-loc>Beijing, China</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/besc48373.2019.8962993</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Agarwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chattopadhyay</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Privacy preservation through facial de-identification with simultaneous emotion preservation</article-title>
          <source>Signal Image Video Process</source>
          <year>2020</year>
          <month>11</month>
          <day>27</day>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>951</fpage>
          <lpage>958</lpage>
          <pub-id pub-id-type="doi">10.1007/s11760-020-01819-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nitzan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Bermano</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen-Or</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Face identity disentanglement via latent space mapping</article-title>
          <source>ACM Trans Graph</source>
          <year>2020</year>
          <month>12</month>
          <day>31</day>
          <volume>39</volume>
          <issue>6</issue>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <pub-id pub-id-type="doi">10.1145/3414685.3417826</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>FPGAN: Face de-identification method with generative adversarial networks for social robots</article-title>
          <source>Neural Netw</source>
          <year>2021</year>
          <month>01</month>
          <volume>133</volume>
          <fpage>132</fpage>
          <lpage>147</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neunet.2020.09.001</pub-id>
          <pub-id pub-id-type="medline">33217682</pub-id>
          <pub-id pub-id-type="pii">S0893-6080(20)30324-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maximov</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Elezi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Leal-Taixé</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>CIAGAN: Conditional Identity Anonymization Generative Adversarial Networks</article-title>
          <source>Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2020</year>
          <conf-name>The 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 13-19, 2020</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
          <fpage>5446</fpage>
          <lpage>5455</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr42600.2020.00549</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brkic</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sikiric</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hrkac</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kalafatic</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>I know that person: Generative full body and face de-identification of people in images</article-title>
          <source>Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops</source>
          <year>2017</year>
          <conf-name>2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops</conf-name>
          <conf-date>July 21-26, 2017</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>1319</fpage>
          <lpage>1328</lpage>
          <pub-id pub-id-type="doi">10.1109/CVPRW.2017.173</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meden</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Mallı</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Fabijan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ekenel</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Štruc</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Peer</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Face deidentification with generative deep neural networks</article-title>
          <source>IET Signal Process</source>
          <year>2017</year>
          <month>12</month>
          <volume>11</volume>
          <issue>9</issue>
          <fpage>1046</fpage>
          <lpage>1054</lpage>
          <pub-id pub-id-type="doi">10.1049/iet-spr.2017.0049</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mirjalili</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Raschka</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Namboodiri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Semi-adversarial networks: Convolutional autoencoders for imparting privacy to face images</article-title>
          <source>Proceedings of the 2018 International Conference on Biometrics</source>
          <year>2018</year>
          <conf-name>The 2018 International Conference on Biometrics</conf-name>
          <conf-date>February 20-23, 2018</conf-date>
          <conf-loc>Gold Coast, Australia</conf-loc>
          <fpage>82</fpage>
          <lpage>89</lpage>
          <pub-id pub-id-type="doi">10.1109/icb2018.2018.00023</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radford</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Metz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chintala</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Unsupervised representation learning with deep convolutional generative adversarial networks</article-title>
          <source>Proceedings of the International Conference on Learning Representations</source>
          <year>2016</year>
          <conf-name>The International Conference on Learning Representations</conf-name>
          <conf-date>May 2-4, 2016</conf-date>
          <conf-loc>San Juan, Puerto Rico</conf-loc>
          <fpage>1</fpage>
          <lpage>16</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1511.06434"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Privacy-protective-GAN for privacy preserving face de-identification</article-title>
          <source>J Comput Sci Technol</source>
          <year>2019</year>
          <month>1</month>
          <day>18</day>
          <volume>34</volume>
          <issue>1</issue>
          <fpage>47</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1007/s11390-019-1898-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hukkelås</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mester</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lindseth</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>DeepPrivacy: A generative adversarial network for face anonymization</article-title>
          <source>Proceedings of the International Symposium on Visual Computing</source>
          <year>2019</year>
          <conf-name>International Symposium on Visual Computing</conf-name>
          <conf-date>October 7-9, 2019</conf-date>
          <conf-loc>Lake Tahoe, NV</conf-loc>
          <fpage>565</fpage>
          <lpage>578</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-33720-9_44</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ryoo</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Learning to anonymize faces for privacy preserving action detection</article-title>
          <source>Proceedings of the 15th European Conference on Computer Vision</source>
          <year>2018</year>
          <conf-name>The 15th European Conference on Computer Vision</conf-name>
          <conf-date>September 8-14, 2018</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <fpage>639</fpage>
          <lpage>655</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-01246-5_38</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Ma</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Oh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Van Gool</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Schiele</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Natural and effective obfuscation by head inpainting</article-title>
          <source>Proceedings of the 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2018</year>
          <conf-name>The 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 18-23, 2018</conf-date>
          <conf-loc>Salt Lake City, UT</conf-loc>
          <fpage>5050</fpage>
          <lpage>5059</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2018.00530</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Tewari</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Theobalt</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schiele</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A hybrid model for identity obfuscation by face replacement</article-title>
          <source>Proceedings of the 15th European Conference on Computer Vision</source>
          <year>2018</year>
          <conf-name>The 15th European Conference on Computer Vision</conf-name>
          <conf-date>September 8-14, 2018</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <fpage>570</fpage>
          <lpage>586</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-01246-5_34</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hua</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Towards open-set identity preserving face synthesis</article-title>
          <source>Proceedings of 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2018</year>
          <conf-name>The 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 18-23, 2018</conf-date>
          <conf-loc>Salt Lake City, UT</conf-loc>
          <fpage>6713</fpage>
          <lpage>6722</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2018.00702</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>FaceShifter: Towards high fidelity and occlusion aware face swapping</article-title>
          <source>Proceedings of 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2020</year>
          <conf-name>2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 14-19, 2020</conf-date>
          <conf-loc>Virtual</conf-loc>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/1912.13457.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nirkin</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Keller</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Hassner</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>FSGAN: Subject agnostic face swapping and reenactment</article-title>
          <source>Proceedings of the 2019 IEEE/CVF International Conference on Computer Vision</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE/CVF International Conference on Computer Vision</conf-name>
          <conf-date>October 27-November 2, 2019</conf-date>
          <conf-loc>Seoul, South Korea</conf-loc>
          <fpage>7183</fpage>
          <lpage>7192</lpage>
          <pub-id pub-id-type="doi">10.1109/iccv.2019.00728</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rosin</surname>
              <given-names>PL</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Self-paced balance learning for clinical skin disease recognition</article-title>
          <source>IEEE Trans Neural Netw Learn Syst</source>
          <year>2020</year>
          <month>8</month>
          <volume>31</volume>
          <issue>8</issue>
          <fpage>2832</fpage>
          <lpage>2846</lpage>
          <pub-id pub-id-type="doi">10.1109/tnnls.2019.2917524</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karras</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Laine</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aila</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>A style-based generator architecture for generative adversarial networks</article-title>
          <source>Proceedings of the 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 15-20, 2019</conf-date>
          <conf-loc>Long Beach, CA</conf-loc>
          <fpage>4396</fpage>
          <lpage>4405</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2019.00453</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gabbay</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hoshen</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Demystifying inter-class disentanglement</article-title>
          <source>Proceedings of the 8th International Conference on Learning Representations</source>
          <year>2020</year>
          <conf-name>The 8th International Conference on Learning Representations</conf-name>
          <conf-date>April 26-May 1, 2020</conf-date>
          <conf-loc>Virtual</conf-loc>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/pdf/1906.11796.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pidhorskyi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Adjeroh</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Doretto</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Adversarial latent autoencoders</article-title>
          <source>Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2020</year>
          <conf-name>The 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 13-19, 2020</conf-date>
          <conf-loc>Seattle, WA</conf-loc>
          <fpage>14092</fpage>
          <lpage>14101</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr42600.2020.01411</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Richardson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Alaluf</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Patashnik</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Nitzan</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Azar</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shapiro</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Encoding in style: A StyleGAN encoder for image-to-image translation</article-title>
          <source>Proceedings of the 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2021</year>
          <conf-name>The 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 20-25, 2021</conf-date>
          <conf-loc>Nashville, TN</conf-loc>
          <fpage>2287</fpage>
          <lpage>2296</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr46437.2021.00232</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mirza</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Osindero</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Conditional generative adversarial nets</article-title>
          <source>ArXiv</source>
          <comment>Preprint posted online on November 6, 2014
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1411.1784"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gafni</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Taigman</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Live face de-identification in video</article-title>
          <source>Proceedings of the 2019 IEEE/CVF International Conference on Computer Vision</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE/CVF International Conference on Computer Vision</conf-name>
          <conf-date>October 27-November 2, 2019</conf-date>
          <conf-loc>Seoul, South Korea</conf-loc>
          <fpage>9377</fpage>
          <lpage>9386</lpage>
          <pub-id pub-id-type="doi">10.1109/iccv.2019.00947</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Agarwal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Farid</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nagano</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Protecting world leaders against deep fakes</article-title>
          <source>Proceedings of the 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 15-20, 2019</conf-date>
          <conf-loc>Long Beach, CA</conf-loc>
          <fpage>38</fpage>
          <lpage>45</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/4mk6vfac"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Letournel</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Bugeau</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ta</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Domenger</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Face de-identification with expressions preservation</article-title>
          <source>Proceedings of the 2015 IEEE International Conference on Image Processing</source>
          <year>2015</year>
          <conf-name>The 2015 IEEE International Conference on Image Processing</conf-name>
          <conf-date>September 27-30, 2015</conf-date>
          <conf-loc>Quebec City, QC</conf-loc>
          <fpage>4366</fpage>
          <lpage>4370</lpage>
          <pub-id pub-id-type="doi">10.1109/icip.2015.7351631</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Natural face de-identification with measurable privacy</article-title>
          <source>Proceedings of the 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</source>
          <year>2019</year>
          <conf-name>The 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops</conf-name>
          <conf-date>June 16-17, 2019</conf-date>
          <conf-loc>Long Beach, CA</conf-loc>
          <fpage>56</fpage>
          <lpage>65</lpage>
          <pub-id pub-id-type="doi">10.1109/cvprw.2019.00013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Du</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Yi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Blasch</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Balancing privacy protection and utility preservation in face de-identification</article-title>
          <source>Proceedings of the 2014 IEEE International Joint Conference on Biometrics</source>
          <year>2014</year>
          <conf-name>The 2014 IEEE International Joint Conference on Biometrics</conf-name>
          <conf-date>September 29-October 2, 2014</conf-date>
          <conf-loc>Clearwater, FL</conf-loc>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/btas.2014.6996249</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Orekondy</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Fritz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schiele</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Automatic redaction of private information in images</article-title>
          <source>Proceedings of the 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</source>
          <year>2018</year>
          <conf-name>The 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 18-23, 2018</conf-date>
          <conf-loc>Salt Lake City, UT</conf-loc>
          <fpage>8466</fpage>
          <lpage>8475</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2018.00883</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kuang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Babaguchi</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Unnoticeable synthetic face replacement for image privacy protection</article-title>
          <source>Neurocomputing</source>
          <year>2021</year>
          <month>10</month>
          <volume>457</volume>
          <fpage>322</fpage>
          <lpage>333</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2021.06.061</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>PPGAN: Privacy-preserving Generative Adversarial Network</article-title>
          <source>Proceedings of the IEEE 25th International Conference on Parallel and Distributed Systems</source>
          <year>2019</year>
          <conf-name>The IEEE 25th International Conference on Parallel and Distributed Systems</conf-name>
          <conf-date>December 4-6, 2019</conf-date>
          <conf-loc>Tianjin, China</conf-loc>
          <fpage>985</fpage>
          <lpage>989</lpage>
          <pub-id pub-id-type="doi">10.1109/icpads47876.2019.00150</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
