<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JDERM</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Dermatol</journal-id>
      <journal-title>JMIR Dermatology</journal-title>
      <issn pub-type="epub">2562-0959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="pmid">39475773</article-id>
      <article-id pub-id-type="publisher-id">v5i3e39143</article-id>
      <article-id pub-id-type="doi">10.2196/39143</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Improving Skin Color Diversity in Cancer Detection: Deep Learning Approach</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Dellavalle</surname>
            <given-names>Robert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Sivesind</surname>
            <given-names>Torunn</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Ndabu</surname>
            <given-names>Theophile</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Marchetti</surname>
            <given-names>Michael</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Zhongqiang</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Rezk</surname>
            <given-names>Eman</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>School of Computational Science and Engineering</institution>
            <institution>McMaster University</institution>
            <addr-line>1280 Main Street West</addr-line>
            <addr-line>Hamilton, ON, L8S 4L8</addr-line>
            <country>Canada</country>
            <phone>1 905 525 9140</phone>
            <email>rezke@mcmaster.ca</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2531-0799</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>Eltorki</surname>
            <given-names>Mohamed</given-names>
          </name>
          <degrees>MBChB</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6978-0015</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" equal-contrib="yes">
          <name name-style="western">
            <surname>El-Dakhakhni</surname>
            <given-names>Wael</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-8617-261X</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>School of Computational Science and Engineering</institution>
        <institution>McMaster University</institution>
        <addr-line>Hamilton, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Faculty of Health Sciences</institution>
        <institution>McMaster University</institution>
        <addr-line>Hamilton, ON</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Eman Rezk <email>rezke@mcmaster.ca</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jul-Sep</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>19</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>5</volume>
      <issue>3</issue>
      <elocation-id>e39143</elocation-id>
      <history>
        <date date-type="received">
          <day>29</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>18</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>17</day>
          <month>7</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>4</day>
          <month>8</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Eman Rezk, Mohamed Eltorki, Wael El-Dakhakhni. Originally published in JMIR Dermatology (http://derma.jmir.org), 19.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Dermatology, is properly cited. The complete bibliographic information, a link to the original publication on http://derma.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://derma.jmir.org/2022/3/e39143" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The lack of dark skin images in pathologic skin lesions in dermatology resources hinders the accurate diagnosis of skin lesions in people of color. Artificial intelligence applications have further disadvantaged people of color because those applications are mainly trained with light skin color images.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this study is to develop a deep learning approach that generates realistic images of darker skin colors to improve dermatology data diversity for various malignant and benign lesions.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We collected skin clinical images for common malignant and benign skin conditions from DermNet NZ, the International Skin Imaging Collaboration, and Dermatology Atlas. Two deep learning methods, style transfer (ST) and deep blending (DB), were utilized to generate images with darker skin colors using the lighter skin images. The generated images were evaluated quantitively and qualitatively. Furthermore, a convolutional neural network (CNN) was trained using the generated images to assess the latter’s effect on skin lesion classification accuracy.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Image quality assessment showed that the ST method outperformed DB, as the former achieved a lower loss of realism score of 0.23 (95% CI 0.19-0.27) compared to 0.63 (95% CI 0.59-0.67) for the DB method. In addition, ST achieved a higher disease presentation with a similarity score of 0.44 (95% CI 0.40-0.49) compared to 0.17 (95% CI 0.14-0.21) for the DB method. The qualitative assessment completed on masked participants indicated that ST-generated images exhibited high realism, whereby 62.2% (1511/2430) of the votes for the generated images were classified as real. Eight dermatologists correctly diagnosed the lesions in the generated images with an average rate of 0.75 (360 correct diagnoses out of 480) for several malignant and benign lesions. Finally, the classification accuracy and the area under the curve (AUC) of the model when considering the generated images were 0.76 (95% CI 0.72-0.79) and 0.72 (95% CI 0.67-0.77), respectively, compared to the accuracy of 0.56 (95% CI 0.52-0.60) and AUC of 0.63 (95% CI 0.58-0.68) for the model without considering the generated images.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Deep learning approaches can generate realistic skin lesion images that improve the skin color diversity of dermatology atlases. The diversified image bank, utilized herein to train a CNN, demonstrates the potential of developing generalizable artificial intelligence skin cancer diagnosis applications.</p>
        </sec>
        <sec sec-type="registered-report">
          <title>International Registered Report Identifier (IRRID)</title>
          <p>RR2-10.2196/34896</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>deep learning</kwd>
        <kwd>neural network</kwd>
        <kwd>machine learning</kwd>
        <kwd>algorithm</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>skin tone diversity</kwd>
        <kwd>data augmentation</kwd>
        <kwd>skin cancer diagnosis</kwd>
        <kwd>generalizability</kwd>
        <kwd>skin</kwd>
        <kwd>cancer</kwd>
        <kwd>diagnosis</kwd>
        <kwd>diagnostic</kwd>
        <kwd>imaging</kwd>
        <kwd>dermatology</kwd>
        <kwd>digital health</kwd>
        <kwd>image generation</kwd>
        <kwd>generated image</kwd>
        <kwd>computer-generated</kwd>
        <kwd>lesion</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>The “white lens” phenomenon has led to the underrepresentation of dark skin pathology images in dermatology resources [<xref ref-type="bibr" rid="ref1">1</xref>]. A recent analysis of several dermatology textbooks utilized to educate dermatologists showed that dark skin images represent merely 4% to 18% of the total number of images [<xref ref-type="bibr" rid="ref2">2</xref>]. As a result, it is challenging for dermatologists to properly diagnose and treat skin pathology in people of color.</p>
      <p>Applications utilizing artificial intelligence (AI) have been developing at a rapid pace to aid clinicians in making diagnoses [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. Deep learning (DL), a branch of AI, has been widely employed to develop models as accurate as specialist dermatologists in diagnosing skin cancer [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref8">8</xref>] and common skin conditions [<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. However, a major drawback facing the mainstream adoption of DL applications in dermatology is the paucity of training data diversity leading to nonrobust models [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>].</p>
      <p>Han et al [<xref ref-type="bibr" rid="ref15">15</xref>] developed a DL model to diagnose malignant and benign skin lesions using clinical images. According to their results, the performance of the model was highly dependent on the diversity of the training data. Thus, DL models trained on data with a certain skin color range could not be generalized when tested on data collected from a different population [<xref ref-type="bibr" rid="ref16">16</xref>]. Rahman et al [<xref ref-type="bibr" rid="ref17">17</xref>] utilized International Skin Imaging Collaboration (ISIC) images to train and test 5 DL models to diagnose various malignant and benign skin lesions [<xref ref-type="bibr" rid="ref18">18</xref>]. The models achieved a recall of 88%, 89%, 91%, 88%, and 84%, respectively, and the performance was further boosted by developing an ensemble of the implemented models that achieved a recall of 94%. ISIC images were also utilized to develop a DL framework, DermoExpert [<xref ref-type="bibr" rid="ref19">19</xref>], to classify up to 7 malignant and benign skin lesions. The framework was trained and tested on ISIC-2016, ISIC-2017, and ISIC-2018 images and achieved an AUC of 0.96, 0.95, and 0.97 for the 3 data sets, respectively.</p>
      <p>Although ISIC provides a large publicly available skin images archive, the images were mainly collected from the United States, Europe, and Australia [<xref ref-type="bibr" rid="ref13">13</xref>], where light skin colors are dominant. This was also confirmed by Kinyanjui et al [<xref ref-type="bibr" rid="ref20">20</xref>], who studied the skin tone distribution of ISIC images and showed that the skin tone of the images primarily ranged from very light to intermediate. Thus, the aforementioned models trained and tested on ISIC images are not expected to be generalizable to darker skin colors.</p>
      <p>Motivated by this necessity, we proposed an algorithm development and validation protocol to perform skin cancer early detection for all skin colors [<xref ref-type="bibr" rid="ref21">21</xref>]. In the protocol, we considered clinical images to develop the model because clinical images are easy to obtain, unlike dermoscopic images that require a specialist and microscopy. In this paper, we discuss the development and initial internal validation of skin image generation for underrepresented skin colors in publicly available data sets (Phases 2 and 3 of the protocol). This paper aims to (1) generate realistic images with malignant and benign skin lesions using 2 deep learning methods, (2) extensively evaluate the generated images using quantitative ratings as well as qualitative human expert and nonexpert ratings, and (3) develop a preliminary classifier, trained with the generated images, to categorize the images as malignant or benign and to study the generated images’ effect on the classification accuracy.</p>
      <p>The remainder of this article is organized as follows: the methods section explains the materials and techniques utilized to generate and evaluate the images. The subsequent section shows the experimental results of all components involved in this work, and the final section highlights our work limitations, discusses the proposed work in comparison with other existing studies, and concludes our work.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Background</title>
        <p>In this work, we implement 2 phases of our ongoing study that aims at leveraging deep learning to improve skin color diversity and thus malignancy detection in any skin color using clinical images. The first phase of our study [<xref ref-type="bibr" rid="ref21">21</xref>] focused on quantifying the underrepresentation of darker skin colors in dermatology atlases by developing a skin tone categorization tool. The second and third phases of the study, implemented herein, aim to generate images with darker skin color, extensively assess the generated images using several evaluation metrics, and study the impact of the generated images on malignancy detection by developing a classification model trained on the generated images. Finally, the fourth phase, expected to be completed by the end of 2022, will focus on developing an accurate malignancy detection classification model. This model will compile the generated images with text descriptions of skin cancer clinical presentations in darker skin colors and use novel deep learning architectures and ensemble learning approaches to improve classification accuracy. In this section, we explain the characteristics of the utilized data, the image generation methods, and the evaluation techniques employed to achieve the objectives of Phases 2 and 3.</p>
      </sec>
      <sec>
        <title>Study Data Set</title>
        <p>We collected 1701 clinical images representing several malignant and benign skin lesions from the publicly available skin image repositories DermNet NZ (994 images) [<xref ref-type="bibr" rid="ref22">22</xref>], ISIC-2018 JID editorial images (100 images) [<xref ref-type="bibr" rid="ref17">17</xref>], and Dermatology Atlas (607 images) [<xref ref-type="bibr" rid="ref23">23</xref>]. Images from DermNet NZ and ISIC (1094 images), referred to as set A, were utilized for generating images, training, and validating the classifier. Meanwhile, Dermatology Atlas images (607 images), referred to as set B, were utilized to test the classifier. The distribution of the data as malignant and benign is listed in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <p>The skin tone diversity of the study data sets was investigated using our skin tone categorization tool [<xref ref-type="bibr" rid="ref21">21</xref>]. The results, summarized in <xref ref-type="table" rid="table2">Table 2</xref>, showed that the majority (84.1%, n=920) of set A images were categorized as light and intermediate skin tones, while set B was more diverse and had varying skin tone distributions. Based on this, set B will facilitate our evaluation of the generalizability of the classification model developed using the generated images, as it has variant skin tone distribution compared to the training data.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Study data sets for malignant and benign class distribution [<xref ref-type="bibr" rid="ref21">21</xref>]. Set A (n=1094): training and validation set; set B (n=607): testing set.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="320"/>
            <col width="360"/>
            <col width="320"/>
            <thead>
              <tr valign="top">
                <td>Tumor type</td>
                <td>Set A, n (%)</td>
                <td>Set B, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Malignant</td>
                <td>634 (58)</td>
                <td>508 (83.7)</td>
              </tr>
              <tr valign="top">
                <td>Benign</td>
                <td>460 (42)</td>
                <td>99 (16.3)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Skin tone distribution of the study data sets. Set A (n=1094): training and validation set; set B (n=607): testing set.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="320"/>
            <col width="360"/>
            <col width="320"/>
            <thead>
              <tr valign="top">
                <td>Skin tone</td>
                <td>Set A, n (%)</td>
                <td>Set B, n (%)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Light</td>
                <td>690 (63.1)</td>
                <td>133 (21.9)</td>
              </tr>
              <tr valign="top">
                <td>Intermediate</td>
                <td>230 (21.0)</td>
                <td>198 (32.6)</td>
              </tr>
              <tr valign="top">
                <td>Tan</td>
                <td>110 (10.1)</td>
                <td>131 (21.6)</td>
              </tr>
              <tr valign="top">
                <td>Brown</td>
                <td>62 (5.7)</td>
                <td>134 (22.1)</td>
              </tr>
              <tr valign="top">
                <td>Black</td>
                <td>2 (0.18)</td>
                <td>11 (1.8)</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Image Generation</title>
        <sec>
          <title>Style Transfer</title>
          <p>Style transfer (ST) [<xref ref-type="bibr" rid="ref24">24</xref>] is an image generation technique developed based on the visual geometry group (VGG)-19 network architecture and trained on the ImageNet database with millions of images [<xref ref-type="bibr" rid="ref25">25</xref>]. ST utilizes 16 convolutional layers (Conv), 5 average pooling layers, and no fully connected layers of the VGG-19 architecture, as illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>A. The ST method, as demonstrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>B, primarily works by extracting features from content and style images denoted as F<sub>C</sub> and F<sub>S</sub>. Then, it iteratively blends the features to generate a new image with content and style features (GF<sub>C</sub>, GF<sub>S</sub>). The content and style losses are calculated as the differences between the original features (F<sub>C</sub>, F<sub>S</sub>) and the corresponding generated features (GF<sub>C</sub>, GF<sub>S</sub>). The total loss is backpropagated to the VGG network to improve the quality of the generated image.</p>
          <p>Since convolutional neural networks (CNNs) trained with an adequate number of annotated data on object recognition can extract high-level features from images independent of their content [<xref ref-type="bibr" rid="ref26">26</xref>], the ST method can be generalized for feature extraction from skin lesion images. Therefore, ST can be utilized to generate darker skin images without retraining the VGG network. ST was utilized in this work by extracting the features of a light skin image containing the skin pathology and a style image with the target skin color. A new image containing an optimized blend of both feature sets was subsequently generated, starting from a noise image and iteratively improving by minimizing the total loss, as illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>B. The fine-tuning details of the ST method are discussed in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Style transfer (ST) in skin images. (A) VGG architecture. (B) Process of ST.</p>
            </caption>
            <graphic xlink:href="derma_v5i3e39143_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Deep Blending</title>
          <p>Deep blending (DB) is an integration of ST and Poisson image blending methods [<xref ref-type="bibr" rid="ref27">27</xref>], wherein the object of interest from a content image is transferred to the style image while minimizing the sharp intensity and texture change between the content and style images [<xref ref-type="bibr" rid="ref28">28</xref>]. As in ST, DB utilizes the VGG network to extract the features of the input images and iteratively updates the output image using the calculated loss functions. However, DB works only on the object of interest from the content image and thus requires a segmented object. Moreover, DB essentially works on the blending region where the content object meets the style image. Therefore, DB utilizes 3 loss functions: (1) Poisson-based gradient loss to minimize the change of the blending region gradient, (2) content loss to ensure the semantic of the blending region is similar to the content object, and (3) style loss to ensure the texture of the blending region is similar to the style image. Finally, DB performs 2 rounds of blending; the first round employs the content object and the style image, and the second employs the output blended image of the first round and the style image. The fine-tuning details of the DB method are discussed in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
        </sec>
        <sec>
          <title>Target Skin Color Selection</title>
          <p>The target skin color is the style needed to synthesize images in ST and DB methods. To generate images for the underrepresented skin colors in set A, tan, brown, and black skin colors were selected. The selection of the target style images was determined using the individual typology angle (ITA) calculated from the input transformed images [<xref ref-type="bibr" rid="ref29">29</xref>]. Consequently, the angle was mapped to a skin class according to predefined ITA ranges [<xref ref-type="bibr" rid="ref30">30</xref>]. The ITA calculation and mapping are explained in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
          <p><xref rid="figure2" ref-type="fig">Figure 2</xref> shows the selected skin images, to be utilized as style images, with the ITA score and skin classification. The tan skin image was obtained from Dermatology Atlas [<xref ref-type="bibr" rid="ref23">23</xref>], while the brown and dark skin images were obtained from ShutterStock [<xref ref-type="bibr" rid="ref31">31</xref>] through a standard license.</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Skin tone classification. ITA: individual typology angle.</p>
            </caption>
            <graphic xlink:href="derma_v5i3e39143_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Evaluation</title>
        <sec>
          <title>Quantitative Evaluation</title>
          <p>The quantitative evaluation was performed using the blind referenceless image spatial quality evaluator (BRISQUE) and the structural similarity index measure (SSIM) to assess realism and disease presentation, respectively. BRISQUE is a referenceless metric that quantifies the loss of image realism in the presence of distortions solely using the image being assessed [<xref ref-type="bibr" rid="ref32">32</xref>]. This method assigns a quality score to each image that correlates well with human quality judgment [<xref ref-type="bibr" rid="ref32">32</xref>]. The BRISQUE evaluation method is based on 2 main concepts: (1) real images maintain regular statistical properties, and (2) normalized brightness coefficients of a real image approximately follow a Gaussian distribution. As such, image distortion can be captured by a change in the expected statistical properties or deviation from a Gaussian distribution (such as the generalized Gaussian distribution [<xref ref-type="bibr" rid="ref33">33</xref>] and the asymmetric generalized Gaussian distribution [<xref ref-type="bibr" rid="ref34">34</xref>], as explained in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>).</p>
          <p>The second metric, SSIM, compares the structure, texture, and edges of a reference image with a modified image and provides a similarity score [<xref ref-type="bibr" rid="ref35">35</xref>]. SSIM was previously used to evaluate the quality of the generated skin lesion images [<xref ref-type="bibr" rid="ref36">36</xref>]; therefore, SSIM is employed in this study to evaluate the similarity of the generated images with the content image including the disease to measure disease presentation. The SSIM calculation is explained in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
        </sec>
        <sec>
          <title>Qualitative Evaluation</title>
          <p>For the qualitative assessment, 62 individuals with varying backgrounds participated in evaluating the generated images. Of the 62 participating individuals, 41 (66.1%) had no medical background and 21 (33.9%) were medical personnel, comprising 10 (47.6%) attending physicians, 2 (9.5%) physicians in training, 1 (4.8%) nurse, and 8 (38.1%) dermatologists. The first task was a human visual Turing test (VTT), wherein participants (with and without a medical background) were asked to classify the images as real or generated. The responses of the VTT were analyzed to (1) determine the significance of background (medical versus nonmedical personnel) and experience in discovering the generated images and (2) estimate the quality of the generated images by calculating the classification accuracy, false positive rate (FPR), defined as the ratio of generated images classified as real, and true positive rate (TPR), defined as the ratio of real images classified as real.</p>
          <p>The second task was a disease identification test carried out solely by dermatologists with varying experience levels. The responses to this test were analyzed to measure the recall (ratio of correctly diagnosed images by dermatologists) of the real and generated images. The 95% CI was calculated using the Clopper-Pearson method [<xref ref-type="bibr" rid="ref37">37</xref>] to estimate the uncertainty of the reported results.</p>
        </sec>
        <sec>
          <title>Preliminary Classification Evaluation</title>
          <p>To study the effect of the generated images on skin color diversity, the generated images were used to augment the original images of set A to train a CNN and classify the image as malignant or benign. The 1094 images of set A were randomly split, with 80% (n=875) used for training the network and 20% (n=219) used for validation. The CNN training followed 4 data utilization approaches, as illustrated in <xref rid="figure3" ref-type="fig">Figure 3</xref>: (a) use the images directly for training without performing any augmentation; (b) augment the images with their corresponding generated tan, brown, and black images; (c) augment the images through geometric transformations, such as flipping, rotating, and adding noise [<xref ref-type="bibr" rid="ref38">38</xref>]; and (d) augment the images with the generated and transformed images. All models were validated on the same validation set (219 images) and evaluated using separate test data, set B, which included 607 real images with diverse skin tone distribution, as illustrated in <xref ref-type="table" rid="table2">Table 2</xref>.</p>
          <p>ResNet-50 [<xref ref-type="bibr" rid="ref39">39</xref>] pretrained on ImageNet images was utilized in our work due to its applicability to dermatology diagnostic tasks [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. The ResNet-50 architecture consists of the 5 stages shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>A. For skin lesion classification, we customized ResNet-50 by adding an average pooling layer, a fully connected layer, and SoftMax to classify the lesions as malignant or benign, as shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>B. Transfer learning was applied when training the ResNet-50, wherein we froze the first 4 blocks of the ResNet-50 to make use of the ImageNet’s gained weights and trained the last block with the newly added layers to gain new weights. The customized ResNet-50 was trained for 30 epochs and optimized using an Adam optimizer [<xref ref-type="bibr" rid="ref42">42</xref>] with a learning rate of 0.001. The learning rate was incrementally reduced when there was no improvement in the validation accuracy for 5 consecutive epochs to allow the models to learn more optimal weights [<xref ref-type="bibr" rid="ref43">43</xref>].</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Image classification process. CNN: convolutional neural network; Tr: training set; Ts: test set; Vl: validation set.</p>
            </caption>
            <graphic xlink:href="derma_v5i3e39143_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>Classification network. (A) ResNet-50 architecture and (B) the customized ResNet-50.</p>
            </caption>
            <graphic xlink:href="derma_v5i3e39143_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>All images utilized in our work were collected from publicly available deidentified data sets. Therefore, we do not require ethics approval.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>
        <bold>Implementation Details</bold>
      </p>
      <p>All the developed models were implemented on Google Colaboratory Pro with an NVIDIA Tesla P100 GPU. We used Keras [<xref ref-type="bibr" rid="ref44">44</xref>] with TensorFlow [<xref ref-type="bibr" rid="ref45">45</xref>] to develop and optimize the models. The average time to generate a single image using the ST method was 46 seconds and 9 minutes using the DB method (performing 2 rounds of image optimization). The time for training the classification models varied based on the data utilization approach; the average training time was 14, 34, 34, and 47 minutes for the no augmentation, generated image augmentation, transformed image augmentation, and all images augmentation, respectively (<xref rid="figure3" ref-type="fig">Figure 3</xref>).</p>
      <sec>
        <title>Quantitative Evaluation</title>
        <p>Based on the skin tone analysis of the study data set, the 920 images categorized as light (690) and intermediate (230) skin colors were utilized as content, and 2760 images were generated using each method for the tan, brown, and dark style images. <xref ref-type="table" rid="table3">Tables 3</xref> and <xref ref-type="table" rid="table4">4</xref> report the average normalized BRISQUE and average SSIM scores for each skin color using ST and DB generation methods, respectively. As the BRISQUE measured the loss of realism in the generated images, lower BRISQUE scores indicated higher realism. As the SSIM measured the similarity between the generated images and the content images, higher SSIM scores indicated a higher similarity to the image including the disease.</p>
        <p>It can be seen that the ST method outperformed the DB method in terms of realism by achieving significantly lower average BRISQUE scores in all skin tones (<xref ref-type="table" rid="table3">Table 3</xref>). The overall BRISQUE score of the ST method was 0.23 (95% CI 0.19-0.27) compared to the DB score of 0.63 (95% CI 0.59-0.67). In terms of disease presentation, ST achieved higher average SSIM scores in all skin tones (<xref ref-type="table" rid="table4">Table 4</xref>). The overall SSIM score of the ST method was 0.44 (95% CI 0.40-0.49) compared to 0.17 (95% CI 0.14-0.21) for the DB method. Across the different tones, there was a consistent change in the BRISQUE metric for both methods resulting from the quality variation of the utilized style images. Similarly, the SSIM changed across skin colors, decreasing for ST and DB for darker colors due to the deviation from the light skin color of the content images.</p>
        <p>A visual qualitative comparison between the images generated by the ST and DB methods with respect to the real images is demonstrated in <xref rid="figure5" ref-type="fig">Figure 5</xref>. The ST-generated images showed clear disease presentation while adding up the pigmentation on the lesion region to match the darker skin color. However, the DB-generated images included the disease region from the content image and focused only on blending the border of the disease with the style image. Therefore, the ST-generated images looked more realistic compared to the DB-generated images.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Average normalized blind referenceless image spatial quality evaluator (BRISQUE) scores of the style transfer (ST) and deep blending (DB) methods.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="130"/>
            <col width="290"/>
            <col width="290"/>
            <col width="290"/>
            <thead>
              <tr valign="top">
                <td>Method</td>
                <td>Tan</td>
                <td>Brown</td>
                <td>Black</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>ST<sup>a</sup></td>
                <td>0.13 (95% CI 0.08-0.19)</td>
                <td>0.35 (95% CI 0.27-0.42)</td>
                <td>0.22 (95% CI 0.15-0.29)</td>
              </tr>
              <tr valign="top">
                <td>DB<sup>b</sup></td>
                <td>0.55 (95% CI 0.47-0.63)</td>
                <td>0.93 (95% CI 0.89-0.97)</td>
                <td>0.42 (95% CI 0.34-0.49)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>ST: style transfer.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>DB: deep blending.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Average structural similarity index measure (SSIM) scores of the style transfer (ST) and deep blending (DB) methods.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="130"/>
            <col width="290"/>
            <col width="290"/>
            <col width="290"/>
            <thead>
              <tr valign="top">
                <td>Method</td>
                <td>Tan</td>
                <td>Brown</td>
                <td>Black</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>ST<sup>a</sup></td>
                <td>0.51 (95% CI 0.43-0.59)</td>
                <td>0.44 (95% CI 0.36-0.52)</td>
                <td>0.37 (95% CI 0.30-0.45)</td>
              </tr>
              <tr valign="top">
                <td>DB<sup>b</sup></td>
                <td>0.20 (95% CI 0.14-0.26)</td>
                <td>0.17 (95% CI 0.11-0.23)</td>
                <td>0.15 (95% CI 0.09-0.21)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>ST: style transfer.</p>
            </fn>
            <fn id="table4fn2">
              <p><sup>b</sup>DB: deep blending.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Generated images using style transfer (ST) and deep blending (DB) compared to the real images.</p>
          </caption>
          <graphic xlink:href="derma_v5i3e39143_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Qualitative Evaluation</title>
        <p>For the human qualitative evaluation component, we conducted 2 assessments, a VTT to evaluate the realism of the generated images and a disease identification assessment to evaluate disease presentation. As the ST method showed superior quantitative evaluation compared to DB, we conducted all human evaluations on the ST images.</p>
        <p>The human VTT was performed on 45 real and 45 generated images to evaluate realism. A total of 54 participants, including 41 (75.9%) without a medical background and 13 (24.1%) medical personnel, including 10 (76.9%) attending physicians, 2 (15.4%) physicians in training, and 1 (7.7%) nurse, were asked to classify the images either as real or generated. First, we analyzed the scores of each participant to study the significance of the background and years of experience in identifying the generated images correctly. The generated score (number of generated images correctly identified) was set as the outcome, and the real score (number of real images correctly identified), background (medical versus nonmedical personnel), and years of experience (0: nonmedical personnel, 1: medical personnel with 2 to 5 years of experience, 2: medical personnel with 6 to 10 years of experience, and 3: medical personnel with more than 10 years of experience) were predictors.</p>
        <p>Linear regression was utilized to investigate the significance of the predictors on the outcome. First, the generated score was modeled using the background only, which turned out to be insignificant (<italic>P</italic>=.96). Consequently, the generated score was modeled using the background and years of experience, which also showed no significance (<italic>P</italic>=.65 and .61, respectively). Finally, the real score was integrated as a predictor, and background and experience were not shown to be significant factors (<italic>P</italic>=.45 and .65, respectively); however, the real score was significant (<italic>P&#60;</italic>.001). The generated score in relation to the real score and the final fitted regression model is illustrated in <xref rid="figure6" ref-type="fig">Figure 6</xref>.</p>
        <p>Consequently, we calculated the classification accuracy, FPR, and TPR to compare the generated images with the real ones. As illustrated in <xref rid="figure7" ref-type="fig">Figure 7</xref>, for all participating individuals regardless of background, the FPR was 0.62 (1511/2430 votes; 95% CI 0.60-0.64), and the TPR was 0.60 (1449/2430 votes; 95% CI 0.58-0.62), indicating high realism of the generated images. Moreover, there was no significant difference between the FPR of medical personnel and nonmedical personnel, which was 0.615 (95% CI 0.58-0.65) versus 0.624 (95% CI 0.60-0.65). The overall accuracy was 0.49 (95% CI 0.47-0.50), indicating that the participants had poor differentiation between generated and real images.</p>
        <p>The second human qualitative assessment aimed to evaluate the accuracy of disease presentation in the generated images. We included a total of 80 images: 20 real images and 60 ST method–generated images (20 each for tan, brown, and black skin colors). The diseases included are shown in <xref rid="figure8" ref-type="fig">Figure 8</xref>. Eight expert dermatologists, masked to our study methodology and image sources, participated in a survey comprising real and generated images and chose a diagnosis most consistent with the image presented. The average recall (rate of correctly diagnosed lesions by dermatologists) of the real images was 0.76 (121 correct diagnoses out of 160) compared to 0.75 (360 correct diagnoses out of 480) for the generated images. Details of the recall for each disease group, image type, and skin color are demonstrated in <xref rid="figure8" ref-type="fig">Figure 8</xref>.</p>
        <p>In <xref rid="figure8" ref-type="fig">Figure 8</xref>, the average recall of the generated images grouped by skin color, tan (G-Tan), brown (G-Brown), and dark (G-Dark), is represented by a red dot to compare to the real images. As this figure shows, basal cell carcinoma had the lowest average recall of the generated images compared to the real recall. In basal cell carcinoma, the tan-generated images had a recall of 0.81 compared to a real image recall of 0.69; however, the brown and dark images had a significantly lower recall of 0.44 and 0.38, respectively. Therefore, further analysis was performed to gain a deeper insight into the disease misdiagnosis.</p>
        <p>The results of the recall experiment were summarized as confusion matrices for the real, generated tan, brown, and dark images, as shown in <xref rid="figure9" ref-type="fig">Figure 9</xref>A-D. The diagonal of the confusion matrix represents the rates of correctly diagnosed diseases (true positives), while all other numbers in the matrix represented the misdiagnosis rates.</p>
        <p>It can be observed that basal cell carcinoma in the brown and dark skin images was mainly misdiagnosed as melanoma with a misidentification rate of 0.31 and 0.62, respectively. A closer look at the confusion matrix of the dark generated images (<xref rid="figure9" ref-type="fig">Figure 9</xref>D) reveals that intraepidermal carcinoma was also misdiagnosed as melanoma with a misidentification rate of 0.25. In addition, halo nevus was misidentified as melanoma with a rate of 0.19. On the other hand, melanoma was best identified in the dark skin color with a rate of 0.94. This high rate could be explained by the misdiagnosis of several lesions as melanoma. Thus, any pigmented lesion on the dark skin was primarily misdiagnosed as melanoma.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Generated score versus the real score. Line represents the linear regression model with the standard error shaded.</p>
          </caption>
          <graphic xlink:href="derma_v5i3e39143_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Evaluation of the human Visual Turing test results, with error bars representing 95% CI. FPR: false positive rate; TPR: true positive rate.</p>
          </caption>
          <graphic xlink:href="derma_v5i3e39143_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Recall of the utilized diseases, with error bars representing 95% CI. AK: actinic keratosis; AN: atypical nevi; BCC: basal cell carcinoma; IEC: intraepidermal carcinoma; HN: halo nevus; Hem: hemangioma; Mel: melanoma; SCC: squamous cell carcinoma; SK: seborrheic keratosis; VM: vascular malformation.</p>
          </caption>
          <graphic xlink:href="derma_v5i3e39143_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure9" position="float">
          <label>Figure 9</label>
          <caption>
            <p>Confusion matrix of the real and generated images. (A) real images, (B) tan-generated images, (C) brown-generated images, and (D) dark-generated images.</p>
          </caption>
          <graphic xlink:href="derma_v5i3e39143_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Preliminary Classification Evaluation</title>
        <p>A total of 4 models were developed: trained on set A images without augmentation (model 1), trained on set A augmented with the ST-generated images (model 2), trained on set A augmented with geometric transformations (eg, flipping, rotation, and noise) (model 3), and set A augmented with both the generated and transformed images (model 4). To assess the models’ generalizability, all were tested on set B, which entirely consisted of real images and was characterized by a different skin color distribution compared to the training set A (<xref ref-type="table" rid="table2">Table 2</xref>).</p>
        <p>A comparison between the accuracy and AUC of the developed models is shown in <xref ref-type="table" rid="table5">Table 5</xref>. It can be observed that model 1 is the least performing model because it has the least discrimination ability characterized by the least AUC of 0.63. On the other hand, model 2 is the best performing model with an accuracy and AUC of 0.76 and 0.72, respectively, indicating the significant impact of the skin color augmentation on the model’s generalizability. With respect to model 3 (AUC 0.66), a comparable performance to model 1 (AUC 0.63) can be noticed, indicating that geometric transformations did not significantly increase the model’s performance. Finally, model 4 (AUC 0.69) showed improved performance compared to model 3 (AUC 0.66) but decreased performance compared to model 2 (AUC 0.72), emphasizing that combining several data augmentations did not benefit the model.</p>
        <p>It can be concluded that augmenting the data with diverse skin color images allowed the model to learn skin tone–related features; thus, model 2 was robust to the variations of the skin color in the test set. On the other hand, the geometric transformations did not provide the model with the variability needed to handle the deviation in skin tone distribution present in the test set. Therefore, when combined with the generated images, a decrease in performance was noticed, highlighting the importance of selecting consistent image augmentations that work to fill the gap between the training and testing data [<xref ref-type="bibr" rid="ref38">38</xref>].</p>
        <p>Finally, to evaluate the significance of the difference in the AUC between the best performing model (model 2) and all other models, the Delong test to compare 2 ROC curves [<xref ref-type="bibr" rid="ref46">46</xref>] was carried out. The difference in AUC between models 2 and 1 and between models 2 and 3 was significant (<italic>P</italic>&#60;.001 and <italic>P</italic>=.03, respectively), while there was no significant difference in the AUC between models 2 and 4 (<italic>P</italic>=.35).</p>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Performance of the classification models on set B.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="240"/>
            <col width="380"/>
            <col width="380"/>
            <thead>
              <tr valign="top">
                <td>Models</td>
                <td>Accuracy</td>
                <td>AUC<sup>a</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Model 1</td>
                <td>0.56 (95% CI 0.52-0.60)</td>
                <td>0.63 (95% CI 0.58-0.68)</td>
              </tr>
              <tr valign="top">
                <td>Model 2</td>
                <td>0.76 (95% CI 0.72-0.79)</td>
                <td>0.72 (95% CI 0.67-0.77)</td>
              </tr>
              <tr valign="top">
                <td>Model 3</td>
                <td>0.56 (95% CI 0.52-0.60)</td>
                <td>0.66 (95% CI 0.62-0.71)</td>
              </tr>
              <tr valign="top">
                <td>Model 4</td>
                <td>0.60 (95% CI 0.56-0.64)</td>
                <td>0.69 (95% CI 0.65-0.74)</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>AUC: area under the curve.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Results</title>
        <p>In this work, we proposed a DL-based approach to generate realistic skin images for underrepresented skin colors using publicly available white skin clinical images. We utilized the pathology of light skin images and healthy dark skin images to extract and blend disease and pigmentation features. The employed strategy of generating darker images based on feature blending helped to overcome the lack of dark skin images, as the utilized image generation techniques herein were trained to extract high-level features from images independently from their content [<xref ref-type="bibr" rid="ref26">26</xref>]. In terms of evaluating the quality of the generated images, comprehensive qualitative and quantitative approaches were developed. Given that the qualitative analyses can be affected by the paucity of darker skin images and because human judgment (especially the disease diagnoses test) might vary based on skin color, we performed statistical and mathematical quantitative analyses to address this issue. The results emphasized that ST-generated images had high realism and disease presentation, characterized by a lower loss of realism and higher structural similarity scores for all skin colors compared to those based on the DB method. Moreover, the generated images achieved high FPR and disease recall when compared to the real images. Finally, the generated images contributed to improvement in the classification performance when used to augment the training of ResNet-50 in comparison to other augmentation strategies.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Our work has several noteworthy limitations and areas for future improvement. Lesion pigmentation is not the only factor that characterizes skin cancer in people of color; thus, other disease morphological features need to be integrated into our models. As such, in Phase 4, text features representing skin cancer clinical presentation on darker skin will be created based on the published literature and consequently utilized along with the augmented images to train the classification models. In addition, the classification accuracy that has been investigated herein needs to be improved; therefore, in Phase 4, several CNN architectures and ensemble learning methods will be implemented to boost the classification accuracy. Moreover, images with real pathology in people of color are required to improve model training and validation. Finally, it is worth mentioning that other novel skin tone scales have been recently developed, such as Google’s Monk scale [<xref ref-type="bibr" rid="ref47">47</xref>]. Thus, our skin tone categorization tool can benefit from investigating and validating such new scales.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>Image generation using DL has been applied in the literature to improve data balance. The generative adversarial network (GAN) has been utilized to generate synthetic images for several malignant and benign lesions to overcome class imbalance [<xref ref-type="bibr" rid="ref48">48</xref>]. The model was trained on 10,000 dermoscopic images from the ISIC-2018 data set, and the generated images were evaluated for realism by humans. A total of 3 dermatologists and 5 DL experts classified a random sample of the real and generated images as real or fake. The analysis showed that the human classification accuracy was around 50%, meaning that the raters were not able to clearly distinguish between real and generated images. However, generating images with various skin colors was not considered in the aforementioned study.</p>
        <p>GAN was also employed to generate dermoscopic images to mitigate data imbalance. Three GAN models were trained on 2000 dermoscopic images from the ISIC-2017 data set [<xref ref-type="bibr" rid="ref49">49</xref>]. To evaluate the generated images, the authors compared the normalized color histogram of the generated images with the training images. Their results showed a high similarity in the distribution of both real and generated images. Despite the high quality of the generated images, there was no focus on skin color.</p>
        <p>In another study [<xref ref-type="bibr" rid="ref50">50</xref>], the authors utilized GAN to generate clinical skin images for various skin conditions, in which the required input features (eg, skin color and lesion location) were manually encoded. Encoding of input features was required during all model development phases (eg, training, validation, and testing); thus, the developed model could not be deployed without feature encoding. Although the images could be generated with different skin colors using the encoding maps, no images were generated with dark skin colors.</p>
        <p>In terms of evaluation, the realism of the generated images in the aforementioned study [<xref ref-type="bibr" rid="ref50">50</xref>] was evaluated by conducting a VTT with 10 participants, and the generated images had an average FPR of 0.3. Meanwhile, in our work, the VTT was conducted with 54 participants and achieved a higher FPR of 0.62. Moreover, the disease recall evaluation was conducted with 2 dermatologists and achieved an average recall of 0.45. However, in our work, the disease recall was assessed with 8 dermatologists and achieved a significantly higher average recall of 0.75. Furthermore, we performed a misdiagnosis analysis, and our findings strongly agreed with the published literature on skin cancer misdiagnosis in people of color [<xref ref-type="bibr" rid="ref51">51</xref>].</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>Despite the recent advances of AI in dermatology diagnosis, the lack of skin color diversity when training AI models is a major pitfall. Until a sufficient real-world diverse image repository is collected, augmenting real images with generated darker skin images is the first step to implementing robust diagnosis models. The generated images in this work achieved high realism and disease recall scores when compared to the real images. In addition, the generated images augmented the publicly available white skin images, and a classification model was developed that outperformed the model trained without the generated images. In our future work, which will comprise Phase 4 of this study, we will focus on overcoming our previously mentioned limitations to boost the accuracy and robustness of the preliminary classification model discussed herein. After completing all study phases and addressing all discussed limitations, the resulting model will be a tool to aid general practitioners in diagnosing possible skin malignancy and thereby improve the efficiency and reduce the redundancy of referrals that expert dermatologists receive for further clinical assessments and biopsies.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Image generation fine-tuning.</p>
        <media xlink:href="derma_v5i3e39143_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 516 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Individual typology angle.</p>
        <media xlink:href="derma_v5i3e39143_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 172 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Quantitative evaluation details.</p>
        <media xlink:href="derma_v5i3e39143_app3.pdf" xlink:title="PDF File  (Adobe PDF File), 227 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">BRISQUE</term>
          <def>
            <p>blind referenceless image spatial quality evaluator</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">DB</term>
          <def>
            <p>deep blending</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">DL</term>
          <def>
            <p>deep learning</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">FPR</term>
          <def>
            <p>false positive rate</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">GAN</term>
          <def>
            <p>generative adversarial network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">ISIC</term>
          <def>
            <p>International Skin Imaging Collaboration</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">ITA</term>
          <def>
            <p>individual typology angle</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">NSERC</term>
          <def>
            <p>Natural Sciences and Engineering Research Council of Canada</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">SSIM</term>
          <def>
            <p>structural similarity index measure</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">ST</term>
          <def>
            <p>style transfer</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">TPR</term>
          <def>
            <p>true positive rate</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">VGG</term>
          <def>
            <p>visual geometry group</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">VTT</term>
          <def>
            <p>visual Turing test</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>All authors contributed equally to this work. This research is funded by the Natural Sciences and Engineering Research Council of Canada (NSERC). We would like to thank Dr Irene Lara-Corrales and Dr Mohamed Hegazy for their input on the qualitative assessment of the generated images and for distributing it to their dermatology departments.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tessier</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>White lens of medicine: lack of diversity in dermatology hurts people of color</article-title>
          <source>Ms Magazine</source>
          <year>2020</year>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://msmagazine.com/2020/07/27/white-lens-of-medicine-lack-of-diversity-in-dermatology-hurts-people-of-color/">https://msmagazine.com/2020/07/27/white-lens-of-medicine-lack-of-diversity-in-dermatology-hurts-people-of-color/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adelekun</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Onyekaba</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lipoff</surname>
              <given-names>JB</given-names>
            </name>
          </person-group>
          <article-title>Skin color in dermatology textbooks: An updated evaluation and analysis</article-title>
          <source>J Am Acad Dermatol</source>
          <year>2021</year>
          <month>01</month>
          <volume>84</volume>
          <issue>1</issue>
          <fpage>194</fpage>
          <lpage>196</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jaad.2020.04.084</pub-id>
          <pub-id pub-id-type="medline">32335181</pub-id>
          <pub-id pub-id-type="pii">S0190-9622(20)30700-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marchetti</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Liopyris</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dusza</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Codella</surname>
              <given-names>NCF</given-names>
            </name>
            <name name-style="western">
              <surname>Gutman</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Helba</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kalloo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Halpern</surname>
              <given-names>AC</given-names>
            </name>
            <collab>International Skin Imaging Collaboration</collab>
          </person-group>
          <article-title>Computer algorithms show potential for improving dermatologists' accuracy to diagnose cutaneous melanoma: Results of the International Skin Imaging Collaboration 2017</article-title>
          <source>J Am Acad Dermatol</source>
          <year>2020</year>
          <month>03</month>
          <volume>82</volume>
          <issue>3</issue>
          <fpage>622</fpage>
          <lpage>627</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31306724"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jaad.2019.07.016</pub-id>
          <pub-id pub-id-type="medline">31306724</pub-id>
          <pub-id pub-id-type="pii">S0190-9622(19)32373-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC7006718</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haenssle</surname>
              <given-names>HA</given-names>
            </name>
            <name name-style="western">
              <surname>Fink</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Toberer</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stolz</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Deinlein</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hofmann-Wellenhof</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lallas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Emmert</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Buhl</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zutt</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Blum</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Abassi</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tromme</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Tschandl</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Enk</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenberger</surname>
              <given-names>A</given-names>
            </name>
            <collab>Reader Study Level I and Level II Groups</collab>
          </person-group>
          <article-title>Man against machine reloaded: performance of a market-approved convolutional neural network in classifying a broad spectrum of skin lesions in comparison with 96 dermatologists working under less artificial conditions</article-title>
          <source>Ann Oncol</source>
          <year>2020</year>
          <month>01</month>
          <volume>31</volume>
          <issue>1</issue>
          <fpage>137</fpage>
          <lpage>143</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0923-7534(19)35468-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.annonc.2019.10.013</pub-id>
          <pub-id pub-id-type="medline">31912788</pub-id>
          <pub-id pub-id-type="pii">S0923-7534(19)35468-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Codella N</collab>
            <collab>Rotemberg V</collab>
            <collab>Tschandl P</collab>
            <collab>Celebi M E</collab>
            <collab>Dusza S</collab>
            <collab>Gutman D</collab>
            <collab>Helba B</collab>
            <collab>Kalloo A</collab>
            <collab>Liopyris K</collab>
            <collab>Marchetti M</collab>
            <collab>Kittler H</collab>
            <collab>Halpern A</collab>
          </person-group>
          <article-title>Skin lesion analysis toward melanoma detection 2018: A challenge hosted by the international skin imaging collaboration (ISIC)</article-title>
          <source>arXiv</source>
          <year>2019</year>
          <month>03</month>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1902.03368">http://arxiv.org/abs/1902.03368</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Phillips</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Marsden</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jaffe</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Matin</surname>
              <given-names>RN</given-names>
            </name>
            <name name-style="western">
              <surname>Wali</surname>
              <given-names>GN</given-names>
            </name>
            <name name-style="western">
              <surname>Greenhalgh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McGrath</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>James</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Ladoyanni</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Bewley</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Argenziano</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Palamaras</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Assessment of Accuracy of an Artificial Intelligence Algorithm to Detect Melanoma in Images of Skin Lesions</article-title>
          <source>JAMA Netw Open</source>
          <year>2019</year>
          <month>10</month>
          <day>02</day>
          <volume>2</volume>
          <issue>10</issue>
          <fpage>e1913436</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jamanetwork.com/journals/jamanetworkopen/fullarticle/10.1001/jamanetworkopen.2019.13436"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2019.13436</pub-id>
          <pub-id pub-id-type="medline">31617929</pub-id>
          <pub-id pub-id-type="pii">2752995</pub-id>
          <pub-id pub-id-type="pmcid">PMC6806667</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tschandl</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rosendahl</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Akay</surname>
              <given-names>BN</given-names>
            </name>
            <name name-style="western">
              <surname>Argenziano</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Blum</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>RP</given-names>
            </name>
            <name name-style="western">
              <surname>Cabo</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gourhant</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kreusch</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lallas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lapins</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Marghoob</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Menzies</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Neuber</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Paoli</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rabinovitz</surname>
              <given-names>HS</given-names>
            </name>
            <name name-style="western">
              <surname>Rinner</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Scope</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Soyer</surname>
              <given-names>HP</given-names>
            </name>
            <name name-style="western">
              <surname>Sinz</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zalaudek</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kittler</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Expert-Level Diagnosis of Nonpigmented Skin Cancer by Combined Convolutional Neural Networks</article-title>
          <source>JAMA Dermatol</source>
          <year>2019</year>
          <month>01</month>
          <day>01</day>
          <volume>155</volume>
          <issue>1</issue>
          <fpage>58</fpage>
          <lpage>65</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/30484822"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamadermatol.2018.4378</pub-id>
          <pub-id pub-id-type="medline">30484822</pub-id>
          <pub-id pub-id-type="pii">2716294</pub-id>
          <pub-id pub-id-type="pmcid">PMC6439580</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thomas</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Lefevre</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Baxter</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hamilton</surname>
              <given-names>NA</given-names>
            </name>
          </person-group>
          <article-title>Interpretable deep learning systems for multi-class segmentation and classification of non-melanoma skin cancer</article-title>
          <source>Med Image Anal</source>
          <year>2021</year>
          <month>02</month>
          <volume>68</volume>
          <fpage>101915</fpage>
          <pub-id pub-id-type="doi">10.1016/j.media.2020.101915</pub-id>
          <pub-id pub-id-type="medline">33260112</pub-id>
          <pub-id pub-id-type="pii">S1361-8415(20)30279-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Eng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Way</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Bui</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kanada</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>de Oliveira Marinho</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gallegos</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gabriele</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Natarajan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Hofmann-Wellenhof</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>GS</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Ai</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Dunn</surname>
              <given-names>RC</given-names>
            </name>
            <name name-style="western">
              <surname>Coz</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A deep learning system for differential diagnosis of skin diseases</article-title>
          <source>Nat Med</source>
          <year>2020</year>
          <month>06</month>
          <volume>26</volume>
          <issue>6</issue>
          <fpage>900</fpage>
          <lpage>908</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-020-0842-3</pub-id>
          <pub-id pub-id-type="medline">32424212</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-020-0842-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Eun Chang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Chae</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Huh</surname>
              <given-names>CH</given-names>
            </name>
            <name name-style="western">
              <surname>Na</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Augmented Intelligence Dermatology: Deep Neural Networks Empower Medical Professionals in Diagnosing Skin Cancer and Predicting Treatment Options for 134 Skin Disorders</article-title>
          <source>J Invest Dermatol</source>
          <year>2020</year>
          <month>09</month>
          <volume>140</volume>
          <issue>9</issue>
          <fpage>1753</fpage>
          <lpage>1761</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0022-202X(20)30136-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jid.2020.01.019</pub-id>
          <pub-id pub-id-type="medline">32243882</pub-id>
          <pub-id pub-id-type="pii">S0022-202X(20)30136-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Imaizumi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Watanabe</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hirano</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Takemura</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kashiwagi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Monobe</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Hippocra: doctor-to-doctor teledermatology consultation service towards future AI-based diagnosis system in Japan</article-title>
          <year>2017</year>
          <conf-name>IEEE International Conference on Consumer Electronics</conf-name>
          <conf-date>June 12-14</conf-date>
          <conf-loc>Taipei, Taiwan</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>51</fpage>
          <lpage>52</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Na</surname>
              <given-names>JI</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Deep neural networks show an equivalent and often superior performance to dermatologists in onychomycosis diagnosis: Automatic construction of onychomycosis datasets by region-based convolutional deep neural network</article-title>
          <source>PLoS One</source>
          <year>2018</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>e0191493</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0191493"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0191493</pub-id>
          <pub-id pub-id-type="medline">29352285</pub-id>
          <pub-id pub-id-type="pii">PONE-D-17-25566</pub-id>
          <pub-id pub-id-type="pmcid">PMC5774804</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schlessinger</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Chhor</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gevaert</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Swetter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Novoa</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and dermatology: opportunities, challenges, and future directions</article-title>
          <source>Semin Cutan Med Surg</source>
          <year>2019</year>
          <month>03</month>
          <day>01</day>
          <volume>38</volume>
          <issue>1</issue>
          <fpage>E31</fpage>
          <lpage>E37</lpage>
          <pub-id pub-id-type="doi">10.12788/j.sder.2019.007</pub-id>
          <pub-id pub-id-type="medline">31051021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Myers</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Thibodeaux</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Brownstone</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liao</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Machine Learning in Dermatology: Current Applications, Opportunities, and Limitations</article-title>
          <source>Dermatol Ther (Heidelb)</source>
          <year>2020</year>
          <month>06</month>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>365</fpage>
          <lpage>386</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32253623"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13555-020-00372-0</pub-id>
          <pub-id pub-id-type="medline">32253623</pub-id>
          <pub-id pub-id-type="pii">10.1007/s13555-020-00372-0</pub-id>
          <pub-id pub-id-type="pmcid">PMC7211783</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Han</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>SE</given-names>
            </name>
          </person-group>
          <article-title>Classification of the Clinical Images for Benign and Malignant Cutaneous Tumors Using a Deep Learning Algorithm</article-title>
          <source>J Invest Dermatol</source>
          <year>2018</year>
          <month>07</month>
          <volume>138</volume>
          <issue>7</issue>
          <fpage>1529</fpage>
          <lpage>1538</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0022-202X(18)30111-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jid.2018.01.028</pub-id>
          <pub-id pub-id-type="medline">29428356</pub-id>
          <pub-id pub-id-type="pii">S0022-202X(18)30111-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Navarrete-Dechent</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Dusza</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Liopyris</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Marghoob</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Halpern</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Marchetti</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Automated Dermatological Diagnosis: Hype or Reality?</article-title>
          <source>J Invest Dermatol</source>
          <year>2018</year>
          <month>10</month>
          <volume>138</volume>
          <issue>10</issue>
          <fpage>2277</fpage>
          <lpage>2279</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0022-202X(18)31991-2"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jid.2018.04.040</pub-id>
          <pub-id pub-id-type="medline">29864435</pub-id>
          <pub-id pub-id-type="pii">S0022-202X(18)31991-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC7701995</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <source>International Skin Imaging Collaboration</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.isic-archive.com/">https://www.isic-archive.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rahman</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Hossain</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Islam</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Hasan</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Hridhee</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>An approach for multiclass skin lesion classification based on ensemble learning</article-title>
          <source>Informatics in Medicine Unlocked</source>
          <year>2021</year>
          <volume>25</volume>
          <fpage>100659</fpage>
          <pub-id pub-id-type="doi">10.1016/j.imu.2021.100659</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hasan</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Elahi</surname>
              <given-names>MTE</given-names>
            </name>
            <name name-style="western">
              <surname>Alam</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Jawad</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Martí</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>DermoExpert: Skin lesion classification using a hybrid convolutional neural network through segmentation, transfer learning, and augmentation</article-title>
          <source>Informatics in Medicine Unlocked</source>
          <year>2022</year>
          <volume>28</volume>
          <fpage>100819</fpage>
          <pub-id pub-id-type="doi">10.1016/j.imu.2021.100819</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kinyanjui</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Odonga</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cintas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Codella</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Panda</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sattigeri</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Varshney</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Estimating skin tone and effects on classification performance in dermatology datasets</article-title>
          <year>2019</year>
          <conf-name>NeurIPS 2019 Workshop on Fair ML for Health</conf-name>
          <conf-date>December 14</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rezk</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Eltorki</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>El-Dakhakhni</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Leveraging Artificial Intelligence to Improve the Diversity of Dermatological Skin Color Pathology: Protocol for an Algorithm Development and Validation Study</article-title>
          <source>JMIR Res Protoc</source>
          <year>2022</year>
          <month>03</month>
          <day>08</day>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>e34896</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchprotocols.org/2022/3/e34896/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/34896</pub-id>
          <pub-id pub-id-type="medline">34983017</pub-id>
          <pub-id pub-id-type="pii">v11i3e34896</pub-id>
          <pub-id pub-id-type="pmcid">PMC8941446</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oakley</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>DermNet NZ</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dermnetnz.org/">https://dermnetnz.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Silva</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Dermatology Atlas</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.atlasdermatologico.com.br/index.jsf">http://www.atlasdermatologico.com.br/index.jsf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gatys</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Ecker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bethge</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Image style transfer using convolutional neural networks</article-title>
          <year>2016</year>
          <conf-name>The IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>2016 June 27-30</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>2414</fpage>
          <lpage>2423</lpage>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.265</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Simonyan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zisserman</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Very deep convolutional networks for large-scale image recognition</article-title>
          <year>2015</year>
          <conf-name>The 3rd International Conference on Learning Representations</conf-name>
          <conf-date>May 7-9</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Donahue</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Vinyals</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffman</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tzeng</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Darrell</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>DeCAF: a deep convolutional activation feature for generic visual recognition</article-title>
          <year>2014</year>
          <conf-name>The 31st International Conference on Machine Learning</conf-name>
          <conf-date>Jun 21-26</conf-date>
          <conf-loc>Beijing, China</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pérez</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gangnet</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Blake</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Poisson image editing</article-title>
          <source>ACM Trans. Graph</source>
          <year>2003</year>
          <month>07</month>
          <volume>22</volume>
          <issue>3</issue>
          <fpage>313</fpage>
          <lpage>318</lpage>
          <pub-id pub-id-type="doi">10.1145/882262.882269</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep image blending</article-title>
          <year>2020</year>
          <conf-name>The IEEE Winter Conference on Applications of Computer Vision</conf-name>
          <conf-date>March 1-5</conf-date>
          <conf-loc>Snowmass Village, CO</conf-loc>
          <pub-id pub-id-type="doi">10.1109/wacv45572.2020.9093632</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lionnie</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Alaydrus</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A comparison of human skin color detection for biometric identification</article-title>
          <year>2017</year>
          <conf-name>The International Conference on Broadband Communication, Wireless Sensors and Powering</conf-name>
          <conf-date>Nov 21-23</conf-date>
          <conf-loc>Jakarta, Indonesia</conf-loc>
          <pub-id pub-id-type="doi">10.1109/bcwsp.2017.8272565</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Tanaka</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Akimoto</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Utilization of individual typology angle (ITA) and hue angle in the measurement of skin color on images</article-title>
          <source>Bioimages</source>
          <year>2020</year>
          <volume>28</volume>
          <fpage>1</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.11169/bioimages.28.1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="web">
          <source>Shutterstock</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.shutterstock.com/home">https://www.shutterstock.com/home</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mittal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Moorthy</surname>
              <given-names>AK</given-names>
            </name>
            <name name-style="western">
              <surname>Bovik</surname>
              <given-names>AC</given-names>
            </name>
          </person-group>
          <article-title>No-reference image quality assessment in the spatial domain</article-title>
          <source>IEEE Trans Image Process</source>
          <year>2012</year>
          <volume>21</volume>
          <fpage>4695</fpage>
          <lpage>4708</lpage>
          <pub-id pub-id-type="doi">10.1109/TIP.2012.2214050</pub-id>
          <pub-id pub-id-type="medline">22910118</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharifi</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Leon-Garcia</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Estimation of shape parameter for generalized Gaussian distributions in subband decompositions of video</article-title>
          <source>IEEE Trans Circuits Syst Video Technol</source>
          <year>1995</year>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>52</fpage>
          <lpage>56</lpage>
          <pub-id pub-id-type="doi">10.1109/76.350779</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lasmar</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Stitou</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Berthoumieu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Multiscale skewed heavy tailed model for texture analysis</article-title>
          <year>2009</year>
          <conf-name>The 16th IEEE International Conference on Image Processing</conf-name>
          <conf-date>Nov 7-10</conf-date>
          <conf-loc>Cairo, Egypt</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <pub-id pub-id-type="doi">10.1109/icip.2009.5414404</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Renieblas</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Nogués</surname>
              <given-names>Agustín Turrero</given-names>
            </name>
            <name name-style="western">
              <surname>González</surname>
              <given-names>Alberto Muñoz</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez-Leon</surname>
              <given-names>Nieves</given-names>
            </name>
            <name name-style="western">
              <surname>Del Castillo</surname>
              <given-names>EG</given-names>
            </name>
          </person-group>
          <article-title>Structural similarity index family for image quality assessment in radiological images</article-title>
          <source>J Med Imaging (Bellingham)</source>
          <year>2017</year>
          <month>07</month>
          <volume>4</volume>
          <issue>3</issue>
          <fpage>035501</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28924574"/>
          </comment>
          <pub-id pub-id-type="doi">10.1117/1.JMI.4.3.035501</pub-id>
          <pub-id pub-id-type="medline">28924574</pub-id>
          <pub-id pub-id-type="pii">17059R</pub-id>
          <pub-id pub-id-type="pmcid">PMC5527267</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shahsavari</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ranjbari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Khatibi</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Proposing a novel Cascade Ensemble Super Resolution Generative Adversarial Network (CESR-GAN) method for the reconstruction of super-resolution skin lesion images</article-title>
          <source>Informatics in Medicine Unlocked</source>
          <year>2021</year>
          <volume>24</volume>
          <fpage>100628</fpage>
          <pub-id pub-id-type="doi">10.1016/j.imu.2021.100628</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Habtzghi</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Midha</surname>
              <given-names>CK</given-names>
            </name>
            <name name-style="western">
              <surname>Das</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Modified Clopper-Pearson Confidence Interval for Binomial Proportion</article-title>
          <source>Journal of Statistical Theory and Applications</source>
          <year>2014</year>
          <volume>13</volume>
          <issue>4</issue>
          <fpage>296</fpage>
          <lpage>310</lpage>
          <pub-id pub-id-type="doi">10.2991/jsta.2014.13.4.3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shorten</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Khoshgoftaar</surname>
              <given-names>TM</given-names>
            </name>
          </person-group>
          <article-title>A survey on Image Data Augmentation for Deep Learning</article-title>
          <source>J Big Data</source>
          <year>2019</year>
          <volume>6</volume>
          <pub-id pub-id-type="doi">10.1186/s40537-019-0197-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <year>2016</year>
          <conf-name>The IEEE conference on computer vision and pattern recognition</conf-name>
          <conf-date>Jun 27-30</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zuo</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Su</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The Classification of Six Common Skin Diseases Based on Xiangya-Derm: Development of a Chinese Database for Artificial Intelligence</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <month>09</month>
          <day>21</day>
          <volume>23</volume>
          <issue>9</issue>
          <fpage>e26025</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/9/e26025/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/26025</pub-id>
          <pub-id pub-id-type="medline">34546174</pub-id>
          <pub-id pub-id-type="pii">v23i9e26025</pub-id>
          <pub-id pub-id-type="pmcid">PMC8493463</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>He</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shi</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jian</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Xie</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yeh</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A Novel Convolutional Neural Network for the Diagnosis and Classification of Rosacea: Usability Study</article-title>
          <source>JMIR Med Inform</source>
          <year>2021</year>
          <month>03</month>
          <day>15</day>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>e23415</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2021/3/e23415/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/23415</pub-id>
          <pub-id pub-id-type="medline">33720027</pub-id>
          <pub-id pub-id-type="pii">v9i3e23415</pub-id>
          <pub-id pub-id-type="pmcid">PMC8077711</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kingma</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ba</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Adam: A method for stochastic optimization</article-title>
          <year>2015</year>
          <conf-name>The 3rd International Conference on Learning Representations</conf-name>
          <conf-date>May 7-9</conf-date>
          <conf-loc>San Diego, CA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bae</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Iyengar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Demystifying learning rate policies for high accuracy training of deep neural networks</article-title>
          <year>2019</year>
          <conf-name>The IEEE International Conference on Big Data</conf-name>
          <conf-date>Dec 9-12</conf-date>
          <conf-loc>Los Angeles, CA</conf-loc>
          <publisher-name>IEEE</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chollet</surname>
              <given-names>F</given-names>
            </name>
            <collab>Others</collab>
          </person-group>
          <article-title>Keras</article-title>
          <source>GitHub</source>
          <year>2015</year>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/fchollet/keras">https://github.com/fchollet/keras</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abadi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Agarwal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Barham</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brevdo</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Citro</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Corrado</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>A</given-names>
            </name>
            <collab>Others</collab>
          </person-group>
          <article-title>TensorFlow: large-scale machine learning on heterogeneous distributed systems</article-title>
          <source>TensorFlow</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.tensorflow.org/">https://www.tensorflow.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DeLong</surname>
              <given-names>ER</given-names>
            </name>
            <name name-style="western">
              <surname>DeLong</surname>
              <given-names>DM</given-names>
            </name>
            <name name-style="western">
              <surname>Clarke-Pearson</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Comparing the areas under two or more correlated receiver operating characteristic curves: a nonparametric approach</article-title>
          <source>Biometrics</source>
          <year>1988</year>
          <month>09</month>
          <volume>44</volume>
          <issue>3</issue>
          <fpage>837</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="medline">3203132</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Doshi</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Improving skin tone representation across Google</article-title>
          <source>Google AI</source>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://blog.google/products/search/monk-skin-tone-scale/">https://blog.google/products/search/monk-skin-tone-scale/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baur</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Albarqouni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Stoyanov</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Generating highly realistic images of skin lesions with GANs</article-title>
          <source>OR 2.0 Context-Aware Operating Theaters, Computer Assisted Robotic Endoscopy, Clinical Image-Based Procedures, Skin Image Analysis. CARE 2018, CLIP 2018, OR 2.0 2018, ISIC 2018. Lecture Notes in Computer Science</source>
          <year>2018</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>260</fpage>
          <lpage>267</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baur</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Albarqouni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Navab</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>MelanoGANs: high resolution skin lesion synthesis with GANs</article-title>
          <source>arXiv</source>
          <year>2018</year>
          <access-date>2022-08-11</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.48550/arXiv.1804.04338">https://doi.org/10.48550/arXiv.1804.04338</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghorbani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Natarajan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Coz</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>DermGAN: Synthetic generation of clinical skin images with pathology</article-title>
          <year>2019</year>
          <conf-name>NeurIPS workshop Machine Learning for Health (ML4H)</conf-name>
          <conf-date>Dec 13</conf-date>
          <conf-loc>Vancouver, BC</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bradford</surname>
              <given-names>PT</given-names>
            </name>
          </person-group>
          <article-title>Skin cancer in skin of color</article-title>
          <source>Dermatol Nurs</source>
          <year>2009</year>
          <volume>21</volume>
          <issue>4</issue>
          <fpage>170</fpage>
          <lpage>178</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/19691228">https://europepmc.org/abstract/MED/19691228</ext-link>
          </comment>
          <pub-id pub-id-type="medline">19691228</pub-id>
          <pub-id pub-id-type="pmcid">PMC2757062</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
