<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JDERM</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Dermatol</journal-id>
      <journal-title>JMIR Dermatology</journal-title>
      <issn pub-type="epub">2562-0959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v3i1e18438</article-id>
      <article-id pub-id-type="pmid"/>
      <article-id pub-id-type="doi">10.2196/18438</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Skin Lesion Classification With Deep Convolutional Neural Network: Process Development and Validation</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Eysenbach</surname>
            <given-names>Gunther</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chitranshi</surname>
            <given-names>Anany</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Ray</surname>
            <given-names>Keya</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes" equal-contrib="yes">
          <name name-style="western">
            <surname>Ray</surname>
            <given-names>Arnab</given-names>
          </name>
          <degrees>BTech</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>SRM Institute of Science and Technology</institution>
            <addr-line>SRM Nagar, Kattankulathur</addr-line>
            <addr-line>Chennai, 603203</addr-line>
            <country>India</country>
            <phone>91 8939336693</phone>
            <email>ad733943@gmail.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4117-6577</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Gupta</surname>
            <given-names>Aman</given-names>
          </name>
          <degrees>BTech</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0725-2166</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Al</surname>
            <given-names>Amutha</given-names>
          </name>
          <degrees>MEng</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3236-2447</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>SRM Institute of Science and Technology</institution>
        <addr-line>Chennai</addr-line>
        <country>India</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Arnab Ray <email>ad733943@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jan-Dec</season>
        <year>2020</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>7</day>
        <month>5</month>
        <year>2020</year>
      </pub-date>
      <volume>3</volume>
      <issue>1</issue>
      <elocation-id>e18438</elocation-id>
      <history>
        <date date-type="received">
          <day>26</day>
          <month>2</month>
          <year>2020</year>
        </date>
        <date date-type="rev-request">
          <day>3</day>
          <month>3</month>
          <year>2020</year>
        </date>
        <date date-type="rev-recd">
          <day>3</day>
          <month>3</month>
          <year>2020</year>
        </date>
        <date date-type="accepted">
          <day>21</day>
          <month>3</month>
          <year>2020</year>
        </date>
      </history>
      <copyright-statement>©Arnab Ray, Aman Gupta, Amutha Al. Originally published in JMIR Dermatology (http://derma.jmir.org), 07.05.2020.</copyright-statement>
      <copyright-year>2020</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Dermatology Research, is properly cited. The complete bibliographic information, a link to the original publication on http://derma.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="http://derma.jmir.org/2020/1/e18438/" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Skin cancer is the most common cancer and is often ignored by people at an early stage. There are 5.4 million new cases of skin cancer worldwide every year. Deaths due to skin cancer could be prevented by early detection of the mole.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We propose a skin lesion classification system that has the ability to detect such moles at an early stage and is able to easily differentiate between a cancerous and noncancerous mole. Using this system, we would be able to save time and resources for both patients and practitioners.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We created a deep convolutional neural network using an Inceptionv3 and DenseNet-201 pretrained model.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>We found that using the concepts of fine-tuning and the ensemble learning model yielded superior results. Furthermore, fine-tuning the whole model helped models converge faster compared to fine-tuning only the top layers, giving better accuracy overall.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Based on our research, we conclude that deep learning algorithms are highly suitable for classifying skin cancer images.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>deep convolutional neural network</kwd>
        <kwd>VGG16</kwd>
        <kwd>Inceptionv3</kwd>
        <kwd>Inception ResNet V2</kwd>
        <kwd>DenseNet</kwd>
        <kwd>skin cancer</kwd>
        <kwd>cancer</kwd>
        <kwd>neural network</kwd>
        <kwd>machine learning</kwd>
        <kwd>melanoma</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Skin Cancer</title>
        <p>One in every three cancers diagnosed is skin cancer. Although melanomas represent fewer than 5% of all skin cancers, they account for approximately 75% of all skin cancer–related deaths and are responsible for over 10,000 deaths annually. Early detection of the mole would decrease the number of skin cancer deaths.</p>
        <p>Skin cancer is significantly lower in India due to the presence of eumelanin in India’s dark-skinned population, which provides some protection against the development of skin cancer. Still, skin cancer constituted 3.18% of all patients with cancer in India. Of this, 54.76% were basal cell carcinomas, while 36.91% were squamous cell carcinoma and malignant melanoma was only 8.33%. The majority of patients were from rural areas (88%) and many were involved in agriculture (92%) [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      </sec>
      <sec>
        <title>Neural Networks in the Context of Skin Cancer</title>
        <p>We searched for research papers that used neural networks in the context of skin cancer from Google Scholar, PubMed, Research Gate, and the ISIC (International Skin Imaging Collaboration) archive. We included the results in the literature survey. Deep learning has solved many complex modern problems. The increasing amount of data on the internet helps in this process. There is a huge improvement in image classification using convolutional neural networks (CNN). The first few layers of deep CNN (DCNN) can learn the general features of an image, which can be used for different models. Using fine-tuning, DCNN models trained on one data set can be reused for image classification of other data sets. By fine-tuning Inceptionv3, Esteva et al [<xref ref-type="bibr" rid="ref2">2</xref>] proposed that, “CNN achieves performance on par with all tested experts, demonstrating an artificial intelligence capable of classifying skin cancer with a level of competence comparable to dermatologists”. Esteva and colleagues used their own obtained dermatologist-labelled data set consisting of 129,450 clinical images, including 3374 dermoscopy images. This data set includes 2032 skin diseases, belonging to 9 skin disease partitions. By fine-tuning Inceptionv3 on this data set, Esteva and colleagues achieved up to 66% accuracy classification on these 9 classes.</p>
        <p>Another previously published study that used DCNN used AlexNet [<xref ref-type="bibr" rid="ref3">3</xref>]. The data set consisted of 200 pictures. However, by image augmentation (ie, rotating all the pictures), 4400 images were made. This study used the transfer learning model, in which the AlexNet model was trained on ImageNet data, and the last layer was replaced with the softmax layer that is classified into melanoma, seborrheic keratosis, and nevus. For the change of weights, they used the stochastic gradient descent (SGD) algorithmic program. They were able to achieve an accuracy of 98%.</p>
        <p>In another study, the authors planned a mechanized strategy for malignant melanoma determination connected to an arrangement of dermoscopy photos [<xref ref-type="bibr" rid="ref4">4</xref>]. Highlights removed relied upon using a multilayer perceptron (MLP) classifier and coevent network to distinguish between melanocytic nevi and melanoma. The authors proposed two different procedures for MLP: programmed MLP and conventional MLP. Both techniques were useful for the separation of melanocytic carcinoma with a high accuracy. Following this, the arrangement procedure was executed with an MLP classifier that involved two strategies: automatic MLP and traditional MLP. The MLP classifier displayed distinctive grouping accuracy. The programmed MLP planned 93.4% and 76% training and testing accuracy, respectively.</p>
        <p>A different study used a model that uses support vector machine (SVM) learning algorithms [<xref ref-type="bibr" rid="ref5">5</xref>]. Their model did not use annotated information. The feature transfer that they used allowed the system to draw similarities between observations of dermoscopic pictures and that of the natural world. It mimics the method specialists use to explain patterns in skin lesions. Two-fold cross-validation was performed 20 times for analysis (40 experiments in total), and two discrimination tasks were examined: malignant melanoma versus atypical lesions, and malignant melanoma versus all nonmelanoma lesions. This approach achieved an accuracy of 93.1% for the primary task and 73.9% accuracy for the second task.</p>
        <p>In another study, authors designed and modelled a system that can collect and combine past pigmented skin lesion (PSL) image results, their analysis, and corresponding observations and conclusions by medical experts, using a prototyping methodology [<xref ref-type="bibr" rid="ref6">6</xref>]. One area of the system used computational intelligence techniques to research, process, and classify the images and their probable morphology. Trained medical personnel in remote locations can use mobile knowledge acquisition devices to take pictures of PSL and input the pictures into the planned system, which would classify the imaged PSL as malignant or benign.</p>
        <p>Another group used a similar concept using DCNN. They trained their model on a data set of 129,450 images. They used the Inceptionv3 architecture model and classified images among 757 different melanoma classes. The accuracy achieved was 72%; this value was relatively low due to the high number of classes in this data set [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>Another study used lesion segmentation as the first step of processing [<xref ref-type="bibr" rid="ref7">7</xref>]. They identified morphological features specific to certain lesions. Preprocessing steps included changing the color channel, smoothing the image, removing hairs, etc. They modelled the algorithm as a binary classification model (ie, benign or malignant). Lesion-related morphological features (including diameter, color, and magnification) were used as the input to a number of classifiers. The best accuracy (79%) was found with the k-nearest neighbors (KNN) algorithm.</p>
        <p>In this project, we used the HAM10000 data set obtained by ViDIR Group, Department of Dermatology, Medical University of Vienna. <xref rid="figure1" ref-type="fig">Figure 1</xref> shows example images from the data set that was used for this study.</p>
        <p>In this study, we fine-tuned DCNNs and compared the performance of 4 DCNNs: VGG16, Inception-ResNet V2, Inceptionv3, and DenseNet-201. Each DCNN was fine-tuned from the top layers. Fine-tuning of all layers was performed with Inceptionv3 and DenseNet-201. Finally, we created an ensemble of Inceptionv3 and DenseNet-201 with all layers fine-tuned.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Example lesion photos from the HAM10000 data set (ViDIR Group, Department of Dermatology, Medical University of Vienna).</p>
          </caption>
          <graphic xlink:href="derma_v3i1e18438_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Exploratory Data Analysis</title>
        <p>This step was performed to better understand the data and prepare the data for neural networks. In this project, we used the HAM10000 data set obtained by ViDIR Group, Department of Dermatology, Medical University of Vienna. The diagnostic accuracy for melanoma was significantly higher with dermoscopy compared to unaided eye diagnosis (respectively, log OR 4.0 [95% CI 3.0-5.1] versus log OR 2.7 [95% CI 1.9-3.4], an improvement of 49%, <italic>P</italic>&lt;.001) [<xref ref-type="bibr" rid="ref8">8</xref>]. The diagnostic accuracy solely depended on the experience and knowledge of the examiner.</p>
        <p>We observed that this data set is biased toward melanocytic nevi, as seen in <xref ref-type="table" rid="table1">Table 1</xref>. Hence, in the worst-case scenario, our neural network model will have an accuracy higher than 60%.</p>
        <p>All the original images (450×600 pixels) were resized to 64×64-pixel RGB images for the baseline model and 192×256 pixels for fine-tuning models. The data set was split into 7210 training examples, 1803 validation examples, and 1002 test examples.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Counts for each type of lesion in the data set.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>Type of lesion</td>
                <td>Number of images</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Melanocytic nevi</td>
                <td>6705</td>
              </tr>
              <tr valign="top">
                <td>Melanoma</td>
                <td>1113</td>
              </tr>
              <tr valign="top">
                <td>Benign keratosis</td>
                <td>1099</td>
              </tr>
              <tr valign="top">
                <td>Basal cell carcinoma</td>
                <td>514</td>
              </tr>
              <tr valign="top">
                <td>Actinic keratoses</td>
                <td>325</td>
              </tr>
              <tr valign="top">
                <td>Vascular lesions</td>
                <td>142</td>
              </tr>
              <tr valign="top">
                <td>Dermatofibroma</td>
                <td>115</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Baseline Model</title>
        <p>We built a baseline CNN to estimate the difficulty of the problem. Our architecture consisted of 6 layers: (1) a convolutional layer with 16 kernels each of size 3 and padding such that the size of the image is maintained, (2) a max-pooling layer with 2×2 window, (3) a convolutional layer with 32 kernels each of size 3 and padding to maintain size, (4) a max-pooling layer with 2×2 window, (5) a convolutional layer with 64 kernels each of size 3 and padding to maintain size, and (6) a max-pooling layer with 2×2 window.</p>
        <p>To train the model, data augmentation was required. The learning rate was initialized at 0.01 and Adam Optimizer was used. The baseline model was trained for a total of 35 epochs.</p>
      </sec>
      <sec>
        <title>VGG16 Model</title>
        <p>VGG16 is a convolutional neural net architecture (<xref rid="figure2" ref-type="fig">Figure 2</xref> [<xref ref-type="bibr" rid="ref9">9</xref>]) that won the ImageNet competition in 2014 and is generally regarded as one of the best current vision models architecture. Even though it is an old model, we chose VGG16 because of its simplicity.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>VGG16 architecture.</p>
          </caption>
          <graphic xlink:href="derma_v3i1e18438_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>On the ImageNet data set, VGG16 achieved an accuracy of 90.1% for top-5 and 71.3% for top-1.</p>
        <p>Data augmentation was performed to increase the data set image count. Fine-tuning was performed on the model by removing the top, fully-connected layers that were then replaced with following: (1) a max-pooling layer, (2) a fully connected layer with 512 units, (3) a dropout layer with 0.5 rate, and (4) a softmax activation layer for 7 types of skin lesions.</p>
        <p>The first step included freezing all layers in VGG16 and performing feature extraction for newly added layers. After 3 epochs, we unfroze the final convolutional block of VGG16 and started fine-tuning a model for 20 epochs. The learning rate was set to 0.001 and Adam Optimizer was used. VGG16 was fine-tuned for a total of 30 epochs.</p>
      </sec>
      <sec>
        <title>Inception Model</title>
        <p>Inceptionv3 produced an accuracy of 93.7% for top-5 and 77.9% for top-1 on the ImageNet data set. The Inception module has 1×1, 3×3, and 5×5 convolutions, all in parallel (<xref rid="figure3" ref-type="fig">Figure 3</xref> [<xref ref-type="bibr" rid="ref10">10</xref>]). The intention was to let the network decide, through training, what information would be learned and used. It also allows for multi-scale processing; the model can recover low-level features via small convolutional layers and high-level features with large convolutional layers.</p>
        <p>We fine-tuned all layers of Inceptionv3 and the top two inception blocks with batch normalization layers. Inceptionv3 was fine-tuned for 20 epochs.</p>
        <p>Additionally, we tried Inception-ResNet, a variant of Inception. It uses a residual connection, which has become necessary for training very deep convolutional models. The same training strategy used for Inceptionv3 was used for Inception-ResNet.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Inceptionv3 architecture. Published with permission.</p>
          </caption>
          <graphic xlink:href="derma_v3i1e18438_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>DenseNet Model</title>
        <p>This is a new architecture that performed exceptionally well in the ImageNet data set competition, giving an accuracy of 93.6% in top-5 and 77.3% on top-1. DenseNet has 4 dense blocks and uses approximately 20 million parameters (<xref rid="figure4" ref-type="fig">Figure 4</xref> [<xref ref-type="bibr" rid="ref11">11</xref>]).</p>
        <p>In a dense block, one layer generates feature maps through a composite function, consisting of three consecutive operations: batch normalization, ReLU (rectified linear activation unit), and a 3×3 convolution. We used DenseNet-201, which uses 4 dense blocks, and we performed two types of fine-tuning on it: (1) fine-tuning on the last dense block (32 layers; Part A), and (2) fine-tuning on the whole network (Part B). Part A was trained for 27 epochs and Part B was trained for 20 epochs.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>DenseNet architecture. Published with permission.</p>
          </caption>
          <graphic xlink:href="derma_v3i1e18438_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p><xref ref-type="table" rid="table2">Table 2</xref> shows the classification results from each model when the top layers were fine-tuned (Part A). <xref ref-type="table" rid="table3">Table 3</xref> displays the classification results for each model when all layers were fine-tuned. All experiments were performed on a laptop with GPU NVIDIA 1050Ti. To speed up processing times, Google Colab (P100 GPU) was used.</p>
      <p>From training a custom model, it was clear that the problem cannot be solved by a simple CNN model with a few layers. Therefore, we incorporated fine-tuning of the pretrained model. By hypertuning the pretrained model that had over 100 layers, we achieved better results. Fine-tuning all layers (Part B) gave us better results than fine-tuning only the top layers (Part A). Crucially, Part B was trained for fewer epochs, which helped the model converge faster. However, in both cases, DenseNet gave us better results than Inceptionv3. Using the concepts of ensemble learning, we created an ensemble of Inceptionv3 and DenseNet-201. This combination achieved a further improved accuracy of 88.8% on the validation set and 88.5% on the test set.</p>
      <table-wrap position="float" id="table2">
        <label>Table 2</label>
        <caption>
          <p>Fine-tuning the top layers.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <col width="200"/>
          <thead>
            <tr valign="top">
              <td>Model</td>
              <td>Validation (%)</td>
              <td>Test (%)</td>
              <td>Test loss</td>
              <td>Depth (layers)</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Custom model</td>
              <td>77.48</td>
              <td>76.54</td>
              <td>0.646671</td>
              <td>11</td>
            </tr>
            <tr valign="top">
              <td>VGG16</td>
              <td>79.82</td>
              <td>79.64</td>
              <td>0.708</td>
              <td>23</td>
            </tr>
            <tr valign="top">
              <td>Inceptionv3</td>
              <td>79.935</td>
              <td>79.94</td>
              <td>0.7482</td>
              <td>315</td>
            </tr>
            <tr valign="top">
              <td>Inception-ResNet V2</td>
              <td>80.82</td>
              <td>82.53</td>
              <td>0.6691</td>
              <td>784</td>
            </tr>
            <tr valign="top">
              <td>DenseNet-201</td>
              <td>85.8</td>
              <td>83.9</td>
              <td>0.691</td>
              <td>711</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap position="float" id="table3">
        <label>Table 3</label>
        <caption>
          <p>Fine-tuning all layers.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="320"/>
          <col width="250"/>
          <col width="230"/>
          <col width="200"/>
          <thead>
            <tr valign="top">
              <td>Model</td>
              <td>Validation (%)</td>
              <td>Test (%)</td>
              <td>Test loss</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Inceptionv3</td>
              <td>86.92</td>
              <td>86.826</td>
              <td>0.6241</td>
            </tr>
            <tr valign="top">
              <td>DenseNet-201</td>
              <td>86.696</td>
              <td>87.725</td>
              <td>0.5587</td>
            </tr>
            <tr valign="top">
              <td>Ensemble (Inceptionv3 and DenseNet-201)</td>
              <td>88.8</td>
              <td>88.52</td>
              <td>0.41156</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>Our results indicate that deep learning algorithms are highly suitable for classifying skin cancer images. Additionally, by using the concepts of fine-tuning and the ensemble learning model, improved results were achieved. Finally, we found that fine-tuning the whole model helped the model converge faster compared with fine-tuning only the top layers, giving an overall better accuracy.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">DCNN</term>
          <def>
            <p>deep convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ISIC</term>
          <def>
            <p>International Skin Imaging Collaboration</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">KNN</term>
          <def>
            <p>k-nearest neighbor</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">MLP</term>
          <def>
            <p>multilayer perceptron</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">PSL</term>
          <def>
            <p>pigmented skin lesion</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lal</surname>
              <given-names>ST</given-names>
            </name>
            <name name-style="western">
              <surname>Banipal</surname>
              <given-names>RP</given-names>
            </name>
            <name name-style="western">
              <surname>Bhatti</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Yadav</surname>
              <given-names>HP</given-names>
            </name>
          </person-group>
          <article-title>Changing Trends of Skin Cancer: A Tertiary Care Hospital Study in Malwa Region of Punjab</article-title>
          <source>J Clin Diagn Res</source>
          <year>2016</year>
          <month>06</month>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>PC12</fpage>
          <lpage>5</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27504344"/>
          </comment>
          <pub-id pub-id-type="doi">10.7860/JCDR/2016/18487.8051</pub-id>
          <pub-id pub-id-type="medline">27504344</pub-id>
          <pub-id pub-id-type="pmcid">PMC4963704</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Esteva</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kuprel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Novoa</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Ko</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Swetter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Blau</surname>
              <given-names>HM</given-names>
            </name>
            <name name-style="western">
              <surname>Thrun</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Dermatologist-level classification of skin cancer with deep neural networks</article-title>
          <source>Nature</source>
          <year>2017</year>
          <month>1</month>
          <day>25</day>
          <volume>542</volume>
          <issue>7639</issue>
          <fpage>115</fpage>
          <lpage>118</lpage>
          <pub-id pub-id-type="doi">10.1038/nature21056</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hosny</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Kassem</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Foaud</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Skin Cancer Classification using Deep Learning and Transfer Learning</article-title>
          <year>2018</year>
          <conf-name>9th Cairo International Biomedical Engineering Conference (CIBEC)</conf-name>
          <conf-date>December 20-22</conf-date>
          <conf-loc>Cairo, Egypt</conf-loc>
          <fpage>90</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1109/CIBEC.2018.8641762</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mabrouk</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Sheha</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Sharawy</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Automatic Detection of Melanoma Skin Cancer using Texture Analysis</article-title>
          <source>IJCA</source>
          <year>2012</year>
          <month>03</month>
          <day>31</day>
          <volume>42</volume>
          <issue>20</issue>
          <fpage>22</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.5120/5817-8129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Codella</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Abedini</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Garnavi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Halpern</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Deep Learning, Sparse Coding, and SVM for Melanoma Recognition in Dermoscopy Images</article-title>
          <source>Machine Learning In Medical Imaging</source>
          <year>2015</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>118</fpage>
          <lpage>126</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Okuboyejo</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Olugbara</surname>
              <given-names>OO</given-names>
            </name>
            <name name-style="western">
              <surname>Odunaike</surname>
              <given-names>SA</given-names>
            </name>
          </person-group>
          <article-title>Automating Skin Disease Diagnosis Using Image Classification</article-title>
          <year>2013</year>
          <conf-name>Proceedings of the World Congress on Engineering and Computer Science</conf-name>
          <conf-date>October 23-25</conf-date>
          <conf-loc>San Francisco</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Codella</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Rotemberg</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Tschandl</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Celebi</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Dusza</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gutman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Helba</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kalloo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liopyris</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Marchetti</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kittler</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Halpern</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Skin lesion analysis toward melanoma detection: A challenge at the 2017 International Symposium on Biomedical Imaging (ISBI), hosted by the International Skin Imaging Collaboration (ISIC)</article-title>
          <year>2018</year>
          <conf-name>IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)</conf-name>
          <conf-date>April 4</conf-date>
          <conf-loc>Washington, DC, USA</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kittler</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Pehamberger</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wolff</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Binder</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Diagnostic accuracy of dermoscopy</article-title>
          <source>Lancet Oncol</source>
          <year>2002</year>
          <month>03</month>
          <volume>3</volume>
          <issue>3</issue>
          <fpage>159</fpage>
          <lpage>65</lpage>
          <pub-id pub-id-type="doi">10.1016/s1470-2045(02)00679-4</pub-id>
          <pub-id pub-id-type="medline">11902502</pub-id>
          <pub-id pub-id-type="pii">S1470204502006794</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <source>VGG16 architecture</source>
          <year>2020</year>
          <month>06</month>
          <day>02</day>
          <comment>Classez et segmentez des données visuelles <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openclassrooms.com/fr/courses/4470531-classez-et-segmentez-des-donnees-visuelles/5097666-tp-implementez-votre-premier-reseau-de-neurones-avec-keras">https://tinyurl.com/ya49hbpk</ext-link> </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Szegedy</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sermanet</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Reed</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Anguelov</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Erhan</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vanhoucke</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Rabinovich</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Going deeper with convolutions</article-title>
          <source>2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</source>
          <year>2015</year>
          <month>06</month>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/1409.4842"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>van der Maaten</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Weinberger</surname>
              <given-names>KQ</given-names>
            </name>
          </person-group>
          <article-title>Densely connected convolutional networks</article-title>
          <source>arXiv</source>
          <year>2016</year>
          <month>08</month>
          <day>25</day>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/1608.06993"/>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
