|
106 | 106 | - Matplotlib version 3.3.4, since Captum currently uses a Matplotlib
|
107 | 107 | function whose arguments have been renamed in later versions
|
108 | 108 |
|
109 |
| -To install Captum in an Anaconda or pip virtual environment, use the |
110 |
| -appropriate command for your environment below: |
111 |
| -
|
112 |
| -With ``conda``: |
113 |
| -
|
114 |
| -.. code-block:: sh |
115 |
| -
|
116 |
| - conda install pytorch torchvision captum flask-compress matplotlib=3.3.4 -c pytorch |
| 109 | +To install Captum, use the appropriate command for your environment below: |
117 | 110 |
|
118 | 111 | With ``pip``:
|
119 | 112 |
|
|
127 | 120 |
|
128 | 121 | A First Example
|
129 | 122 | ---------------
|
130 |
| - |
| 123 | +
|
131 | 124 | To start, let’s take a simple, visual example. We’ll start with a ResNet
|
132 | 125 | model pretrained on the ImageNet dataset. We’ll get a test input, and
|
133 | 126 | use different **Feature Attribution** algorithms to examine how the
|
134 | 127 | input images affect the output, and see a helpful visualization of this
|
135 | 128 | input attribution map for some test images.
|
136 |
| - |
137 |
| -First, some imports: |
138 | 129 |
|
139 |
| -""" |
| 130 | +First, some imports: |
140 | 131 |
|
141 |
| -import torch |
142 |
| -import torch.nn.functional as F |
143 |
| -import torchvision.transforms as transforms |
144 |
| -import torchvision.models as models |
| 132 | +""" |
145 | 133 |
|
146 |
| -import captum |
147 |
| -from captum.attr import IntegratedGradients, Occlusion, LayerGradCam, LayerAttribution |
148 |
| -from captum.attr import visualization as viz |
| 134 | +import json |
149 | 135 |
|
150 | 136 | import os, sys
|
151 |
| -import json |
152 | 137 |
|
153 |
| -import numpy as np |
154 |
| -from PIL import Image |
| 138 | +import captum |
155 | 139 | import matplotlib.pyplot as plt
|
| 140 | + |
| 141 | +import numpy as np |
| 142 | +import torch |
| 143 | +import torch.nn.functional as F |
| 144 | +import torchvision.models as models |
| 145 | +import torchvision.transforms as transforms |
| 146 | +from captum.attr import ( |
| 147 | + IntegratedGradients, |
| 148 | + LayerAttribution, |
| 149 | + LayerGradCam, |
| 150 | + Occlusion, |
| 151 | + visualization as viz, |
| 152 | +) |
156 | 153 | from matplotlib.colors import LinearSegmentedColormap
|
| 154 | +from PIL import Image |
157 | 155 |
|
158 | 156 |
|
159 | 157 | #########################################################################
|
160 | 158 | # Now we’ll use the TorchVision model library to download a pretrained
|
161 | 159 | # ResNet. Since we’re not training, we’ll place it in evaluation mode for
|
162 | 160 | # now.
|
163 |
| -# |
| 161 | +# |
164 | 162 |
|
165 |
# Use the TorchVision model library to download a ResNet-18 pretrained on
# ImageNet ("IMAGENET1K_V1" weights). We're not training, so switch the
# model to evaluation mode (disables dropout / batch-norm updates).
model = models.resnet18(weights="IMAGENET1K_V1")
model = model.eval()
|
167 | 165 |
|
168 | 166 |
|
169 | 167 | #######################################################################
|
170 | 168 | # The place where you got this interactive notebook should also have an
|
171 | 169 | # ``img`` folder with a file ``cat.jpg`` in it.
|
172 |
| -# |
| 170 | +# |
173 | 171 |
|
174 |
# Load the sample image shipped alongside this notebook (img/cat.jpg),
# convert it to a NumPy array for plotting, and display it so the reader
# can see the input before running attribution.
test_img = Image.open("img/cat.jpg")
test_img_data = np.asarray(test_img)
plt.imshow(test_img_data)
plt.show()
|
|
0 commit comments