@@ -132,10 +132,10 @@ def transform_image(image_bytes):
 ######################################################################
 # Above method takes image data in bytes, applies the series of transforms
 # and returns a tensor. To test the above method, read an image file in
-# bytes mode (first replacing `<PATH/TO/.jpeg/FILE>` with the actual path
-# to the file on your computer) and see if you get a tensor back:
+# bytes mode (first replacing `../_static/img/sample_file.jpeg` with the actual
+# path to the file on your computer) and see if you get a tensor back:

-with open("<PATH/TO/.jpeg/FILE>/sample_file.jpeg", 'rb') as f:
+with open("../_static/img/sample_file.jpeg", 'rb') as f:
     image_bytes = f.read()
     tensor = transform_image(image_bytes=image_bytes)
     print(tensor)
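
As a quick sanity check (a sketch outside the diff itself): ``transform_image`` resizes to 255, center-crops to 224, and adds a batch dimension via ``unsqueeze(0)``, so for an RGB input the returned tensor should have shape ``[1, 3, 224, 224]``:

    # assumes transform_image and the sample image path used in the hunk above
    with open("../_static/img/sample_file.jpeg", 'rb') as f:
        image_bytes = f.read()
    tensor = transform_image(image_bytes=image_bytes)
    print(tensor.shape)  # expected: torch.Size([1, 3, 224, 224]) for an RGB image
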
@@ -169,15 +169,16 @@ def get_prediction(image_bytes):
 # The tensor ``y_hat`` will contain the index of the predicted class id.
 # However, we need a human readable class name. For that we need a class id
 # to name mapping. Download
-# `this file <https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json>`_
-# as ``imagenet_class_index.json`` and remember where you saved it.
-# This file contains the mapping of ImageNet class id to ImageNet class
-# name. We will load this JSON file and get the class name of the
-# predicted index.
+# `this file <https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json>`_
+# as ``imagenet_class_index.json`` and remember where you saved it (or, if you
+# are following the exact steps in this tutorial, save it in
+# `tutorials/_static`). This file contains the mapping of ImageNet class id to
+# ImageNet class name. We will load this JSON file and get the class name of
+# the predicted index.

 import json

-imagenet_class_index = json.load(open('<PATH/TO/.json/FILE>/imagenet_class_index.json'))
+imagenet_class_index = json.load(open('../_static/imagenet_class_index.json'))

 def get_prediction(image_bytes):
     tensor = transform_image(image_bytes=image_bytes)
@@ -194,7 +195,7 @@ def get_prediction(image_bytes):
 # We will test our above method:


-with open("<PATH/TO/.jpeg/FILE>/sample_file.jpeg", 'rb') as f:
+with open("../_static/img/sample_file.jpeg", 'rb') as f:
     image_bytes = f.read()
     print(get_prediction(image_bytes=image_bytes))

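
For reference (a sketch outside the diff): each entry in ``imagenet_class_index.json`` maps a string class index to a ``[class_id, class_name]`` pair, which is why the Flask handler further down can unpack the return value of ``get_prediction`` directly. A minimal check, assuming the JSON file sits at the path used above:

    # the prediction is a two-element list, e.g. something like ['n02124075', 'Egyptian_cat']
    class_id, class_name = get_prediction(image_bytes=image_bytes)
    print(class_id, class_name)
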
@@ -229,7 +230,7 @@ def get_prediction(image_bytes):
 # method to read files from the requests:
 #
 # .. code-block:: python
-#
+#
 #    from flask import request
 #
 #    @app.route('/predict', methods=['POST'])
@@ -243,25 +244,26 @@ def get_prediction(image_bytes):
 #            return jsonify({'class_id': class_id, 'class_name': class_name})

 ######################################################################
-# The ``app.py`` file is now complete. Following is the full version:
+# The ``app.py`` file is now complete. Following is the full version; replace
+# the paths with the paths where you saved your files and it should run:
 #
 # .. code-block:: python
-#
+#
 #    import io
 #    import json
-#
+#
 #    from torchvision import models
 #    import torchvision.transforms as transforms
 #    from PIL import Image
 #    from flask import Flask, jsonify, request
-#
-#
+#
+#
 #    app = Flask(__name__)
 #    imagenet_class_index = json.load(open('<PATH/TO/.json/FILE>/imagenet_class_index.json'))
 #    model = models.densenet121(pretrained=True)
 #    model.eval()
-#
-#
+#
+#
 #    def transform_image(image_bytes):
 #        my_transforms = transforms.Compose([transforms.Resize(255),
 #                                            transforms.CenterCrop(224),
@@ -271,25 +273,25 @@ def get_prediction(image_bytes):
 #                                                [0.229, 0.224, 0.225])])
 #        image = Image.open(io.BytesIO(image_bytes))
 #        return my_transforms(image).unsqueeze(0)
-#
-#
+#
+#
 #    def get_prediction(image_bytes):
 #        tensor = transform_image(image_bytes=image_bytes)
 #        outputs = model.forward(tensor)
 #        _, y_hat = outputs.max(1)
 #        predicted_idx = str(y_hat.item())
 #        return imagenet_class_index[predicted_idx]
-#
-#
+#
+#
 #    @app.route('/predict', methods=['POST'])
 #    def predict():
 #        if request.method == 'POST':
 #            file = request.files['file']
 #            img_bytes = file.read()
 #            class_id, class_name = get_prediction(image_bytes=img_bytes)
 #            return jsonify({'class_id': class_id, 'class_name': class_name})
-#
-#
+#
+#
 #    if __name__ == '__main__':
 #        app.run()

@@ -301,15 +303,15 @@ def get_prediction(image_bytes):
 #    $ FLASK_ENV=development FLASK_APP=app.py flask run

 #######################################################################
-# We can use the
-# `requests <https://pypi.org/project/requests/>`_
+# We can use the
+# `requests <https://pypi.org/project/requests/>`_
 # library to send a POST request to our app:
 #
 # .. code-block:: python
-#
+#
 #    import requests
-#
-#    resp = requests.post("http://localhost:5000/predict",
+#
+#    resp = requests.post("http://localhost:5000/predict",
 #                         files={"file": open('<PATH/TO/.jpg/FILE>/cat.jpg','rb')})

 #######################################################################
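
As a possible follow-up to the snippet above (an illustrative sketch, not part of the diff): the ``/predict`` route returns JSON, so the response can be decoded with ``resp.json()``; the exact class reported depends on the image you send:

    import requests

    # keep the placeholder path; point it at any local image before running
    resp = requests.post("http://localhost:5000/predict",
                         files={"file": open('<PATH/TO/.jpg/FILE>/cat.jpg', 'rb')})
    print(resp.status_code)  # 200 if the request was handled
    print(resp.json())       # e.g. {'class_id': 'n02124075', 'class_name': 'Egyptian_cat'}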