by Dr Freddy Wordingham
Web App
9. Accepting an image
In the last lesson we hooked the frontend up to the backend, so we can perform very complex calculations in the world of Python while displaying the inputs and results in a beautiful user interface.
In this lesson, we'll add a drag-and-drop input to the frontend, and use it to send an image to the backend, and run some code on the backend to tell us the dimensions of the image. (We'll add the model back into the mix soon after!)
We're going to do it in two parts. First we're going to add the backend functionality to accept an image, in part A. Then, in part B, we'll add the frontend code.
📦 Dependencies
We're going to need a couple of new things this session.
In the backend we're going to need pillow
to manipulate images easily, and python-multipart
to support sending images to the backend. Add these to the requirements.txt file:
pillow
python-multipart
numpy==1.23.1
Then, to get them into our python venv
we'll need to run:
pip install -r requirements.txt
📏 Dimensions
Let's start by adding a simple route to our application which accepts an image, and returns back to us the dimensions of that image.
In main.py modify our imports:
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from mangum import Mangum
from PIL import Image
from pydantic import BaseModel
from tensorflow.keras import models
import numpy as np
import os
import tensorflow as tf
Here, we've:
- Added
UploadFile
to thefastapi
imports. - Imported
Image
fromPIL
- Imported
numpy
asnp
- Imported
tensorflow
astf
- Imported
models
fromtensorflow.keras
And now we've got those we can add the route. We'll do it below the /sum
POST route:
class DimensionsOutput(BaseModel):
    """Response body for the /dimensions route: image size in pixels."""

    # Pixel dimensions of the uploaded image.
    width: int
    height: int
# Tell us the dimensions of an uploaded image
@app.post("/dimensions")
def dimensions(file: UploadFile = File(...)):
    """Return the width and height, in pixels, of an uploaded image.

    Args:
        file: The image, sent as multipart/form-data under the key "file".

    Returns:
        DimensionsOutput holding the image's width and height.
    """
    image = Image.open(file.file)
    # PIL exposes (width, height) directly; no need to decode the whole
    # image into a numpy array just to read its shape.
    width, height = image.size
    # Print the result to the terminal so we can see the route working.
    print(f"Image dimensions: {width}x{height}")
    return DimensionsOutput(width=width, height=height)
This code is:
- Defining a class called
DimensionsOutput
which will hold the output of our route. - Creating a new POST endpoint at the path "/dimensions"
- which accepts a
File
object as its input. - The route function then reads the input image, and stores the image
width
andheight
into two respective variables. - Prints this result to the terminal
- And finally returns the result, enclosed in a
DimensionsOutput
object.
Now we've got that, we can add a new component tag to the frontend which will trigger this endpoint!
For now we can check things are behaving as expected using cURL:
curl -X POST \
"http://localhost:8000/dimensions" \
-H "accept: application/json" \
-H "Content-Type: multipart/form-data" \
-F "file=@resources/dog.jpg"
📖 APPENDIX
🏃 How to Run
🧱 Build Frontend
Navigate to the frontend/
directory:
cd frontend
Install any missing frontend dependencies:
npm install
Build the files for distributing the frontend to clients:
npm run build
🌲 Run the Backend
Go back to the project root directory:
cd ..
Activate the virtual environment, if you haven't already:
source .venv/bin/activate
Install any missing packages:
pip install -r requirements.txt
If you haven't already, train a CNN:
python scripts/train.py
Continue training an existing model:
python scripts/continue_training.py
Serve the web app:
python -m uvicorn main:app --port 8000 --reload
🗂️ Updated Files
Project structure
.
├── .venv/
├── .gitignore
├── resources
│   └── dog.jpg
├── frontend
│   ├── build/
│   ├── node_modules/
│   ├── public/
│   ├── src
│   │   ├── App.css
│   │   ├── App.test.tsx
│   │   ├── App.tsx
│   │   ├── ImageUpload.tsx
│   │   ├── index.css
│   │   ├── index.tsx
│   │   ├── logo.svg
│   │   ├── react-app-env.d.ts
│   │   ├── reportWebVitals.ts
│   │   ├── setupTests.ts
│   │   └── Sum.tsx
│   ├── .gitignore
│   ├── package-lock.json
│   ├── package.json
│   ├── README.md
│   └── tsconfig.json
├── output
│   ├── activations_conv2d/
│   ├── activations_conv2d_1/
│   ├── activations_conv2d_2/
│   ├── activations_dense/
│   ├── activations_dense_1/
│   ├── model.h5
│   ├── sample_images.png
│   └── training_history.png
├── scripts
│   ├── classify.py
│   ├── continue_training.py
│   └── train.py
├── main.py
├── README.md
└── requirements.txt
requirements.txt
tensorflow
matplotlib
fastapi
mangum
uvicorn
pillow
python-multipart
numpy==1.23.1
main.py
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from mangum import Mangum
from PIL import Image
from pydantic import BaseModel
from tensorflow.keras import models
import numpy as np
import os
import tensorflow as tf
# Instantiate the FastAPI application; every route below is registered on it.
app = FastAPI()
# Health-check route.
@app.get("/ping")
def ping():
    """Liveness probe: responds with the literal string "pong!"."""
    return "pong!"
class SumInput(BaseModel):
    """Request body for the /sum route: the two integers to add."""

    # The two operands.
    a: int
    b: int
class SumOutput(BaseModel):
    """Response body for the /sum route: the computed total."""

    # a + b from the request.
    sum: int
# Sum two numbers together.
@app.post("/sum")
def sum(input: SumInput):
    """Return the sum of the two integers in the request body.

    NOTE(review): the handler name shadows the builtin `sum`; harmless for
    a route handler, but worth renaming if it is ever called directly.
    """
    total = input.a + input.b
    return SumOutput(sum=total)
class DimensionsOutput(BaseModel):
    """Response body for the /dimensions route: image size in pixels."""

    # Pixel dimensions of the uploaded image.
    width: int
    height: int
# Tell us the dimensions of an uploaded image
@app.post("/dimensions")
def dimensions(file: UploadFile = File(...)):
    """Return the width and height, in pixels, of an uploaded image.

    Args:
        file: The image, sent as multipart/form-data under the key "file".

    Returns:
        DimensionsOutput holding the image's width and height.
    """
    image = Image.open(file.file)
    # PIL exposes (width, height) directly; no need to decode the whole
    # image into a numpy array just to read its shape.
    width, height = image.size
    # Print the result to the terminal so we can see the route working.
    print(f"Image dimensions: {width}x{height}")
    return DimensionsOutput(width=width, height=height)
# Serve our React application's static build at the root path.
app.mount("/", StaticFiles(directory=os.path.join("frontend",
"build"), html=True), name="build")
# CORS configuration.
# NOTE(review): the CORS spec forbids a wildcard origin on credentialed
# requests, so allow_origins=["*"] together with allow_credentials=True
# will be rejected by browsers -- confirm whether credentials are needed.
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # Permits requests from all origins.
# Allows cookies and credentials to be included in the request.
allow_credentials=True,
allow_methods=["*"], # Allows all HTTP methods.
allow_headers=["*"] # Allows all headers.
)
# Define the Lambda handler: Mangum adapts Lambda/API Gateway events to ASGI.
handler = Mangum(app)
# Prevent Lambda showing errors in CloudWatch by handling warmup requests correctly
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Scheduled EventBridge invocations (source "aws.events") are warm-up
    pings carrying no HTTP request, so they are answered with an empty
    response instead of being passed to the ASGI adapter (which would log
    an error in CloudWatch).

    Args:
        event: Raw Lambda event payload (a dict).
        context: Lambda context object; forwarded to Mangum untouched.

    Returns:
        An empty dict for warm-up events, otherwise Mangum's HTTP response.
    """
    # dict.get avoids the double lookup of `"source" in event and event[...]`.
    if event.get("source") == "aws.events":
        print("This is a warm-up invocation")  # fixed typo: was "warm-ip"
        return {}
    return handler(event, context)
frontend/src/App.tsx
import "./App.css";
import ImageUpload from "./ImageUpload";
// Root component: renders the image-upload widget inside the app shell.
function App() {
  const content = (
    <header className="App-header">
      <ImageUpload />
    </header>
  );
  return <div className="App">{content}</div>;
}
export default App;
frontend/src/ImageUpload.tsx
import { useCallback, useState } from "react";
import { useDropzone, FileWithPath } from "react-dropzone";
// Drag-and-drop image input: previews the dropped file locally and POSTs it
// to the backend /dimensions route as multipart/form-data.
function ImageUpload() {
  const [imagePreview, setImagePreview] = useState<string | null>(null);
  const [predictedClass, setPredictedClass] = useState<string | null>(null);

  const onDrop = useCallback((acceptedFiles: FileWithPath[]) => {
    const file = acceptedFiles[0];
    // Guard: onDrop can fire with no accepted files (e.g. a rejected drop);
    // bail out rather than appending `undefined` to the form and POSTing it.
    if (!file) {
      return;
    }

    // Render a local preview while the upload is in flight.
    const reader = new FileReader();
    reader.onloadend = () => {
      setImagePreview(reader.result as string);
    };
    reader.readAsDataURL(file);

    // Send the image to the backend.
    const formData = new FormData();
    formData.append("file", file);
    fetch("/dimensions", {
      method: "POST",
      body: formData,
    })
      .then((response) => response.json()) // renamed: this is the response, not the request
      .then((data) => {
        console.log(data);
      })
      .catch((error) => console.log(`Error: ${error}`));
  }, []);

  const { getRootProps, getInputProps, isDragActive } = useDropzone({ onDrop });

  const style = {
    padding: "20px",
    border: isDragActive ? "2px dashed cyan" : "2px dashed gray",
  };

  // Once a preview exists, show it (prediction appears when the model is wired up).
  if (imagePreview) {
    return (
      <div>
        <img src={imagePreview} width="400px" alt="Preview" />
        {predictedClass && <p>Prediction: {predictedClass}</p>}
      </div>
    );
  }

  return (
    <div {...getRootProps()} style={style}>
      <input {...getInputProps()} />
      <div>Drop Image Here!</div>
    </div>
  );
}
export default ImageUpload;