by Dr Freddy Wordingham
Web App
16. Code optimisation
Finally, let's take a look at some optimisations we can make.
Model Loading
At the moment, every request to the /classify endpoint loads the model from disk. It would be faster to load the model once and reuse it between requests.
We can add some code to the start of backend/classify.py to store the model in a global variable, and then reuse it between requests:
# Load the model globally
MODEL_PATH = os.path.join("output", "model.h5")
model = models.load_model(MODEL_PATH)
We then no longer need to load the model each time inside the /classify route function, so we can comment out (or remove) these lines:
# # Load the model
# model = models.load_model(os.path.join("output", "model.h5"))
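Loading at import time means the model is read from disk as soon as the module is imported. If you'd rather defer that cost until the first request, a lazy-loading helper achieves the same one-time load. Here's a minimal sketch of that alternative, assuming the same output/model.h5 path; the get_model function is illustrative and not part of the project:

import os
from functools import lru_cache
from tensorflow.keras import models

MODEL_PATH = os.path.join("output", "model.h5")

@lru_cache(maxsize=1)
def get_model():
    """Load the model on the first call, then return the cached instance."""
    return models.load_model(MODEL_PATH)

The route function would then call get_model() wherever it currently references the global model.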
APPENDIX
How to Run
Build Frontend
Navigate to the frontend/
directory:
cd frontend
Install any missing frontend dependencies:
npm install
Build the files for distributing the frontend to clients:
npm run build
Run the Backend
Go back to the project root directory:
cd ..
Activate the virtual environment, if you haven't already:
source .venv/bin/activate
Install any missing packages:
pip install -r requirements.txt
If you haven't already, train a CNN:
python scripts/train.py
Continue training an existing model:
python scripts/continue_training.py
Serve the web app:
python -m uvicorn main:app --port 8000 --reload
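Once the server is running, you can smoke-test the endpoint from another terminal. A minimal sketch using the requests library (an assumption; it may not be in requirements.txt), posting the sample image from resources/:

import requests

# Send the sample image to the local server
with open("resources/dog.jpg", "rb") as f:
    response = requests.post(
        "http://localhost:8000/classify",
        files={"file": ("dog.jpg", f, "image/jpeg")},
    )

print(response.json()["predicted_class"])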
Deploy
Deploy to the cloud:
serverless deploy
Remove from the cloud:
serverless remove
Updated Files
Project structure
.
├── .venv/
├── .gitignore
├── .serverless/
├── resources
│   └── dog.jpg
├── frontend
│   ├── build/
│   ├── node_modules/
│   ├── public/
│   ├── src
│   │   ├── App.css
│   │   ├── App.test.tsx
│   │   ├── App.tsx
│   │   ├── ImageGrid.tsx
│   │   ├── ImageUpload.tsx
│   │   ├── index.css
│   │   ├── index.tsx
│   │   ├── Predictions.css
│   │   ├── Predictions.tsx
│   │   ├── logo.svg
│   │   ├── react-app-env.d.ts
│   │   ├── reportWebVitals.ts
│   │   ├── setupTests.ts
│   │   └── Sum.tsx
│   ├── .gitignore
│   ├── package-lock.json
│   ├── package.json
│   ├── README.md
│   └── tsconfig.json
├── output
│   ├── activations_conv2d/
│   ├── activations_conv2d_1/
│   ├── activations_conv2d_2/
│   ├── activations_dense/
│   ├── activations_dense_1/
│   ├── model.h5
│   ├── sample_images.png
│   └── training_history.png
├── scripts
│   ├── classify.py
│   ├── continue_training.py
│   └── train.py
├── main.py
├── README.md
├── requirements.txt
└── serverless.yml
classify.py
from fastapi import APIRouter, File, UploadFile
from PIL import Image
from pydantic import BaseModel
from tensorflow.keras import models, Model
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from io import BytesIO
import base64

# Load the model globally
MODEL_PATH = os.path.join("output", "model.h5")
model = models.load_model(MODEL_PATH)

CLASS_NAMES = [
    "airplane",
    "automobile",
    "bird",
    "cat",
    "deer",
    "dog",
    "frog",
    "horse",
    "ship",
    "truck",
]

router = APIRouter()


class ClassifyOutput(BaseModel):
    predicted_class: str
    predictions: dict[str, str]
    activation_images: dict[str, list[str]]


# Classify an image
@router.post("/classify")
async def classify(file: UploadFile = File(...)):
    # Load the image
    image = Image.open(file.file)

    # Ensure the image has 3 channels for RGB, and resize to 32x32
    # (PIL already gives uint8 pixel values, so no rescaling is needed)
    image = image.convert("RGB").resize((32, 32))
    image_array = np.array(image)

    # Add a batch dimension
    image_array = tf.expand_dims(image_array, 0)

    # # Load the model
    # model = models.load_model(os.path.join("output", "model.h5"))

    # Run the model on the image
    predictions = model.predict(image_array)
    predicted_class = np.argmax(predictions)

    # Sort the predictions
    sorted_indices = np.argsort(predictions, axis=-1)[:, ::-1]

    # Print the results
    for i in sorted_indices[0]:
        key = CLASS_NAMES[i].ljust(20, ".")
        probability = "{:.1f}".format(predictions[0][i] * 100).rjust(5, " ")
        print(f"{key} : {probability}%")

    predictions = {
        CLASS_NAMES[i]: f"{float(predictions[0][i]):.2f}" for i in sorted_indices[0]}

    # Visualise intermediate layers
    layer_names = [
        layer.name for layer in model.layers if "conv" in layer.name or "dense" in layer.name]
    activation_images = visualise_intermediate_layers(
        model, image_array, layer_names)

    return ClassifyOutput(
        predicted_class=CLASS_NAMES[predicted_class],
        predictions=predictions,
        activation_images=activation_images,
    )
def visualise_intermediate_layers(model, image_array, layer_names):
    """Visualise the intermediate layers of a model"""
    # Only request the outputs of the layers we are visualising, so each
    # activation lines up with its layer name in the zip below
    layer_outputs = [model.get_layer(name).output for name in layer_names]
    activation_model = Model(inputs=model.input, outputs=layer_outputs)
    activations = activation_model.predict(image_array)

    activation_images = {}
    for layer_name, activation in zip(layer_names, activations):
        activation_images[layer_name] = []

        # Dense layers output a vector rather than a stack of feature maps;
        # reshape it into a single-row "image" so it can be drawn the same way
        if activation.ndim == 2:
            activation = activation.reshape(1, 1, -1, 1)

        num_channels = activation.shape[-1]
        for i in range(num_channels):
            plt.figure()
            plt.imshow(activation[0, :, :, i], cmap="viridis")
            plt.axis("off")
            plt.subplots_adjust(left=0, right=1, top=1, bottom=0)

            # Save the image to a bytes buffer
            buf = BytesIO()
            plt.savefig(buf, format="png", bbox_inches="tight", pad_inches=0)
            buf.seek(0)

            # Convert the image to a base64 string
            img_str = base64.b64encode(buf.read()).decode()
            activation_images[layer_name].append(img_str)
            plt.close()

    return activation_images
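Since activation_images maps each layer name to a list of base64-encoded PNGs, a client can decode them straight back into image files. A small illustrative sketch of the consuming side (the filename scheme is an assumption, not part of the project):

import base64

def save_activation_images(activation_images, out_dir="."):
    """Decode the base64 PNG strings returned by /classify into files."""
    for layer_name, images in activation_images.items():
        for i, img_str in enumerate(images):
            with open(f"{out_dir}/{layer_name}_{i}.png", "wb") as f:
                f.write(base64.b64decode(img_str))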