rootp1 committed
Commit e3e177e · 0 Parent(s)

Initial Docker ML app deployment

Files changed (5):
  1. .gitignore +25 -0
  2. DockerFIle +22 -0
  3. app.py +28 -0
  4. model.py +58 -0
  5. requirements.txt +7 -0
.gitignore ADDED
@@ -0,0 +1,25 @@
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+ # dependencies
+ /node_modules
+ /.pnp
+ .pnp.js
+ /venv
+ /__pycache__
+ /myenv
+ # testing
+ /coverage
+
+ # production
+ /build
+
+ # misc
+ .DS_Store
+ .env.local
+ .env.development.local
+ .env.test.local
+ .env.production.local
+
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
DockerFIle ADDED
@@ -0,0 +1,22 @@
+ FROM python:3.10-slim
+
+ # Create a non-root user, as Hugging Face recommends
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install Python dependencies
+ COPY --chown=user requirements.txt requirements.txt
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the app
+ COPY --chown=user . /app
+
+ # Expose port
+ EXPOSE 7860
+
+ # Start Flask app
+ CMD ["python", "app.py"]
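
Note: the image exposes port 7860 and launches app.py with Flask's built-in server, and the model weights are downloaded and loaded at startup, so the container takes a while before it begins accepting requests. A minimal readiness check is sketched below, assuming the container was started with port 7860 published locally (e.g. -p 7860:7860); the base URL is an assumption about the local setup, not part of the repository.

import time
import requests

BASE_URL = "http://localhost:7860"  # assumes the container's port 7860 is published locally

# Poll until the Flask app inside the container answers. Calling /predict
# without a 'url' parameter should return the 400 error defined in app.py.
for _ in range(60):
    try:
        resp = requests.get(f"{BASE_URL}/predict", timeout=5)
        print(resp.status_code, resp.json())  # expected: 400 {'error': 'URL parameter is missing'}
        break
    except requests.ConnectionError:
        time.sleep(5)
else:
    print("Container did not become ready in time")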
app.py ADDED
@@ -0,0 +1,28 @@
+ from flask import Flask, request, jsonify
+ from flask_cors import CORS
+ from model import load_model, predict_species, get_label_names
+
+ app = Flask(__name__)
+
+ CORS(app)
+
+ # Load model once
+ model = load_model()
+ label_names = get_label_names()
+
+ @app.route('/predict', methods=['GET'])
+ def predict():
+     image_url = request.args.get('url')
+     if not image_url:
+         return jsonify({'error': 'URL parameter is missing'}), 400
+
+     try:
+         predicted_species = predict_species(model, image_url, label_names)
+         return jsonify({'species': predicted_species})
+     except Exception as e:
+         return jsonify({'error': str(e)}), 500
+
+ if __name__ == '__main__':
+     import os
+     app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
+
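
Once the service is up, /predict takes the image URL as a query parameter and returns JSON. A minimal client sketch, assuming the app is reachable on localhost:7860; the image URL below is a placeholder, not part of the repository.

import requests

IMAGE_URL = "https://example.com/some_animal.jpg"  # placeholder; any publicly reachable image URL

resp = requests.get(
    "http://localhost:7860/predict",
    params={"url": IMAGE_URL},
    timeout=120,  # ViT-L inference on CPU can be slow
)
resp.raise_for_status()
print(resp.json())  # e.g. {"species": "..."}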
model.py ADDED
@@ -0,0 +1,58 @@
+ import timm
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+ import requests
+ from io import BytesIO
+
+
+ def load_model():
+     """Load the pre-trained model."""
+     model = timm.create_model("hf_hub:timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k_inat21", pretrained=True)
+     model.eval()
+     return model
+
+
+ def get_label_names():
+     """Fetch the class labels from the Hugging Face Hub."""
+     config_url = "https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k_inat21/resolve/main/config.json"
+     response = requests.get(config_url)
+     response.raise_for_status()
+     config = response.json()
+     return config["label_names"]
+
+
+ def preprocess_image(image_url):
+     """Fetch and preprocess the image."""
+     preprocess = transforms.Compose([
+         transforms.Resize(336),
+         transforms.CenterCrop(336),
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+     ])
+
+     response = requests.get(image_url)
+     response.raise_for_status()
+     image = Image.open(BytesIO(response.content)).convert("RGB")  # force 3 channels (handles RGBA/grayscale)
+     input_tensor = preprocess(image).unsqueeze(0)  # Add a batch dimension
+     return input_tensor
+
+
+ def predict_species(model, image_url, label_names):
+     """Make a prediction using the model."""
+     input_tensor = preprocess_image(image_url)
+
+     # Move to GPU if available
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model = model.to(device)
+     input_tensor = input_tensor.to(device)
+
+     # Make prediction
+     with torch.no_grad():
+         output = model(input_tensor)
+         _, predicted_class = torch.max(output, 1)
+
+     # Map prediction to species
+     predicted_species = label_names[predicted_class.item()]
+     return predicted_species
+ #finish
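
model.py can also be exercised without Flask, which is handy for debugging the model outside the container. A minimal sketch using the functions above, assuming network access to the Hugging Face Hub; the image URL is again a placeholder.

from model import load_model, get_label_names, predict_species

image_url = "https://example.com/some_animal.jpg"  # placeholder image URL

model = load_model()          # downloads the pretrained weights on first use
labels = get_label_names()    # fetches the class names from the model's config.json
print(predict_species(model, image_url, labels))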
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ flask
+ flask-cors
+ timm
+ torch
+ torchvision
+ pillow
+ requests