Rajeev-86 committed
Commit 82e3da2
0 Parent(s)

Initial commit for TorchServe Docker setup

Files changed (3)
  1. Dockerfile +28 -0
  2. image_handler.py +31 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,28 @@
+ # 1. Pinned base image
+ FROM pytorch/torchserve:0.12.0-cpu
+
+ USER root
+
+ # 2. Install gdown temporarily; it is removed again after the model downloads below
+ RUN pip install --no-cache-dir gdown
+
+ # 3. Install other dependencies and clean up
+ COPY requirements.txt /tmp/requirements.txt
+ RUN pip install -r /tmp/requirements.txt && rm /tmp/requirements.txt
+
+ WORKDIR /home/model-server
+ RUN mkdir model_store
+
+ # 4. Download models using gdown, then remove gdown so it does not ship in the final image
+ RUN gdown 1x9lGntRiYsNb-dYf1SugGGwnoGa_oQes -O model_store/UNET.mar
+ RUN gdown 1Y_P77RtNnC1StUeBGlONuNGAW5rFq02S -O model_store/R-UNET.mar
+ RUN gdown 1VYvAh5S5MQICbqmQkNJ1Epdmcm5VgVWb -O model_store/A-R-UNET.mar && pip uninstall -y gdown
+
+ USER model-server
+
+ # 5. Start TorchServe with all models
+ CMD ["torchserve", \
+      "--start", \
+      "--ncs", \
+      "--model-store", "/home/model-server/model_store", \
+      "--models", "model_unet=UNET.mar,model_runet=R-UNET.mar,model_arunet=A-R-UNET.mar"]
image_handler.py ADDED
@@ -0,0 +1,31 @@
+ from ts.torch_handler.base_handler import BaseHandler
+ import torch
+ import torchvision.transforms as transforms
+ from PIL import Image
+ import io
+
+ class ImageHandler(BaseHandler):
+     def __init__(self):
+         super(ImageHandler, self).__init__()
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.transform = transforms.Compose([transforms.ToTensor()])
+
+     def preprocess(self, data):
+         # TorchServe delivers the payload as raw bytes (under "data" or "body"); decode into a PIL image
+         image_bytes = data[0].get("data") or data[0].get("body")
+         image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+         tensor = self.transform(image).unsqueeze(0).to(self.device)
+         return tensor
+
+     def inference(self, data, *args, **kwargs):
+         with torch.no_grad():
+             output = self.model(data)
+         return output
+
+     def postprocess(self, data):
+         output_tensor = data.squeeze(0).cpu().clamp(0, 1)  # ensure valid pixel range
+         output_image = transforms.ToPILImage()(output_tensor)
+
+         buf = io.BytesIO()
+         output_image.save(buf, format="PNG")
+         return [buf.getvalue()]
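
For a quick check outside TorchServe, the handler can be driven directly with a fake request payload. This is only a sketch: torch.nn.Identity() stands in for a real .mar model, BaseHandler.initialize() is skipped, and the import assumes the handler is saved as image_handler.py as in this commit.

import io
import torch
from PIL import Image
from image_handler import ImageHandler

handler = ImageHandler()
handler.model = torch.nn.Identity()   # placeholder model: simply echoes its input tensor

# Build a payload shaped like a TorchServe request: a list of dicts holding raw image bytes.
buf = io.BytesIO()
Image.new("RGB", (64, 64), color="red").save(buf, format="PNG")
payload = [{"body": buf.getvalue()}]

tensor = handler.preprocess(payload)        # bytes -> 1x3x64x64 float tensor on the handler device
output = handler.inference(tensor)          # forward pass under torch.no_grad()
png_bytes = handler.postprocess(output)[0]  # back to PNG bytes
print(f"round-trip produced {len(png_bytes)} bytes of PNG")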
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==2.8.0+cu126
+ torchvision==0.23.0+cu126
+ numpy==2.0.2
+ pillow==11.3.0
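
A small sanity check, run from a shell inside the built container, can confirm these pins are importable. The snippet below is only a sketch and assumes the pip install step in the Dockerfile completed successfully.

# Version sanity check: confirm the packages pinned in requirements.txt are importable.
import torch
import torchvision
import numpy
import PIL

print("torch       :", torch.__version__)
print("torchvision :", torchvision.__version__)
print("numpy       :", numpy.__version__)
print("pillow      :", PIL.__version__)
print("CUDA available:", torch.cuda.is_available())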