Forum Archive

CoreML MLMultiArray to Image ?

momorprods

Greetings, I’m currently experimenting with CoreML using the template @omz made a while ago. I’m trying to run the DeepLabV3 model, which returns an image as an output. More precisely: an MLMultiArray Int32 513 x 513 matrix.

And so far I didn’t manage to find a trick to transform this into a PIL image.

Has anyone already tried this?

Thanks!

cvp

@momorprods try this — a quick and dirty beginning of a solution; I don't know much about this topic.
There are surely better ways to build a PIL image from this kind of matrix.

#!python3
'''
This is a demo of how you can use the CoreML framework (via objc_util) to classify images in Pythonista. It downloads the trained 'MobileNet' CoreML model from the Internet, and uses it to classify images that are either taken with the camera, or picked from the photo library.
'''
import numpy as np
from PIL import Image
import requests
import os
import io
import photos
import dialogs
from PIL import Image
from objc_util import ObjCClass, nsurl, ns

# Configuration (change URL and filename if you want to use a different model).
# Other models previously tried with this template:
#MODEL_URL = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel'
#MODEL_FILENAME = 'mobilenet.mlmodel'
#MODEL_FILENAME = 'Alphanum_28x28.mlmodel'
# NOTE(review): MODEL_URL must be defined — the original left it commented out,
# so load_model() raised a NameError whenever the model file was not already
# cached. The URL below is Apple's published download location for DeepLabV3;
# TODO confirm it is still current.
MODEL_URL = 'https://ml-assets.apple.com/coreml/models/Image/ImageSegmentation/DeepLabV3/DeepLabV3.mlmodel'
MODEL_FILENAME = 'DeepLabV3.mlmodel'

# Use a local path for caching the model file (no need to sync this with iCloud):
MODEL_PATH = os.path.join(os.path.expanduser('~/Documents'), MODEL_FILENAME)

# Declare/import ObjC classes:
MLModel = ObjCClass('MLModel')
VNCoreMLModel = ObjCClass('VNCoreMLModel')
VNCoreMLRequest = ObjCClass('VNCoreMLRequest')
VNImageRequestHandler = ObjCClass('VNImageRequestHandler')
# NOTE(review): this allocated instance appears unused in this script;
# kept as-is in case external code relies on it.
VNDetectTextRectanglesRequest = ObjCClass('VNDetectTextRectanglesRequest').alloc()
#print(dir(VNDetectTextRectanglesRequest))

def load_model():
    '''Download (and cache) the .mlmodel file, then compile and wrap it.

    Returns a VNCoreMLModel ready for use with VNCoreMLRequest.
    The raw model file is cached at MODEL_PATH; the Core ML compile step
    still runs on every call.
    '''
    if not os.path.exists(MODEL_PATH):
        print(f'Downloading model: {MODEL_FILENAME}...')
        r = requests.get(MODEL_URL, stream=True)
        # Fail early on HTTP errors instead of caching an error page as a model:
        r.raise_for_status()
        # 'content-length' may be absent (e.g. chunked transfer); fall back to 0
        # and skip the percentage display in that case.
        file_size = int(r.headers.get('content-length', 0))
        with open(MODEL_PATH, 'wb') as f:
            bytes_written = 0
            for chunk in r.iter_content(1024*100):
                f.write(chunk)
                # Count the chunk *before* reporting so the percentage reflects
                # data actually written (the original always started at 0.00%
                # and never reported 100%).
                bytes_written += len(chunk)
                if file_size:
                    print(f'{bytes_written/file_size*100:.2f}% downloaded')
        print('Download finished')
    ml_model_url = nsurl(MODEL_PATH)
    # Compile the model (produces a compiled .mlmodelc bundle):
    c_model_url = MLModel.compileModelAtURL_error_(ml_model_url, None)
    # Load model from the compiled model file:
    ml_model = MLModel.modelWithContentsOfURL_error_(c_model_url, None)
    # Create a VNCoreMLModel from the MLModel for use with the Vision framework:
    vn_model = VNCoreMLModel.modelForMLModel_error_(ml_model, None)
    return vn_model


def _classify_img_data(img_data):
    '''Run the Vision/CoreML request on raw image bytes.

    For the DeepLabV3 model, the first result carries an MLMultiArray
    (513x513 Int32 segmentation mask) instead of a classification label.
    As a quick-and-dirty workaround, the mask is rendered to a black/white
    PIL image by parsing the array's string representation, which looks
    like "... [n,n,...; n,n,...]" (rows separated by ';', values by ',').

    Returns a dict {'label': ..., 'confidence': ...} on success, or
    None when the Vision request fails.
    '''
    vn_model = load_model()
    # Create and perform the recognition request:
    req = VNCoreMLRequest.alloc().initWithModel_(vn_model).autorelease()
    handler = VNImageRequestHandler.alloc().initWithData_options_(img_data, None).autorelease()
    success = handler.performRequests_error_([req], None)
    if not success:
        return None
    best_result = req.results()[0]
    matrix_str = str(best_result.featureValue().multiArrayValue())
    # Strip everything up to and including '[', and the trailing ']':
    start = matrix_str.find('[')
    rows = matrix_str[start+1:-1].split(';')
    pil = Image.new("RGB", (513, 513))
    pix = pil.load()
    for y, row in enumerate(rows):
        for x, cell in enumerate(row.split(',')):
            # Cells exactly equal to '0' render black, everything else white.
            # (Cells with leading whitespace, e.g. ' 0', render white — this
            # matches the original quick-and-dirty behavior.)
            pixel = 0 if cell == '0' else 255
            pix[x, y] = (pixel, pixel, pixel)
    pil.show()
    try:
        label = str(best_result.identifier())
    except AttributeError:
        # Segmentation results carry no identifier (classification ones do);
        # narrowed from a bare `except:` that hid every other error.
        label = 'no identifier in result'
    confidence = best_result.confidence()
    return {'label': label, 'confidence': confidence}


def classify_image(img):
    '''Encode a (camera) PIL image as JPEG and classify the resulting bytes.'''
    jpeg_buffer = io.BytesIO()
    img.save(jpeg_buffer, 'JPEG')
    return _classify_img_data(ns(jpeg_buffer.getvalue()))


def classify_asset(asset):
    '''Classify a photo-library asset using its raw image data.'''
    raw_bytes = asset.get_image_data().getvalue()
    return _classify_img_data(ns(raw_bytes))


def scale_image(img, max_dim):
    '''Return a copy of `img` downscaled so its longest side is `max_dim` px.

    Aspect ratio is preserved; used only for showing a preview in the console.
    '''
    scale = max_dim / max(img.size)
    new_size = (int(img.size[0] * scale), int(img.size[1] * scale))
    # Image.ANTIALIAS was deprecated and removed in Pillow 10 in favour of
    # Image.LANCZOS (same filter); fall back for older PIL builds such as
    # the one bundled with Pythonista.
    resample = getattr(Image, 'LANCZOS', getattr(Image, 'ANTIALIAS', None))
    return img.resize(new_size, resample)


def main():
    '''Ask for an image source, show a preview, classify, print the result.'''
    choice = dialogs.alert('Classify Image', '', 'Camera', 'Photo Library')
    if choice == 1:
        # Take a fresh photo with the camera.
        img = photos.capture_image()
        if img is None:
            return
        scale_image(img, 224).show()
        result = classify_image(img)
    else:
        # Pick an existing asset from the photo library.
        asset = photos.pick_asset(assets=photos.get_assets())
        if asset is None:
            return
        result = classify_asset(asset)
        asset.get_ui_image((255, 255)).show()
    if result:
        print(result)
    else:
        print('Image classification failed')


# Run interactively only when executed as a script (not when imported):
if __name__ == '__main__':
    main()

momorprods

@cvp thanks a lot, this works great. You’re so fast! 👍🙏

cvp

@momorprods said:

You’re so fast!

Not at all — I spent 2 hours writing 10 lines; not proud of that 😂

cvp

@emma0122 said:

If you are writing correct

Ask @ihf if he thinks I write correct 😂