| reponame | filepath | content |
|---|---|---|
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/kubeflow_runner.py | 
	from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner as runner
from tfx.proto import trainer_pb2
from pipeline import configs, pipeline
def run():
    runner_config = runner.KubeflowV2DagRunnerConfig(default_image=configs.PIPELINE_IMAGE)
    runner.KubeflowV2DagRunner(
        config=runner_config,
        output_filename=configs.PIPELINE_NAME + "_pipeline.json",
    ).run(
        pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=configs.PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
            eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
            eval_configs=configs.EVAL_CONFIGS,
            ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
            ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
            example_gen_beam_args=configs.EXAMPLE_GEN_BEAM_ARGS,
            transform_beam_args=configs.TRANSFORM_BEAM_ARGS,
            hf_pusher_args=configs.HF_PUSHER_ARGS,
        )
    )
if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
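# ---------------------------------------------------------------------------
# Hedged sketch (illustrative, not part of the original runner): the
# KubeflowV2DagRunner above only *compiles* the pipeline into a JSON spec.
# One way to submit that spec to Vertex AI Pipelines is the
# google-cloud-aiplatform SDK; the helper below is an assumption and simply
# reuses values already defined in configs.
def _submit_to_vertex_pipelines():
    from google.cloud import aiplatform

    aiplatform.init(
        project=configs.GOOGLE_CLOUD_PROJECT, location=configs.GOOGLE_CLOUD_REGION
    )
    job = aiplatform.PipelineJob(
        display_name=configs.PIPELINE_NAME,
        template_path=configs.PIPELINE_NAME + "_pipeline.json",
        pipeline_root=configs.PIPELINE_ROOT,
    )
    job.submit()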
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/local_runner.py | 
	import os
from absl import logging
from tfx import v1 as tfx
from tfx.orchestration.data_types import RuntimeParameter
from pipeline import configs
from pipeline import local_pipeline
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
# NOTE: It is recommended to have a separate OUTPUT_DIR which is *outside* of
#       the source code structure. Please change OUTPUT_DIR to another location
#       where the outputs of the pipeline can be stored.
OUTPUT_DIR = "."
# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
# - Metadata will be written to SQLite database in METADATA_PATH.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
METADATA_PATH = os.path.join(
    OUTPUT_DIR, "tfx_metadata", configs.PIPELINE_NAME, "metadata.db"
)
# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")
# Specifies the data file directory. DATA_PATH should be a directory containing
# TFRecord files for ImportExampleGen in this example. By default, data files
# are in the `data` directory.
# NOTE: If you upload data files to GCS (which is recommended if you use
#       Kubeflow), you can use a path starting with "gs://YOUR_BUCKET_NAME/path"
#       for DATA_PATH. For example,
#       DATA_PATH = 'gs://bucket/penguin/csv/'.
# TODO(step 4): Specify the path for your data.
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
def run():
    """Define a pipeline."""
    tfx.orchestration.LocalDagRunner().run(
        local_pipeline.create_pipeline(
            pipeline_name=configs.PIPELINE_NAME,
            pipeline_root=PIPELINE_ROOT,
            data_path=configs.DATA_PATH,
            schema_path=configs.SCHEMA_PATH,
            modules={
                "training_fn": configs.TRAINING_FN,
                "preprocessing_fn": configs.PREPROCESSING_FN,
            },
            train_args=tfx.proto.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
            eval_args=tfx.proto.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
            eval_configs=configs.EVAL_CONFIGS,
            serving_model_dir=SERVING_MODEL_DIR,
            metadata_connection_config=tfx.orchestration.metadata.sqlite_metadata_connection_config(
                METADATA_PATH
            ),
        )
    )
if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    run()
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/model_analysis.ipynb | 
	# import required libs
import glob
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
print('TF version: {}'.format(tf.version.VERSION))
print('TFMA version: {}'.format(tfma.version.VERSION_STRING))

# Read artifact information from metadata store.
import beam_dag_runner
from tfx.orchestration import metadata
from tfx.types import standard_artifacts
metadata_connection_config = metadata.sqlite_metadata_connection_config(
              beam_dag_runner.METADATA_PATH)
with metadata.Metadata(metadata_connection_config) as store:
    model_eval_artifacts = store.get_artifacts_by_type(
        standard_artifacts.ModelEvaluation.TYPE_NAME
    )

# Configure output paths.
# Exact paths to output artifacts can be found in the execution logs
# or KFP Web UI if you are using kubeflow.
model_eval_path = model_eval_artifacts[-1].uri
print("Generated model evaluation result:{}".format(model_eval_path))eval_result = tfma.load_eval_result(model_eval_path)
tfma.view.render_slicing_metrics(eval_result, slicing_spec = tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])) | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/__init__.py | 
	# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/common.py | 
	IMAGE_KEY = "image"
IMAGE_SHAPE_KEY = "image_shape"
LABEL_KEY = "label"
LABEL_SHAPE_KEY = "label_shape"
CONCRETE_INPUT = "pixel_values"
NUM_LABELS = 3
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/hyperparams.py | 
	INPUT_IMG_SIZE = 128
TRAIN_BATCH_SIZE = 64
EVAL_BATCH_SIZE = 64
EPOCHS = 10
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/preprocessing.py | 
	import tensorflow as tf
from tensorflow.keras.applications import mobilenet_v2
from .utils import transformed_name
from .common import IMAGE_KEY, LABEL_KEY
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.
    Args:
      inputs: map from feature keys to raw not-yet-transformed features.
    Returns:
      Map from string feature key to transformed feature operations.
    """
    # print(inputs)
    outputs = {}
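    # mobilenet_v2.preprocess_input scales raw pixel values into the [-1, 1]
    # range expected by the MobileNetV2 backbone used in models/unet.py.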
    image_features = mobilenet_v2.preprocess_input(inputs[IMAGE_KEY])
    outputs[transformed_name(IMAGE_KEY)] = image_features
    outputs[transformed_name(LABEL_KEY)] = inputs[LABEL_KEY]
    return outputs
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/signatures.py | 
	from typing import Dict
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow.keras.applications import mobilenet_v2
from .utils import transformed_name
from .common import IMAGE_KEY, LABEL_KEY, CONCRETE_INPUT
from .hyperparams import INPUT_IMG_SIZE
def _serving_preprocess(string_input):
    """
    _serving_preprocess turns base64 encoded string data into a
    string type of Tensor. Then it is decoded as 3 channel based
    uint8 type of Tensor. Finally, it is normalized and resized
    to the size the model expects.
    """
    decoded_input = tf.io.decode_base64(string_input)
    decoded = tf.io.decode_jpeg(decoded_input, channels=3)
    resized = tf.image.resize(decoded, size=(INPUT_IMG_SIZE, INPUT_IMG_SIZE))
    normalized = mobilenet_v2.preprocess_input(resized)
    return normalized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def _serving_preprocess_fn(string_input):
    """
    _serving_preprocess_fn simply iteratively applies _serving_preprocess
    function to each entry of the batch of requests. So, the output is the
    preprocessed batch of requests being ready to be fed into the model.
    """
    decoded_images = tf.map_fn(
        _serving_preprocess, string_input, dtype=tf.float32, back_prop=False
    )
    return {transformed_name(IMAGE_KEY): decoded_images}
def model_exporter(model: tf.keras.Model):
    """
    model_exporter will be assigned to the "serving_defaults" signature.
    that means, when clients sends requests to the endpoint of this model
    hosted on TF Serving, the serving_fn which model_exporter returns will
    be the first point where the request payloads are going to be accepted.
    """
    m_call = tf.function(model.call).get_concrete_function(
        tf.TensorSpec(
            shape=[None, INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3],
            dtype=tf.float32,
            name=transformed_name(IMAGE_KEY),
        )
    )
    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(string_input):
        """
        serving_fn simply preprocesses the request payloads with the
        _serving_preprocess_fn, then the preprocessed data will be fed
        into the model. The model outputs predictions of the request p
        ayloads, then it will be returned back to the client after app
        lying postprocess of tf.math.argmax to the outputs(logits)
        """
        images = _serving_preprocess_fn(string_input)
        logits = m_call(**images)
        seg_mask = tf.math.argmax(logits, -1)
        return {"seg_mask": seg_mask}
    return serving_fn
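# Hedged client-side sketch (illustrative, not part of the original module):
# the serving signature above expects *web-safe* base64 strings, because
# tf.io.decode_base64 is used in _serving_preprocess. A client could prepare a
# request roughly like this (the file name and model path are assumptions):
#
#   import base64
#   import tensorflow as tf
#
#   with open("sample.jpg", "rb") as f:
#       encoded = base64.urlsafe_b64encode(f.read()).decode("utf-8")
#   loaded = tf.saved_model.load("path/to/serving_model")
#   result = loaded.signatures["serving_default"](string_input=tf.constant([encoded]))
#   seg_mask = result["seg_mask"]  # per-pixel class indices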
"""
    Note that transform_features_signature and tf_examples_serving_signature
    functions exist only for model evaluation purposes with Evaluator component.
"""
def transform_features_signature(
    model: tf.keras.Model, tf_transform_output: tft.TFTransformOutput
):
    """
    transform_features_signature simply returns a function that transforms
    any data of the type of tf.Example which is denoted as the type of sta
    ndard_artifacts.Examples in TFX. The purpose of this function is to ap
    ply Transform Graph obtained from Transform component to the data prod
    uced by ImportExampleGen. This function will be used in the Evaluator
    component, so the raw evaluation inputs from ImportExampleGen can be a
    pporiately transformed that the model could understand.
    """
    # Basically, what the Transform component emits is a SavedModel that knows
    # how to transform data. transform_features_layer() simply returns that
    # transformation layer from the Transform output.
    model.tft_layer = tf_transform_output.transform_features_layer()
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(serialized_tf_examples):
        """
        raw_feature_spec returns a set of feature maps(dict) for the input
        TFRecords based on the knowledge that Transform component has lear
        ned(learn doesn't mean training here). By using this information,
        the raw data from ImportExampleGen could be parsed with tf.io.parse
        _example utility function.
        Then, it is passed to the model.tft_layer, so the final output we
        get is the transformed data of the raw input.
        """
        feature_spec = tf_transform_output.raw_feature_spec()
        parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
        transformed_features = model.tft_layer(parsed_features)
        return transformed_features
    return serve_tf_examples_fn
def tf_examples_serving_signature(model, tf_transform_output):
    """
    tf_examples_serving_signature simply returns a function that performs
    data transformation(preprocessing) and model prediction in a sequential
    manner. How data transformation is done is idential to the process of
    transform_features_signature function.
    """
    model.tft_layer = tf_transform_output.transform_features_layer()
    @tf.function(
        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string, name="examples")]
    )
    def serve_tf_examples_fn(
        serialized_tf_example: tf.Tensor,
    ) -> Dict[str, tf.Tensor]:
        raw_feature_spec = tf_transform_output.raw_feature_spec()
        raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
        transformed_features = model.tft_layer(raw_features)
        outputs = model(transformed_features)
        return {transformed_name(LABEL_KEY): outputs}
    return serve_tf_examples_fn
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/train.py | 
	from typing import List
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor, FnArgs
from tfx_bsl.tfxio import dataset_options
from .common import IMAGE_KEY, LABEL_KEY, NUM_LABELS
from .hyperparams import EPOCHS, EVAL_BATCH_SIZE, TRAIN_BATCH_SIZE
from .signatures import (
    model_exporter,
    tf_examples_serving_signature,
    transform_features_signature,
)
from .unet import build_model
from .utils import transformed_name
"""
    _input_fn reads TFRecord files with the given file_pattern passed down 
    from the upstream TFX component, Transform. The file patterns are inter
    nally determined by Transform component, and they are automatically acce
    ssible through fn_args.train_files and fn_args.eval_files in the run_fn 
    function. Assume the dataset is already transformed appropriately. 
"""
def _input_fn(
    file_pattern: List[str],
    data_accessor: DataAccessor,
    tf_transform_output: tft.TFTransformOutput,
    is_train: bool = False,
    batch_size: int = 200,
) -> tf.data.Dataset:
    """
    DataAccessor is responsible for accessing the data on disk, and
    TensorFlowDatasetOptions provides options for TFXIO's TensorFlowDataset.
    the factory function tf_dataset_factory takes three inputs of List[str],
    dataset_options.TensorFlowDatasetOptions, and schema_pb2.Schema. The
    schema_pb2.Schema holds the information how the TFRecords are structured,
    like what kind of features are accessible. In this case, there are two
    features of image_xf and label_xf, and they are the preprocessed results
    from Transform component.
    - Transform component simply preprocess the raw inputs, then returns the
    transformed output in TFRecord format. tf_dataset_factory is just a handy
    method to access TFRecord, and it is not strongly coupled with Transform
    component.
    by giving label_key option in the TensorFlowDataset, the tf_dataset_factory
    outputs the dataset in the form of Tuple[Dict[str, Tensor], Tensor]. Here,
    the second term will hold label information, and the first term holds what
    ever the rest is in the dataset (image_xf for this case).
    then, in the modeling part, you should have input layers with the names
    appearing in the first term Dict[str, Tensor]. For instance:
        inputs = tf.keras.layers.Input(..., name="image_xf")
    you could get rid of the label_key option, and it is totally optional. But
    then, you should have the output layer named with the label key. Otherwise,
    the model does not know which data from the Tuple to feed in the model. If
    you use label_key option, it it will be directly used in the output layer.
    """
    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        dataset_options.TensorFlowDatasetOptions(
            batch_size=batch_size, label_key=transformed_name(LABEL_KEY), shuffle=is_train
        ),
        tf_transform_output.transformed_metadata.schema,
    )
    return dataset
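# Hedged illustration (not part of the original module): with the label_key
# option set above, each element of the dataset returned by _input_fn is a
# (features, labels) tuple, e.g.
#
#   features, labels = next(iter(dataset))
#   features["image_xf"]   # batched, preprocessed images
#   labels                 # batched "label_xf" segmentation masks
#
# The exact tensor shapes depend on the schema and INPUT_IMG_SIZE.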
def run_fn(fn_args: FnArgs):
    tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
    train_dataset = _input_fn(
        fn_args.train_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=True,
        batch_size=TRAIN_BATCH_SIZE,
    )
    eval_dataset = _input_fn(
        fn_args.eval_files,
        fn_args.data_accessor,
        tf_transform_output,
        is_train=False,
        batch_size=EVAL_BATCH_SIZE,
    )
    model = build_model(
        transformed_name(IMAGE_KEY), transformed_name(LABEL_KEY), NUM_LABELS
    )
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=EPOCHS,
    )
    model.save(
        fn_args.serving_model_dir,
        save_format="tf",
        signatures={
            "serving_default": model_exporter(model),
            "transform_features": transform_features_signature(
                model, tf_transform_output
            ),
            "from_examples": tf_examples_serving_signature(model, tf_transform_output),
        },
    )
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/unet.py | 
	import tensorflow as tf
from .hyperparams import INPUT_IMG_SIZE
"""
    _build_model builds a UNET model. The implementation codes are
    borrowed from the [TF official tutorial on Semantic Segmentation]
    (https://www.tensorflow.org/tutorials/images/segmentation)
"""
def build_model(input_name, label_name, num_labels) -> tf.keras.Model:
    base_model = tf.keras.applications.MobileNetV2(
        input_shape=[INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3], include_top=False
    )
    # Use the activations of these layers
    layer_names = [
        "block_1_expand_relu",  # 64x64
        "block_3_expand_relu",  # 32x32
        "block_6_expand_relu",  # 16x16
        "block_13_expand_relu",  # 8x8
        "block_16_project",  # 4x4
    ]
    base_model_outputs = [base_model.get_layer(name).output for name in layer_names]
    # Create the feature extraction model
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=base_model_outputs)
    down_stack.trainable = False
    up_stack = [
        upsample(512, 3),  # 4x4 -> 8x8
        upsample(256, 3),  # 8x8 -> 16x16
        upsample(128, 3),  # 16x16 -> 32x32
        upsample(64, 3),  # 32x32 -> 64x64
    ]
    inputs = tf.keras.layers.Input(
        shape=[INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3], name=input_name
    )
    # Downsampling through the model
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])
    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(
        filters=num_labels,
        kernel_size=3,
        strides=2,
        padding="same",
        name=label_name,
    )  # 64x64 -> 128x128
    x = last(x)
    model = tf.keras.Model(inputs=inputs, outputs=x)
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        # By supplying "accuracy", Keras will automatically infer the appropriate variant;
        # in this case, sparse_categorical_accuracy will be chosen.
        metrics=["accuracy"],
    )
    return model
"""
    InstanceNormalization class and upsample function are
    borrowed from pix2pix in [TensorFlow Example repository](
    https://github.com/tensorflow/examples/tree/master/tensorflow_examples/models/pix2pix)
"""
class InstanceNormalization(tf.keras.layers.Layer):
    """Instance Normalization Layer (https://arxiv.org/abs/1607.08022)."""
    def __init__(self, epsilon=1e-5):
        super(InstanceNormalization, self).__init__()
        self.epsilon = epsilon
    def build(self, input_shape):
        self.scale = self.add_weight(
            name="scale",
            shape=input_shape[-1:],
            initializer=tf.random_normal_initializer(1.0, 0.02),
            trainable=True,
        )
        self.offset = self.add_weight(
            name="offset", shape=input_shape[-1:], initializer="zeros", trainable=True
        )
    def call(self, x):
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        normalized = (x - mean) * inv
        return self.scale * normalized + self.offset
def upsample(filters, size, norm_type="batchnorm", apply_dropout=False):
    """Upsamples an input.
    Conv2DTranspose => Batchnorm => Dropout => Relu
    Args:
      filters: number of filters
      size: filter size
      norm_type: Normalization type; either 'batchnorm' or 'instancenorm'.
      apply_dropout: If True, adds the dropout layer
    Returns:
      Upsample Sequential Model
    """
    initializer = tf.random_normal_initializer(0.0, 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(
            filters,
            size,
            strides=2,
            padding="same",
            kernel_initializer=initializer,
            use_bias=False,
        )
    )
    if norm_type.lower() == "batchnorm":
        result.add(tf.keras.layers.BatchNormalization())
    elif norm_type.lower() == "instancenorm":
        result.add(InstanceNormalization())
    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))
    result.add(tf.keras.layers.ReLU())
    return result
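# Hedged usage sketch (illustrative): building the U-Net with the transformed
# feature/label names used elsewhere in this pipeline ("image_xf"/"label_xf"
# come from transformed_name in models/utils.py; 3 labels from models/common.py).
#
#   model = build_model("image_xf", "label_xf", num_labels=3)
#   model.summary()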
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/models/utils.py | 
	import absl
def INFO(text: str):
    absl.logging.info(text)
def transformed_name(key: str) -> str:
    return key + "_xf"
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/configs.py | 
	import os  # pylint: disable=unused-import
import tensorflow_model_analysis as tfma
import tfx.extensions.google_cloud_ai_platform.constants as vertex_const
import tfx.extensions.google_cloud_ai_platform.trainer.executor as vertex_training_const
PIPELINE_NAME = "segmentation-training-pipeline"
try:
    import google.auth  # pylint: disable=g-import-not-at-top  # pytype: disable=import-error
    try:
        _, GOOGLE_CLOUD_PROJECT = google.auth.default()
    except google.auth.exceptions.DefaultCredentialsError:
        GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
except ImportError:
    GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = GOOGLE_CLOUD_PROJECT + "-complete-mlops"
PIPELINE_IMAGE = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{PIPELINE_NAME}"
OUTPUT_DIR = os.path.join("gs://", GCS_BUCKET_NAME)
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", PIPELINE_NAME)
DATA_PATH = "gs://pets-tfrecords/pets-tfrecords/"
SCHEMA_PATH = "pipeline/schema.pbtxt" # GCS path is also allowed
PREPROCESSING_FN = "models.preprocessing.preprocessing_fn"
TRAINING_FN = "models.train.run_fn"
TUNER_FN = "models.train.tuner_fn"
CLOUD_TUNER_FN = "models.train.tuner_fn"
GRADIO_APP_PATH = "apps.gradio.img_classifier"
MODEL_HUB_REPO_PLACEHOLDER = "$MODEL_REPO_ID"
MODEL_HUB_URL_PLACEHOLDER = "$MODEL_REPO_URL"
MODEL_VERSION_PLACEHOLDER = "$MODEL_VERSION"
TRAIN_NUM_STEPS = 160
EVAL_NUM_STEPS = 4
EXAMPLE_GEN_BEAM_ARGS = None
TRANSFORM_BEAM_ARGS = None
"""
EVAL_CONFIGS is to configuration for the Evaluator component to define 
how it is going to evalua the model performance. The full spec follows 
the EvalConfig protocol buffer message, which can be found here: 
https://github.com/tensorflow/model-analysis/blob/v0.41.0/tensorflow_model_analysis/proto/config.proto
tfma.ModelSpec
    signature_name is one of the signature in the SavedModel from
    Trainer component, and it will be used to make predictions on 
    given data. preprocessing_function_names allows us to include
    a set of transformation(preprocessing) signatures in the Saved
    Model from Trainer component. 
    label_key and prediction_key will be used to compare the ground
    truth and prediction results.
slicing_specs
    we use the entire dataset to evaluate the model performance. If
    you want to evaluate the model based on different slices of data
    set, you should prepare TFRecords to have multiple features which
    of each corresponds to each slices(or categories), then write the
    slicing_specs options accordingly. Also we can evaluate the model 
    performance on different slices of data differently with PerSlice
    MetricThreshold in the metrics_specs section.
"""
EVAL_CONFIGS = tfma.EvalConfig(
    model_specs=[
        tfma.ModelSpec(
            # The names in signature_name and preprocessing_function_names
            # are defined in the `signatures` parameter of model.save().
            # You can find how it is done in models/train.py.
            signature_name="from_examples",
            preprocessing_function_names=["transform_features"],
            label_key="label_xf",
            prediction_key="label_xf",
        )
    ],
    slicing_specs=[tfma.SlicingSpec()],
    metrics_specs=[
        tfma.MetricsSpec(
            metrics=[
                tfma.MetricConfig(
                    class_name="SparseCategoricalAccuracy",
                    threshold=tfma.MetricThreshold(
                        # value_threshold is normally defined to set the minimum
                        # performance threshold. That means a model whose
                        # performance is better than this is going to be the first
                        # model, and it also means that it is only used when there
                        # is no model deployed in production yet.
                        value_threshold=tfma.GenericValueThreshold(
                            lower_bound={"value": 0.55}
                        ),
                        # We can specify two models in the Evaluator component. One
                        # is the currently trained model, and the other is the
                        # best model currently deployed (retrieved from the Artifact
                        # Store). change_threshold lets us define the threshold by
                        # how much the currently trained model should be better than
                        # the previous model in order to replace it.
                        change_threshold=tfma.GenericChangeThreshold(
                            direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                            absolute={"value": -1e-3},
                        ),
                    ),
                )
            ]
        )
    ],
)
GCP_AI_PLATFORM_TRAINING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_training_const.TRAINING_ARGS_KEY: {
        "project": GOOGLE_CLOUD_PROJECT,
        "worker_pool_specs": [
            {
                "machine_spec": {
                    "machine_type": "n1-standard-4",
                    "accelerator_type": "NVIDIA_TESLA_K80",
                    "accelerator_count": 1,
                },
                "replica_count": 1,
                "container_spec": {
                    "image_uri": PIPELINE_IMAGE,
                },
            }
        ],
    },
    "use_gpu": True,
}
fullres_data = os.environ.get("ENABLE_DATAFLOW", "false")
if fullres_data.lower() == "true":
    DATA_PATH = "gs://sidewalks-tfx-fullres/sidewalks-tfrecords/"
    DATAFLOW_SERVICE_ACCOUNT = "[email protected]"
    DATAFLOW_MACHINE_TYPE = "n1-standard-4"
    DATAFLOW_MAX_WORKERS = 4
    DATAFLOW_DISK_SIZE_GB = 100
    EXAMPLE_GEN_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
    ]
    TRANSFORM_BEAM_ARGS = [
        "--runner=DataflowRunner",
        "--project=" + GOOGLE_CLOUD_PROJECT,
        "--region=" + GOOGLE_CLOUD_REGION,
        "--service_account_email=" + DATAFLOW_SERVICE_ACCOUNT,
        "--machine_type=" + DATAFLOW_MACHINE_TYPE,
        "--experiments=use_runner_v2",
        "--max_num_workers=" + str(DATAFLOW_MAX_WORKERS),
        "--disk_size_gb=" + str(DATAFLOW_DISK_SIZE_GB),
        "--worker_harness_container_image=" + PIPELINE_IMAGE,
    ]
    GCP_AI_PLATFORM_TRAINING_ARGS[vertex_training_const.TRAINING_ARGS_KEY][
        "worker_pool_specs"
    ] = [
        {
            "machine_spec": {
                "machine_type": "n1-standard-8",
                "accelerator_type": "NVIDIA_TESLA_V100",
                "accelerator_count": 1,
            },
            "replica_count": 1,
            "container_spec": {
                "image_uri": PIPELINE_IMAGE,
            },
        }
    ]
GCP_AI_PLATFORM_SERVING_ARGS = {
    vertex_const.ENABLE_VERTEX_KEY: True,
    vertex_const.VERTEX_REGION_KEY: GOOGLE_CLOUD_REGION,
    vertex_const.VERTEX_CONTAINER_IMAGE_URI_KEY: "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-8:latest",
    vertex_const.SERVING_ARGS_KEY: {
        "project_id": GOOGLE_CLOUD_PROJECT,
        "deployed_model_display_name": PIPELINE_NAME.replace("-", "_"),
        "endpoint_name": "prediction-" + PIPELINE_NAME.replace("-", "_"),
        "traffic_split": {"0": 100},
        "machine_type": "n1-standard-4",
        "min_replica_count": 1,
        "max_replica_count": 1,
    },
}
HF_PUSHER_ARGS = {
    "username": "chansung",
    "access_token": "$HF_ACCESS_TOKEN",
    "repo_name": PIPELINE_NAME,
    "space_config": {
        "app_path": "apps.gradio.semantic_segmentation",
    },
}
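# Note (assumption, not stated in this file): "$HF_ACCESS_TOKEN" above looks like
# a placeholder that is expected to be substituted with a real token outside of
# this module (e.g., by CI) before the pipeline is compiled; it is not read from
# the environment at runtime by this config.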
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/local_pipeline.py | 
	from typing import Any, Dict, List, Optional, Text
from tfx import v1 as tfx
import tensorflow_model_analysis as tfma
from ml_metadata.proto import metadata_store_pb2
from tfx.proto import example_gen_pb2
import absl
import tensorflow_model_analysis as tfma
from tfx.components import ImportExampleGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components import Evaluator
from tfx.components import Pusher
from tfx.orchestration import pipeline
from tfx.proto import example_gen_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import (
    LatestBlessedModelResolver,
)
def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    train_args: trainer_pb2.TrainArgs,
    eval_args: trainer_pb2.EvalArgs,
    eval_configs: tfma.EvalConfig,
    serving_model_dir: Text,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
) -> tfx.dsl.Pipeline:
    components = []
    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-00-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-00-*.tfrec"),
        ]
    )
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    components.append(example_gen)
    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)
    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)
    transform = Transform(
        examples=example_gen.outputs["examples"],
        schema=schema_gen.outputs["schema"],
        preprocessing_fn=modules["preprocessing_fn"],
    )
    components.append(transform)
    trainer = Trainer(
        run_fn=modules["training_fn"],
        transformed_examples=transform.outputs["transformed_examples"],
        transform_graph=transform.outputs["transform_graph"],
        schema=schema_gen.outputs["schema"],
        train_args=train_args,
        eval_args=eval_args,
    )
    components.append(trainer)
    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)
    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)
    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs['blessing'],
        "push_destination": tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=serving_model_dir
            )
        ),
    }
    pusher = Pusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)
    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=False,
        metadata_connection_config=metadata_connection_config,
    )
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/pipeline.py | 
	from typing import Any, Dict, List, Optional, Text
import tensorflow_model_analysis as tfma
from ml_metadata.proto import metadata_store_pb2
from tfx import v1 as tfx
from tfx.components import (
    Evaluator,
    ImportExampleGen,
    StatisticsGen,
    Transform,
)
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental.latest_blessed_model_resolver import LatestBlessedModelResolver
from tfx.extensions.google_cloud_ai_platform.pusher.component import (
    Pusher as VertexPusher,
)
from tfx.extensions.google_cloud_ai_platform.trainer.component import (
    Trainer as VertexTrainer,
)
from tfx.orchestration import pipeline
from tfx.proto import example_gen_pb2, trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model, ModelBlessing
from pipeline.components.HFPusher.component import HFPusher
def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    schema_path: Text,
    modules: Dict[Text, Text],
    train_args: trainer_pb2.TrainArgs,
    eval_args: trainer_pb2.EvalArgs,
    eval_configs: tfma.EvalConfig,
    metadata_connection_config: Optional[metadata_store_pb2.ConnectionConfig] = None,
    ai_platform_training_args: Optional[Dict[Text, Text]] = None,
    ai_platform_serving_args: Optional[Dict[Text, Any]] = None,
    example_gen_beam_args: Optional[List] = None,
    transform_beam_args: Optional[List] = None,
    hf_pusher_args: Optional[Dict[Text, Any]] = None,
) -> tfx.dsl.Pipeline:
    components = []
    # Data splitting config.
    input_config = example_gen_pb2.Input(
        splits=[
            example_gen_pb2.Input.Split(name="train", pattern="train-*.tfrec"),
            example_gen_pb2.Input.Split(name="eval", pattern="val-*.tfrec"),
        ]
    )
    # Data input (pipeline starts here).
    example_gen = ImportExampleGen(input_base=data_path, input_config=input_config)
    if example_gen_beam_args is not None:
        example_gen.with_beam_pipeline_args(example_gen_beam_args)
    components.append(example_gen)
    # Generate stats from the data. Useful for preprocessing, post-processing,
    # anomaly detection, etc.
    statistics_gen = StatisticsGen(examples=example_gen.outputs["examples"])
    components.append(statistics_gen)
    schema_gen = tfx.components.ImportSchemaGen(schema_file=schema_path)
    components.append(schema_gen)
    # Apply any preprocessing. Transformations get saved as a graph in a SavedModel.
    transform = Transform(
        examples=example_gen.outputs["examples"],
        schema=schema_gen.outputs["schema"],
        preprocessing_fn=modules["preprocessing_fn"],
    )
    if transform_beam_args is not None:
        transform.with_beam_pipeline_args(transform_beam_args)
    components.append(transform)
    # Training.
    trainer_args = {
        "run_fn": modules["training_fn"],
        "transformed_examples": transform.outputs["transformed_examples"],
        "transform_graph": transform.outputs["transform_graph"],
        "schema": schema_gen.outputs["schema"],
        "train_args": train_args,
        "eval_args": eval_args,
        "custom_config": ai_platform_training_args,
    }
    trainer = VertexTrainer(**trainer_args)
    components.append(trainer)
    # Resolver component - did we do better than the previous model?
    model_resolver = resolver.Resolver(
        strategy_class=LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing),
    ).with_id("latest_blessed_model_resolver")
    components.append(model_resolver)
    # Evaluate the model.
    evaluator = Evaluator(
        examples=example_gen.outputs["examples"],
        model=trainer.outputs["model"],
        baseline_model=model_resolver.outputs["model"],
        eval_config=eval_configs,
    )
    components.append(evaluator)
    # Based on blessing status, push the model to prod (deployment stage.)
    pusher_args = {
        "model": trainer.outputs["model"],
        "model_blessing": evaluator.outputs["blessing"],
        "custom_config": ai_platform_serving_args,
    }
    pusher = VertexPusher(**pusher_args)  # pylint: disable=unused-variable
    components.append(pusher)
    # Push the blessed model to the HF Hub and deploy a demo app on Hugging Face
    # Spaces.
    hf_pusher_args["model"] = trainer.outputs["model"]
    hf_pusher_args["model_blessing"] = evaluator.outputs["blessing"]
    hf_pusher = HFPusher(**hf_pusher_args)
    components.append(hf_pusher)
    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
        metadata_connection_config=metadata_connection_config,
    )
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/apps/gradio/semantic_segmentation/app.py | 
	import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import from_pretrained_keras
from PIL import Image
MODEL_CKPT = "$MODEL_REPO_ID@$MODEL_VERSION"
MODEL = from_pretrained_keras(MODEL_CKPT)
RESOLTUION = 128
PETS_PALETTE = []
with open(r"./palette.txt", "r") as fp:
    for line in fp:
        if "#" not in line:
            tmp_list = list(map(int, line[:-1].strip("][").split(", ")))
            PETS_PALETTE.append(tmp_list)
def preprocess_input(image: Image) -> tf.Tensor:
    image = np.array(image)
    image = tf.convert_to_tensor(image)
    image = tf.image.resize(image, (RESOLTUION, RESOLTUION))
    image = image / 255
    return tf.expand_dims(image, 0)
# The utility get_seg_overlay() below is from:
# https://github.com/deep-diver/semantic-segmentation-ml-pipeline/blob/main/notebooks/inference_from_SavedModel.ipynb
def get_seg_overlay(image, seg):
    color_seg = np.zeros(
        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
    )  # height, width, 3
    palette = np.array(PETS_PALETTE)
    for label, color in enumerate(palette):
        color_seg[seg == label, :] = color
    # Show image + mask
    img = np.array(image) * 0.5 + color_seg * 0.5
    img *= 255
    img = np.clip(img, 0, 255)
    img = img.astype(np.uint8)
    return img
def run_model(image: Image) -> tf.Tensor:
    preprocessed_image = preprocess_input(image)
    prediction = MODEL.predict(preprocessed_image)
    seg_mask = tf.math.argmax(prediction, -1)
    seg_mask = tf.squeeze(seg_mask)
    return seg_mask
def get_predictions(image: Image):
    predicted_segmentation_mask = run_model(image)
    preprocessed_image = preprocess_input(image)
    preprocessed_image = tf.squeeze(preprocessed_image, 0)
    pred_img = get_seg_overlay(
        preprocessed_image.numpy(), predicted_segmentation_mask.numpy()
    )
    return Image.fromarray(pred_img)
title = (
    "Simple demo for a semantic segmentation model trained on the PETS dataset to classify inside, outside, and border of an object."
)
description = """
Note that the outputs obtained in this demo won't be state-of-the-art. The underlying project has a different objective focusing more on the ops side of
deploying a semantic segmentation model. For more details, check out the repository: https://github.com/deep-diver/semantic-segmentation-ml-pipeline/.
"""
demo = gr.Interface(
    get_predictions,
    gr.inputs.Image(type="pil"),
    "pil",
    allow_flagging="never",
    title=title,
    description=description,
    examples=[["test-image1.png"], ["test-image2.png"], ["test-image3.png"], ["test-image4.png"], ["test-image5.png"]],
)
demo.launch()
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/__init__.py | 
	# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================== | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/component.py | 
	# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HuggingFace(HF) Pusher TFX Component.
The HFPusher is used to push model and prototype application to HuggingFace Hub.
"""
from typing import Text, Dict, Any, Optional
from tfx import types
from tfx.dsl.components.base import base_component, executor_spec
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter, ExecutionParameter
from pipeline.components.HFPusher import executor
MODEL_KEY = "model"
PUSHED_MODEL_KEY = "pushed_model"
MODEL_BLESSING_KEY = "model_blessing"
class HFPusherSpec(types.ComponentSpec):
    """ComponentSpec for TFX HFPusher Component."""
    PARAMETERS = {
        "username": ExecutionParameter(type=str),
        "access_token": ExecutionParameter(type=str),
        "repo_name": ExecutionParameter(type=str),
        "space_config": ExecutionParameter(type=Dict[Text, Any], optional=True),
    }
    INPUTS = {
        MODEL_KEY: ChannelParameter(type=standard_artifacts.Model, optional=True),
        MODEL_BLESSING_KEY: ChannelParameter(
            type=standard_artifacts.ModelBlessing, optional=True
        ),
    }
    OUTPUTS = {
        PUSHED_MODEL_KEY: ChannelParameter(type=standard_artifacts.PushedModel),
    }
class HFPusher(base_component.BaseComponent):
    """Component for pushing model and application to HuggingFace Hub.
    The `HFPusher` is a [TFX Component](https://www.tensorflow.org/tfx
    /guide/understanding_tfx_pipelines#component), and its primary pur
    pose is to push a model from an upstream component such as [`Train
    er`](https://www.tensorflow.org/tfx/guide/trainer) to HuggingFace
    Model Hub. It also provides a secondary feature that pushes an app
    lication to HuggingFace Space Hub.
    """
    SPEC_CLASS = HFPusherSpec
    EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
    def __init__(
        self,
        username: str,
        access_token: str,
        repo_name: str,
        space_config: Optional[Dict[Text, Any]] = None,
        model: Optional[types.Channel] = None,
        model_blessing: Optional[types.Channel] = None,
    ):
        """The HFPusher TFX component.
        HFPusher pushes a trained or blessed model to the HuggingFace Model Hub.
        It is designed to work as a downstream component of the Trainer and,
        optionally, the Evaluator components. Trainer gives the trained model,
        and Evaluator gives information on whether the trained model is blessed
        or not after evaluating it. The HFPusher component only publishes a
        model when it is blessed. If Evaluator is not specified, the input
        model will always be pushed.
        Args:
        username: the user ID on the HuggingFace Hub.
        access_token: the access token obtained from the HuggingFace Hub for the
            given username. Refer to [this document](https://huggingface.co/
            docs/hub/security-tokens) to know how to obtain one.
        repo_name: the name of the Model Hub repository where the model will be
            pushed. This should be a unique name under the username within the
            Model Hub. The repository is identified as {username}/{repo_name}.
        space_config: optional configurations used when pushing an application
            to the HuggingFace Space Hub. This is a dictionary, and the following
            information can be set.
            app_path: the path where the application-related files are stored.
                This should follow the form of either app.gradio.segmentation
                or app/gradio/segmentation. This is a required parameter when
                space_config is set. This can be a local or GCS path.
            space_sdk: the Space Hub supports gradio, streamlit, and static
                types of application. The default is set to gradio.
            placeholders: placeholders to replace in every file under app_path.
                This is used to replace special strings with the model-related
                values. If this is not set, the following default placeholders
                will be used.
                ```
                placeholders = {
                    "MODEL_REPO_ID" : "$MODEL_REPO_ID",
                    "MODEL_REPO_URL": "$MODEL_REPO_URL",
                    "MODEL_VERSION" : "$MODEL_VERSION",
                }
                ```
                In this case, the "$MODEL_REPO_ID", "$MODEL_REPO_URL", and
                "$MODEL_VERSION" strings will be replaced with appropriate
                values at runtime. If placeholders are set, the custom strings
                will be used instead.
            repo_name: the name of the Space Hub repository where the application
                will be pushed. This should be a unique name under the username
                within the Space Hub. The repository is identified as
                {username}/{repo_name}. If this is not set, the same name as the
                Model Hub repository will be used.
        model: a TFX input channel containing a Model artifact. This usually
            comes from the standard
            [`Trainer`](https://www.tensorflow.org/tfx/guide/trainer) component.
        model_blessing: a TFX input channel containing a ModelBlessing artifact.
            This usually comes from the standard
            [`Evaluator`](https://www.tensorflow.org/tfx/guide/evaluator) component.
        Returns:
        a TFX output channel containing a PushedModel artifact. It contains
        information on where the model is published and whether the model was
        pushed or not.
        Raises:
            RuntimeError: if app_path is not set when space_config is provided.
        Example:
        Basic usage example:
        ```py
        trainer = Trainer(...)
        evaluator = Evaluator(...)
        hf_pusher = HFPusher(
            username="chansung",
            access_token=<YOUR-HUGGINGFACE-ACCESS-TOKEN>,
            repo_name="my-model",
            model=trainer.outputs["model"],
            model_blessing=evaluator.outputs["blessing"],
            space_config={
                "app_path": "apps.gradio.semantic_segmentation"
            }
        )
        ```
        """
        pushed_model = types.Channel(type=standard_artifacts.PushedModel)
        spec = HFPusherSpec(
            username=username,
            access_token=access_token,
            repo_name=repo_name,
            space_config=space_config,
            model=model,
            model_blessing=model_blessing,
            pushed_model=pushed_model,
        )
        super().__init__(spec=spec)
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/component_test.py | 
	# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFX HuggingFace Pusher Custom Component."""
import tensorflow as tf
from tfx.types import standard_artifacts
from tfx.types import channel_utils
from pipeline.components.HFPusher.component import HFPusher
class HFPusherTest(tf.test.TestCase):
    def testConstruct(self):
        test_model = channel_utils.as_channel([standard_artifacts.Model()])
        hf_pusher = HFPusher(
            username="test_username",
            access_token="test_access_token",
            repo_name="test_repo_name",
            model=test_model,
        )
        self.assertEqual(
            standard_artifacts.PushedModel.TYPE_NAME,
            hf_pusher.outputs["pushed_model"].type_name,
        )
if __name__ == "__main__":
    tf.test.main()
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/executor.py | 
	# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HF Pusher TFX Component Executor. The HF Pusher Executor calls 
the workflow handler runner.deploy_model_for_hf_hub().
"""
import ast
import time
from typing import Any, Dict, List
from tfx import types
from tfx.components.pusher import executor as tfx_pusher_executor
from tfx.types import artifact_utils, standard_component_specs
from pipeline.components.HFPusher import runner
_USERNAME_KEY = "username"
_ACCESS_TOKEN_KEY = "access_token"
_REPO_NAME_KEY = "repo_name"
_SPACE_CONFIG_KEY = "space_config"
class Executor(tfx_pusher_executor.Executor):
    """Pushes a model and an app to HuggingFace Model and Space Hubs respectively"""
    def Do(
        self,
        input_dict: Dict[str, List[types.Artifact]],
        output_dict: Dict[str, List[types.Artifact]],
        exec_properties: Dict[str, Any],
    ):
        """Overrides the tfx_pusher_executor to leverage some of utility methods
        Args:
          input_dict: Input dict from input key to a list of artifacts, including:
            - model_export: a TFX input channel containing a Model artifact.
            - model_blessing: a TFX input channel containing a ModelBlessing
              artifact.
          output_dict: Output dict from key to a list of artifacts, including:
            - pushed_model: a TFX output channel containing a PushedModel arti
              fact. It contains information where the model is published at an
              d whether the model is pushed or not. furthermore, pushed model
              carries the following information.
              - pushed : integer value to denote if the model is pushed or not.
                This is set to 0 when the input model is not blessed, and it is
                set to 1 when the model is successfully pushed.
              - pushed_version : string value to indicate the current model ver
                sion. This is decided by time.time() Python built-in function.
              - repo_id : model repository ID where the model is pushed to. This
                follows the format of f"{username}/{repo_name}".
              - branch : branch name where the model is pushed to. The branch na
                me is automatically assigned to the same value of pushed_version.
              - commit_id : the id from the commit history (branch name could be
                sufficient to retreive a certain version of the model) of the mo
                del repository.
              - repo_url : model repository URL. It is something like f"https://
                huggingface.co/{repo_id}/{branch}"
              - space_url : space repository URL. It is something like f"https://
                huggingface.co/{repo_id}"f
          exec_properties: An optional dict of execution properties, including:
            - username: username of the HuggingFace user (can be an individual
              user or an organization)
            - access_token: access token value issued by HuggingFace for the s
              pecified username.
            - repo_name: the repository name to push the current version of the
              model to. The default value is same as the TFX pipeline name.
            - space_config: space_config carries additional values such as:
              - app_path : path where the application templates are in the cont
                ainer that runs the TFX pipeline. This is expressed either apps.
                `gradio.img_classifier` or `apps/gradio.img_classifier`.
              - repo_name : the repository name to push the application to. The
                default value is same as the TFX pipeline name
              - space_sdk : either gradio or streamlit. this will decide which a
                pplication framework to be used for the Space repository. The de
                fault value is gradio
              - placeholders : dictionary which placeholders to replace with mod
                el specific information. The keys represents descriptions, and t
                he values represents the actual placeholders to replace in the f
                iles under the `app_path`. There are currently two predefined keys,
                and if placeholders is set to None, the default values will be used.
        """
        self._log_startup(input_dict, output_dict, exec_properties)
        model_push = artifact_utils.get_single_instance(
            output_dict[standard_component_specs.PUSHED_MODEL_KEY]
        )
        # if the model is not blessed
        if not self.CheckBlessing(input_dict):
            self._MarkNotPushed(model_push)
            return
        model_path = self.GetModelPath(input_dict)
        model_version_name = f"v{int(time.time())}"
        space_config = exec_properties.get(_SPACE_CONFIG_KEY, None)
        if space_config is not None:
            space_config = ast.literal_eval(space_config)
        pushed_properties = runner.deploy_model_for_hf_hub(
            username=exec_properties.get(_USERNAME_KEY, None),
            access_token=exec_properties.get(_ACCESS_TOKEN_KEY, None),
            repo_name=exec_properties.get(_REPO_NAME_KEY, None),
            space_config=space_config,
            model_path=model_path,
            model_version=model_version_name,
        )
        self._MarkPushed(model_push, pushed_destination=pushed_properties["repo_url"])
        for key in pushed_properties:
            value = pushed_properties[key]
            if key != "repo_url":
                model_push.set_string_custom_property(key, value)
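
# A minimal sketch (not part of the component) of the exec_properties this
# executor consumes; every concrete value below is hypothetical.
#
#     exec_properties = {
#         "username": "my-hf-user",                  # user or organization
#         "access_token": "<HF_WRITE_TOKEN>",
#         "repo_name": "semantic-segmentation",      # defaults to the pipeline name
#         "space_config": {
#             "app_path": "apps.gradio.semantic_segmentation",
#             "space_sdk": "gradio",
#             "placeholders": None,  # None -> use the default $MODEL_* placeholders
#         },
#     }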
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/model_card.py | 
	from huggingface_hub import ModelCard, ModelCardData
def create_card(template_path, model_metadata, **template_kwargs):
    """Creates model card.
    Args:
        template_path (str): Path to the Jinja template the model card is based on.
        model_metadata (dict): Dict of card metadata.
        Refer to the link below to see what you can pass to the metadata section:
        https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md
    Returns:
        Model card (ModelCard): an instantiated and filled ModelCard.
    """
    if model_metadata is None:
        model_metadata = {}
    model_metadata["library_name"] = "tfx"
    if "tags" in model_metadata:
        model_metadata["tags"].append("keras")
    else:
        model_metadata["tags"] = "keras"
    card_data = ModelCardData(**{v: k for k, v in model_metadata.items()})
    model_card = ModelCard.from_template(card_data,
                                        template_path,
                                        **template_kwargs)
    return model_card
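
# A minimal usage sketch (assumes a Jinja template named "model_card_template.md"
# sits next to the pipeline code, as in the HFPusher runner; the metadata values
# and the model_id template variable below are hypothetical):
#
#     card = create_card(
#         template_path="model_card_template.md",
#         model_metadata={"license": "apache-2.0", "tags": ["image-segmentation"]},
#         model_id="my-user/my-model",
#     )
#     print(str(card))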
    
 | 
| 
	deep-diver/semantic-segmentation-ml-pipeline | 
	training_pipeline/pipeline/components/HFPusher/runner.py | 
	# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""HuggingFace Pusher runner module.
This module handles the workflow to publish
machine learning model to HuggingFace Hub.
"""
from typing import Text, Any, Dict, Optional
import mimetypes
import tempfile
import tensorflow as tf
from absl import logging
from tfx.utils import io_utils
from pipeline.components.HFPusher.model_card import create_card
from pathlib import Path
from huggingface_hub import Repository
from huggingface_hub import HfApi
from requests.exceptions import HTTPError
_MODEL_REPO_KEY = "MODEL_REPO_ID"
_MODEL_URL_KEY = "MODEL_REPO_URL"
_MODEL_VERSION_KEY = "MODEL_VERSION"
_DEFAULT_MODEL_REPO_PLACEHOLDER_KEY = "$MODEL_REPO_ID"
_DEFAULT_MODEL_URL_PLACEHOLDER_KEY = "$MODEL_REPO_URL"
_DEFAULT_MODEL_VERSION_PLACEHOLDER_KEY = "$MODEL_VERSION"
def _is_text_file(path):
    mimetype = mimetypes.guess_type(path)
    if mimetype[0] is not None:
        return "text" in mimetype[0]
    return False
def _replace_placeholders_in_files(
    root_dir: str, placeholder_to_replace: Dict[str, str]
):
    """Recursively open every files under the root_dir, and then
    replace special tokens with the given values in placeholder_
    to_replace"""
    files = tf.io.gfile.listdir(root_dir)
    for file in files:
        path = tf.io.gfile.join(root_dir, file)
        if tf.io.gfile.isdir(path):
            _replace_placeholders_in_files(path, placeholder_to_replace)
        else:
            _replace_placeholders_in_file(path, placeholder_to_replace)
def _replace_placeholders_in_file(
    filepath: str, placeholder_to_replace: Dict[str, str]
):
    """replace special tokens with the given values in placeholder_
    to_replace. This function gets called by _replace_placeholders
    _in_files function"""
    if _is_text_file(filepath):
        with tf.io.gfile.GFile(filepath, "r") as f:
            source_code = f.read()
        for placeholder in placeholder_to_replace:
            source_code = source_code.replace(
                placeholder, placeholder_to_replace[placeholder]
            )
        with tf.io.gfile.GFile(filepath, "w") as f:
            f.write(source_code)
def _replace_placeholders(
    target_dir: str,
    placeholders: Dict[str, str],
    model_repo_id: str,
    model_repo_url: str,
    model_version: str,
):
    """set placeholder_to_replace before calling _replace_placeholde
    rs_in_files function"""
    if placeholders is None:
        placeholders = {
            _MODEL_REPO_KEY: _DEFAULT_MODEL_REPO_PLACEHOLDER_KEY,
            _MODEL_URL_KEY: _DEFAULT_MODEL_URL_PLACEHOLDER_KEY,
            _MODEL_VERSION_KEY: _DEFAULT_MODEL_VERSION_PLACEHOLDER_KEY,
        }
    placeholder_to_replace = {
        placeholders[_MODEL_REPO_KEY]: model_repo_id,
        placeholders[_MODEL_URL_KEY]: model_repo_url,
        placeholders[_MODEL_VERSION_KEY]: model_version,
    }
    _replace_placeholders_in_files(target_dir, placeholder_to_replace)
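# Illustrative sketch: with placeholders=None, a file in target_dir containing the
# literal tokens "$MODEL_REPO_ID", "$MODEL_REPO_URL" and "$MODEL_VERSION" would be
# rewritten with, e.g., "user/repo", "https://huggingface.co/user/repo" and
# "v1670000000" respectively (the example values here are hypothetical).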
def _replace_files(src_path, dst_path):
    """Replace the contents(files/folders) of the repository with the
    latest contents.
    Args:
        src_path (path or str): Path of the source repository where latest
        content exists.
        dst_path (path or str): Path to the destination repository that
        contains old files.
    """
    not_to_delete = [".gitattributes", ".git"]
    inside_root_dst_path = tf.io.gfile.listdir(dst_path)
    for content_name in inside_root_dst_path:
        content = f"{dst_path}/{content_name}"
        if content_name not in not_to_delete:
            if tf.io.gfile.isdir(content):
                tf.io.gfile.rmtree(content)
            else:
                tf.io.gfile.remove(content)
    inside_root_src_path = tf.io.gfile.listdir(src_path)
    for content_name in inside_root_src_path:
        content = f"{src_path}/{content_name}"
        dst_content = f"{dst_path}/{content_name}"
        if tf.io.gfile.isdir(content):
            io_utils.copy_dir(content, dst_content)
        else:
            tf.io.gfile.copy(content, dst_content)
def _create_remote_repo(
    access_token: str, repo_id: str, repo_type: str = "model", space_sdk: str = None
):
    """Create a remote repository on HuggingFace Hub platform. HTTPError
    exception is raised when the repository already exists
    Args:
        access_token (str): Hugging Face Hub token with write access.
        repo_id (str): Repository ID to push the repository.
        repo_type (str, optional): Repository type. Defaults to "model".
        space_sdk (str, optional): SDK for Space. Defaults to None.
    """
    logging.info(f"repo_id: {repo_id}")
    try:
        HfApi().create_repo(
            token=access_token,
            repo_id=repo_id,
            repo_type=repo_type,
            space_sdk=space_sdk,
        )
    except HTTPError:
        logging.warning(
            f"this warning is expected if {repo_id} repository already exists"
        )
def _clone_and_checkout(
    repo_url: str, local_path: str, access_token: str, version: Optional[str] = None
) -> Repository:
    """clone the remote repository to the given local_path"""
    repository = Repository(
        local_dir=local_path, clone_from=repo_url, use_auth_token=access_token
    )
    if version is not None:
        repository.git_checkout(revision=version, create_branch_ok=True)
    return repository
def _push_to_remote_repo(repo: Repository, commit_msg: str, branch: str = "main"):
    """Push any changes to the remote repository
    Args:
        repo (Repository): Repository class from huggingface_hub.
        commit_msg (str): Commit message.
        branch (str, optional): Branch to push the model. Defaults to "main".
    """
    repo.git_add(pattern=".", auto_lfs_track=True)
    repo.git_commit(commit_message=commit_msg)
    repo.git_push(upstream=f"origin {branch}")
def deploy_model_for_hf_hub(
    username: str,
    access_token: str,
    repo_name: str,
    model_path: str,
    model_version: str,
    space_config: Optional[Dict[Text, Any]] = None,
    model_metadata: Optional[Dict] = None,
    **template_kwargs,
) -> Dict[str, str]:
    """Executes ML model deployment workflow to HuggingFace Hub. Refer to the
    HFPusher component in component.py for generic description of each parame
    ter. This docstring only explains how the workflow works.
    step 1. push model to the Model Hub
    step 1-1.
        create a repository on the HuggingFace Hub. if there is an existing r
        epository with the given repo_name, that rpository will be overwritten.
    step 1-2.
        clone the created or existing remote repository to the local path. Al
        so, create a branch named with model version.
    step 1-3.
        remove every files under the cloned repository(local), and copies the
        model related files to the cloned local repository path.
    step 1-4
        write model card.
    step 1-5.
        push the updated repository to the given branch of remote Model Hub.
    step 2. push application to the Space Hub
    step 2-1.
        create a repository on the HuggingFace Hub. if there is an existing r
        epository with the given repo_name, that rpository will be overwritten.
    step 2-2.
        copies directory where the application related files are stored to a
        temporary directory. Since the files could be hosted in GCS bucket, t
        his process ensures every necessary files are located in the local fil
        e system.
    step 2-3.
        replace speical tokens in every files under the given directory.
    step 2-4.
        clone the created or existing remote repository to the local path.
    step 2-5.
        remove every files under the cloned repository(local), and copies the
        application related files to the cloned local repository path.
    step 2-6.
        push the updated repository to the remote Space Hub. note that the br
        anch is always set to "main", so that HuggingFace Space could build t
        he application automatically when pushed.
    
    Args:
        username (str): Hugging Face Hub username.
        access_token (str): Hugging Face Hub access token.
        repo_name (str): Name of the repository.
        model_path (str): Path to model file.
        model_version (str): Model version.
        space_config (dict): Configuration for Space.
        model_metadata (dict): Metadata for model card.
        
    """
    outputs = {}
    # step 1
    repo_url_prefix = "https://huggingface.co"
    repo_id = f"{username}/{repo_name}"
    repo_url = f"{repo_url_prefix}/{repo_id}"
    # step 1-1
    _create_remote_repo(access_token=access_token, repo_id=repo_id)
    logging.info(f"remote repository at {repo_url} is prepared")
    # step 1-2
    local_path = "hf_model"
    repository = _clone_and_checkout(
        repo_url=repo_url,
        local_path=local_path,
        access_token=access_token,
        version=model_version,
    )
    logging.info(
        f"remote repository is cloned, and new branch {model_version} is created"
    )
    # step 1-3
    if not model_metadata:
        model_metadata = {}
    card = create_card(
        model_metadata=model_metadata,
        template_path="model_card_template.md",
        **{"model_id": _MODEL_REPO_KEY, **template_kwargs},
    )
    with open(Path(model_path) / "README.md", "w+") as fp:
        fp.write(str(card))
    # step 1-4
    _replace_files(model_path, local_path)
    logging.info(
        "current version of the model is copied to the cloned local repository"
    )
    # step 1-5
    _push_to_remote_repo(
        repo=repository,
        commit_msg=f"upload new version({model_version})",
        branch=model_version,
    )
    logging.info("updates are pushed to the remote repository")
    outputs["repo_id"] = repo_id
    outputs["branch"] = model_version
    outputs["commit_id"] = f"{repository.git_head_hash()}"
    outputs["repo_url"] = repo_url
    # step 2
    if space_config is not None:
        if "app_path" not in space_config:
            raise RuntimeError(
                f"the app_path is not provided. "
                f"app_path is required when space_config is set."
            )
        model_repo_id = repo_id
        model_repo_url = repo_url
        if "repo_name" in space_config:
            repo_id = f"{username}/{repo_name}"
            repo_url = f"{repo_url_prefix}/{repo_id}"
        else:
            repo_url = f"{repo_url_prefix}/spaces/{repo_id}"
        app_path = space_config["app_path"]
        app_path = app_path.replace(".", "/")
        # step 2-1
        _create_remote_repo(
            access_token=access_token,
            repo_id=repo_id,
            repo_type="space",
            space_sdk=space_config["space_sdk"]
            if "space_sdk" in space_config
            else "gradio",
        )
        # step 2-2
        tmp_dir = tempfile.mkdtemp()
        io_utils.copy_dir(app_path, tmp_dir)
        # step 2-3
        _replace_placeholders(
            target_dir=tmp_dir,
            placeholders=space_config["placeholders"]
            if "placeholders" in space_config
            else None,
            model_repo_id=model_repo_id,
            model_repo_url=model_repo_url,
            model_version=model_version,
        )
        # step 2-4
        local_path = "hf_space"
        repository = _clone_and_checkout(
            repo_url=repo_url,
            local_path=local_path,
            access_token=access_token,
        )
        # step 2-5
        _replace_files(tmp_dir, local_path)
        # step 2-6
        _push_to_remote_repo(
            repo=repository,
            commit_msg=f"upload {model_version} model",
        )
        outputs["space_url"] = repo_url
    return outputs
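
# A minimal usage sketch (every value below is hypothetical; within the pipeline
# this function is invoked by the HFPusher executor rather than called directly):
#
#     outputs = deploy_model_for_hf_hub(
#         username="my-hf-user",
#         access_token="<HF_WRITE_TOKEN>",
#         repo_name="semantic-segmentation",
#         model_path="gs://my-bucket/pipeline_root/Pusher/pushed_model/1",
#         model_version="v1670000000",
#         space_config={"app_path": "apps.gradio.semantic_segmentation"},
#     )
#     # outputs -> {"repo_id", "branch", "commit_id", "repo_url", "space_url"}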
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	cloud_build_tfx.ipynb | 
	from google.colab import auth
auth.authenticate_user()GOOGLE_CLOUD_PROJECT = "fast-ai-exploration"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "vertex-tfx-mlops"
PIPELINE_NAME = "penguin-vertex-training"
DATA_ROOT = "gs://{}/data/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
MODULE_ROOT = "gs://{}/pipeline_module/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
    from absl import logging
    logging.error("Please set all required parameters.")_trainer_module_file = 'penguin_trainer.py'%%writefile {_trainer_module_file}
# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple and
# slightly modified run_fn() to add distribution_strategy.
from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
_FEATURE_KEYS = [
    "culmen_length_mm",
    "culmen_depth_mm",
    "flipper_length_mm",
    "body_mass_g",
]
_LABEL_KEY = "species"
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
# Since we're not generating or creating a schema, we will instead create
# a feature spec.  Since there are a fairly small number of features this is
# manageable for this dataset.
_FEATURE_SPEC = {
    **{
        feature: tf.io.FixedLenFeature(shape=[1], dtype=tf.float32)
        for feature in _FEATURE_KEYS
    },
    _LABEL_KEY: tf.io.FixedLenFeature(shape=[1], dtype=tf.int64),
}
def _input_fn(
    file_pattern: List[str],
    data_accessor: tfx.components.DataAccessor,
    schema: schema_pb2.Schema,
    batch_size: int,
) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
      file_pattern: List of paths or patterns of input tfrecord files.
      data_accessor: DataAccessor for converting input to RecordBatch.
      schema: schema of the input data.
      batch_size: representing the number of consecutive elements of returned
        dataset to combine in a single batch
    Returns:
      A dataset that contains (features, indices) tuple where features is a
        dictionary of Tensors, and indices is a single Tensor of label indices.
    """
    return data_accessor.tf_dataset_factory(
        file_pattern,
        tfxio.TensorFlowDatasetOptions(batch_size=batch_size, label_key=_LABEL_KEY),
        schema=schema,
    ).repeat()
def _make_keras_model(learning_rate: float) -> tf.keras.Model:
    """Creates a DNN Keras model for classifying penguin data.
    Returns:
      A Keras Model.
    """
    # The model below is built with Functional API, please refer to
    # https://www.tensorflow.org/guide/keras/overview for all API options.
    inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS]
    d = keras.layers.concatenate(inputs)
    for _ in range(2):
        d = keras.layers.Dense(8, activation="relu")(d)
    outputs = keras.layers.Dense(3)(d)
    model = keras.Model(inputs=inputs, outputs=outputs)
    optimizer = keras.optimizers.Adam(learning_rate)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
# NEW: Read `use_gpu` from the custom_config of the Trainer.
#      if it uses GPU, enable MirroredStrategy.
def _get_distribution_strategy(fn_args: tfx.components.FnArgs):
    if fn_args.custom_config.get("use_gpu", False):
        logging.info("Using MirroredStrategy with one GPU.")
        return tf.distribute.MirroredStrategy(devices=["device:GPU:0"])
    return None
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
      fn_args: Holds args used to train the model as name/value pairs.
    """
    # This schema is usually either an output of SchemaGen or a manually-curated
    # version provided by pipeline author. A schema can also derived from TFT
    # graph if a Transform component is used. In the case when either is missing,
    # `schema_from_feature_spec` could be used to generate schema from very simple
    # feature_spec, but the schema returned would be very primitive.
    schema = schema_utils.schema_from_feature_spec(_FEATURE_SPEC)
    hyperparameters = fn_args.hyperparameters
    logging.info("Hyperparameters:")
    logging.info(hyperparameters)
    train_dataset = _input_fn(
        fn_args.train_files, fn_args.data_accessor, schema, batch_size=_TRAIN_BATCH_SIZE
    )
    eval_dataset = _input_fn(
        fn_args.eval_files, fn_args.data_accessor, schema, batch_size=_EVAL_BATCH_SIZE
    )
    # NEW: If we have a distribution strategy, build a model in a strategy scope.
    strategy = _get_distribution_strategy(fn_args)
    if strategy is None:
        model = _make_keras_model(hyperparameters["learning_rate"])
    else:
        with strategy.scope():
            model = _make_keras_model(hyperparameters["learning_rate"])
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=hyperparameters["num_epochs"],
    )
    # The result of the training should be saved in `fn_args.serving_model_dir`
    # directory.
    model.save(fn_args.serving_model_dir, save_format="tf")REPO_URL = "https://github.com/sayakpaul/CI-CD-for-Model-Training"
BRANCH = "dev"
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
SERVING_MODEL_DIR = "gs://{}/serving_model/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
VERSION = "1.0.0"
CICD_IMAGE_URI = f"gcr.io/tfx-oss-public/tfx:{VERSION}"
TFX_IMAGE_URI = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{PIPELINE_NAME}:{VERSION}"SUBSTITUTIONS=f"""\
_REPO_URL='{REPO_URL}',\
_BRANCH={BRANCH},\
_PROJECT={GOOGLE_CLOUD_PROJECT},\
_REGION={GOOGLE_CLOUD_REGION},\
_PIPELINE_NAME={PIPELINE_NAME},\
_PIPELINE_ROOT={PIPELINE_ROOT},\
_MODULE_ROOT={MODULE_ROOT},\
_DATA_ROOT={DATA_ROOT},\
_SERVING_MODEL_DIR={SERVING_MODEL_DIR},\
_CICD_IMAGE_URI={CICD_IMAGE_URI},\
_TFX_IMAGE_URI={TFX_IMAGE_URI}
"""
!echo $SUBSTITUTIONS | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	cloud_function_trigger.ipynb | 
	from google.colab import auth
auth.authenticate_user()GOOGLE_CLOUD_PROJECT = "fast-ai-exploration"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "vertex-tfx-mlops"
PIPELINE_NAME = "penguin-vertex-training"
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
PIPELINE_LOCATION = f"{PIPELINE_ROOT}/{PIPELINE_NAME}.json"
PUBSUB_TOPIC = f"trigger-{PIPELINE_NAME}"
DATA_ROOT = "gs://{}/data/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
MODULE_ROOT = "gs://{}/pipeline_module/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
    from absl import logging
    logging.error("Please set all required parameters.")ENV_VARS=f"""\
PROJECT={GOOGLE_CLOUD_PROJECT},\
REGION={GOOGLE_CLOUD_REGION},\
GCS_PIPELINE_FILE_LOCATION={PIPELINE_LOCATION}
"""
!echo {ENV_VARS}BUCKET = f'gs://{GCS_BUCKET_NAME}'
CLOUD_FUNCTION_NAME = f'trigger-{PIPELINE_NAME}-fn'
!gcloud functions deploy {CLOUD_FUNCTION_NAME} \
    --region={GOOGLE_CLOUD_REGION} \
    --trigger-topic={PUBSUB_TOPIC} \
    --runtime=python37 \
    --source=cloud_function\
    --entry-point=trigger_pipeline\
    --stage-bucket={BUCKET}\
    --update-env-vars={ENV_VARS}
# `trigger_pipeline` is the name of the function inside
# `cloud_function/main.py`import IPython
cloud_fn_url = f"https://console.cloud.google.com/functions/details/{GOOGLE_CLOUD_REGION}/{CLOUD_FUNCTION_NAME}"
html = (
    f'See the Cloud Function details <a href="{cloud_fn_url}" target="_blank">here</a>.'
)
IPython.display.display(IPython.display.HTML(html))from google.cloud import pubsub
import json
publish_client = pubsub.PublisherClient()
topic = f"projects/{GOOGLE_CLOUD_PROJECT}/topics/{PUBSUB_TOPIC}"
data = {"num_epochs": 3, "learning_rate": 1e-2}
message = json.dumps(data)
_ = publish_client.publish(topic, message.encode()) | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	cloud_scheduler_trigger.ipynb | 
	# only need if you are using Colab
from google.colab import auth
auth.authenticate_user()GOOGLE_CLOUD_PROJECT = "gcp-ml-172005"
GOOGLE_CLOUD_REGION = "us-central1"
PIPELINE_NAME = "penguin-vertex-training"
PUBSUB_TOPIC = f"trigger-{PIPELINE_NAME}"
SCHEDULER_JOB_NAME = "MLOpsJob"import json
data = '{"num_epochs": "3", "learning_rate": "1e-2"}'
data = json.dumps(data)import json
from google.cloud import scheduler_v1
from google.cloud.scheduler_v1.types.target import PubsubTarget
from google.cloud.scheduler_v1.types.job import Job
from google.cloud.scheduler_v1.types.cloudscheduler import CreateJobRequest
client = scheduler_v1.CloudSchedulerClient.from_service_account_json(
    r"./gcp-ml-172005-528977a75f85.json")parent = client.common_location_path(GOOGLE_CLOUD_PROJECT, GOOGLE_CLOUD_REGION)
data = {"num_epochs": "3", "learning_rate": "1e-2"}
data = json.dumps(data).encode('utf-8')
pubsub_target = PubsubTarget(
    topic_name=f"projects/{GOOGLE_CLOUD_PROJECT}/topics/{PUBSUB_TOPIC}", 
    data=data)
job = Job(name=f"projects/{GOOGLE_CLOUD_PROJECT}/locations/{GOOGLE_CLOUD_REGION}/jobs/training_for_model", 
          pubsub_target=pubsub_target, 
          schedule="*/3 * * * *")
req = CreateJobRequest(parent=parent, job=job)result_job = client.create_job(req)result_job | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	build/compile_pipeline.py | 
	import argparse
from absl import logging
from create_pipeline import create_pipeline
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow.v2 import kubeflow_v2_dag_runner
import os
import sys
SCRIPT_DIR = os.path.dirname(
    os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
)
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, "..")))
from utils import config
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--use-gpu",
        type=str,
        required=False,
        default="False"
    )
    return parser.parse_args()
def compile_pipeline(args):
    pipeline_definition_file = config.PIPELINE_NAME + ".json"
    runner = kubeflow_v2_dag_runner.KubeflowV2DagRunner(
        config=kubeflow_v2_dag_runner.KubeflowV2DagRunnerConfig(
                default_image=config.TFX_IMAGE_URI
            ),
        output_filename=pipeline_definition_file,
    )
    use_gpu = args.use_gpu == "True"
    return runner.run(
        create_pipeline(
            num_epochs=data_types.RuntimeParameter(name="num_epochs", ptype=int),
            learning_rate=data_types.RuntimeParameter(name="learning_rate", ptype=float),
            use_gpu=use_gpu,
        ),
        write_out=True,
    )
def main():
    args = get_args()
    result = compile_pipeline(args)
    logging.info(result)
if __name__ == "__main__":
    main()
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	build/create_pipeline.py | 
	from tfx.orchestration import data_types
from tfx import v1 as tfx
import os
import sys
SCRIPT_DIR = os.path.dirname(
    os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
)
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, "..")))
from utils import config, custom_components
def create_pipeline(
    num_epochs: data_types.RuntimeParameter,
    learning_rate: data_types.RuntimeParameter,
    use_gpu: bool,
) -> tfx.dsl.Pipeline:
    """Implements the penguin pipeline with TFX."""
    # Brings data into the pipeline or otherwise joins/converts training data.
    example_gen = tfx.components.CsvExampleGen(input_base=config.DATA_ROOT)
    # Generate hyperparameters.
    hyperparams_gen = custom_components.hyperparameters_gen(
        num_epochs=num_epochs,
        learning_rate=learning_rate
    ).with_id("HyperparamsGen")
    # NEW: Configuration for Vertex AI Training.
    # This dictionary will be passed as `CustomJobSpec`.
    vertex_job_spec = {
        "project": config.GCP_PROJECT,
        "worker_pool_specs": [
            {
                "machine_spec": {
                    "machine_type": "n1-standard-4",
                },
                "replica_count": 1,
                "container_spec": {
                    "image_uri": "gcr.io/tfx-oss-public/tfx:{}".format(tfx.__version__),
                },
            }
        ],
    }
    if use_gpu:
        # See https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#acceleratortype
        # for available machine types.
        vertex_job_spec["worker_pool_specs"][0]["machine_spec"].update(
            {"accelerator_type": "NVIDIA_TESLA_K80", "accelerator_count": 1}
        )
    # Trains a model using Vertex AI Training.
    # NEW: We need to specify a Trainer for GCP with related configs.
    trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=config.MODULE_FILE,
        examples=example_gen.outputs["examples"],
        train_args=tfx.proto.TrainArgs(num_steps=100),
        eval_args=tfx.proto.EvalArgs(num_steps=5),
        hyperparameters=hyperparams_gen.outputs["hyperparameters"],
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_UCAIP_KEY: True,
            tfx.extensions.google_cloud_ai_platform.UCAIP_REGION_KEY: config.GCP_REGION,
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: vertex_job_spec,
            "use_gpu": use_gpu,
        },
    )
    # Pushes the model to a filesystem destination.
    pusher = tfx.components.Pusher(
        model=trainer.outputs["model"],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=config.SERVING_MODEL_DIR
            )
        ),
    )
    components = [
        example_gen,
        hyperparams_gen,
        trainer,
        pusher,
    ]
    return tfx.dsl.Pipeline(
        pipeline_name=config.PIPELINE_NAME, pipeline_root=config.PIPELINE_ROOT, components=components
    )
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	build/penguin_trainer.py | 
	# Copied from https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple and
# slightly modified run_fn() to add distribution_strategy.
from typing import List
from absl import logging
import tensorflow as tf
from tensorflow import keras
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
_FEATURE_KEYS = [
    "culmen_length_mm",
    "culmen_depth_mm",
    "flipper_length_mm",
    "body_mass_g",
]
_LABEL_KEY = "species"
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
# Since we're not generating or creating a schema, we will instead create
# a feature spec.  Since there are a fairly small number of features this is
# manageable for this dataset.
_FEATURE_SPEC = {
    **{
        feature: tf.io.FixedLenFeature(shape=[1], dtype=tf.float32)
        for feature in _FEATURE_KEYS
    },
    _LABEL_KEY: tf.io.FixedLenFeature(shape=[1], dtype=tf.int64),
}
def _input_fn(
    file_pattern: List[str],
    data_accessor: tfx.components.DataAccessor,
    schema: schema_pb2.Schema,
    batch_size: int,
) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
      file_pattern: List of paths or patterns of input tfrecord files.
      data_accessor: DataAccessor for converting input to RecordBatch.
      schema: schema of the input data.
      batch_size: representing the number of consecutive elements of returned
        dataset to combine in a single batch
    Returns:
      A dataset that contains (features, indices) tuple where features is a
        dictionary of Tensors, and indices is a single Tensor of label indices.
    """
    return data_accessor.tf_dataset_factory(
        file_pattern,
        tfxio.TensorFlowDatasetOptions(batch_size=batch_size, label_key=_LABEL_KEY),
        schema=schema
    ).repeat()
def _make_keras_model(learning_rate: float) -> tf.keras.Model:
    """Creates a DNN Keras model for classifying penguin data.
    Returns:
      A Keras Model.
    """
    # The model below is built with Functional API, please refer to
    # https://www.tensorflow.org/guide/keras/overview for all API options.
    inputs = [keras.layers.Input(shape=(1,), name=f) for f in _FEATURE_KEYS]
    d = keras.layers.concatenate(inputs)
    for _ in range(2):
        d = keras.layers.Dense(8, activation="relu")(d)
    outputs = keras.layers.Dense(3)(d)
    model = keras.Model(inputs=inputs, outputs=outputs)
    optimizer = keras.optimizers.Adam(learning_rate)
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
# NEW: Read `use_gpu` from the custom_config of the Trainer.
#      if it uses GPU, enable MirroredStrategy.
def _get_distribution_strategy(fn_args: tfx.components.FnArgs):
    if fn_args.custom_config.get("use_gpu", False):
        logging.info("Using MirroredStrategy with one GPU.")
        return tf.distribute.MirroredStrategy(devices=["device:GPU:0"])
    return None
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
      fn_args: Holds args used to train the model as name/value pairs.
    """
    # This schema is usually either an output of SchemaGen or a manually-curated
    # version provided by pipeline author. A schema can also derived from TFT
    # graph if a Transform component is used. In the case when either is missing,
    # `schema_from_feature_spec` could be used to generate schema from very simple
    # feature_spec, but the schema returned would be very primitive.
    schema = schema_utils.schema_from_feature_spec(_FEATURE_SPEC)
    hyperparameters = fn_args.hyperparameters
    logging.info("Hyperparameters:")
    logging.info(hyperparameters)
    train_dataset = _input_fn(
        fn_args.train_files, fn_args.data_accessor, schema, batch_size=_TRAIN_BATCH_SIZE
    )
    eval_dataset = _input_fn(
        fn_args.eval_files, fn_args.data_accessor, schema, batch_size=_EVAL_BATCH_SIZE
    )
    # NEW: If we have a distribution strategy, build a model in a strategy scope.
    strategy = _get_distribution_strategy(fn_args)
    if strategy is None:
        model = _make_keras_model(hyperparameters["learning_rate"])
    else:
        with strategy.scope():
            model = _make_keras_model(hyperparameters["learning_rate"])
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=hyperparameters["num_epochs"],
    )
    # The result of the training should be saved in `fn_args.serving_model_dir`
    # directory.
    model.save(fn_args.serving_model_dir, save_format="tf")
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	cloud_function/main.py | 
	# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Function to be triggered by Pub/Sub."""
import os
import json
import logging
import base64
from kfp.v2.google.client import AIPlatformClient
from google.cloud import storage
def trigger_pipeline(event, context):
    # Parse the environment variables.
    project = os.getenv("PROJECT")
    region = os.getenv("REGION")
    gcs_pipeline_file_location = os.getenv("GCS_PIPELINE_FILE_LOCATION")
    
    if not project:
        raise ValueError("Environment variable PROJECT is not set.")
    if not region:
        raise ValueError("Environment variable REGION is not set.")
    if not gcs_pipeline_file_location:
        raise ValueError("Environment variable GCS_PIPELINE_FILE_LOCATION is not set.")

    # Check if the pipeline file exists in the provided GCS Bucket.
    storage_client = storage.Client()
    path_parts = gcs_pipeline_file_location.replace("gs://", "").split("/")
    bucket_name = path_parts[0]
    blob_name = "/".join(path_parts[1:])
    bucket = storage_client.bucket(bucket_name)
    blob = storage.Blob(bucket=bucket, name=blob_name)
    if not blob.exists(storage_client):
        raise ValueError(f"{gcs_pipeline_file_location} does not exist.")
    
    # Parse the data from the Pub/Sub trigger message.
    data = base64.b64decode(event["data"]).decode("utf-8")
    logging.info(f"Event data: {data}")
    parameter_values = json.loads(data)
    
    # Initialize Vertex AI API client and submit for pipeline execution.
    api_client = AIPlatformClient(project_id=project, region=region)
    response = api_client.create_run_from_job_spec(
        job_spec_path=gcs_pipeline_file_location,
        parameter_values=parameter_values,
        enable_caching=True,
    )
    logging.info(response)
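
# Sketch of the Pub/Sub event this function expects (grounded in the companion
# notebook, which publishes {"num_epochs": 3, "learning_rate": 1e-2} to the topic;
# the local invocation below is illustrative only):
#
#     event = {"data": base64.b64encode(b'{"num_epochs": 3, "learning_rate": 1e-2}')}
#     trigger_pipeline(event, context=None)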
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	utils/config.py | 
	import os
# GCP
GCP_PROJECT = os.getenv("PROJECT")
GCP_REGION = os.getenv("REGION")
# Data
DATA_ROOT = os.getenv("DATA_ROOT")
# Training and serving
TFX_IMAGE_URI = os.getenv("TFX_IMAGE_URI")
MODULE_ROOT = os.getenv("MODULE_ROOT")
MODULE_FILE = os.path.join(MODULE_ROOT, "penguin_trainer.py")
SERVING_MODEL_DIR = os.getenv("SERVING_MODEL_DIR")
# Pipeline
PIPELINE_NAME = os.getenv("PIPELINE_NAME")
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
 | 
| 
	sayakpaul/CI-CD-for-Model-Training | 
	utils/custom_components.py | 
	"""
Taken from:
    * https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/src/tfx_pipelines/components.py#L51
"""
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import (
    InputArtifact,
    OutputArtifact,
    Parameter,
)
from tfx.types.standard_artifacts import HyperParameters
from tfx.types import artifact_utils
from tfx.utils import io_utils
import logging
import json
import os
@component
def hyperparameters_gen(
    num_epochs: Parameter[int],
    learning_rate: Parameter[float],
    hyperparameters: OutputArtifact[HyperParameters],
):
    hp_dict = dict()
    hp_dict["num_epochs"] = num_epochs
    hp_dict["learning_rate"] = learning_rate
    logging.info(f"Hyperparameters: {hp_dict}")
    hyperparams_uri = os.path.join(
        artifact_utils.get_single_uri([hyperparameters]), "hyperparameters.json"
    )
    io_utils.write_string_file(hyperparams_uri, json.dumps(hp_dict))
    logging.info(f"Hyperparameters are written to: {hyperparams_uri}")
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	custom_components/firebase_publisher.py | 
	"""
Custom TFX component for Firebase upload.
Author: Chansung Park
"""
from tfx import types
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx import v1 as tfx
from absl import logging
import firebase_admin
from firebase_admin import ml
from firebase_admin import storage
from firebase_admin import credentials
from google.cloud import storage as gcs_storage
@component
def FirebasePublisher(
    pushed_model: tfx.dsl.components.InputArtifact[
        tfx.types.standard_artifacts.PushedModel
    ],
    credential_uri: Parameter[str],
    firebase_dest_gcs_bucket: Parameter[str],
    model_display_name: Parameter[str],
    model_tag: Parameter[str],
) -> tfx.dsl.components.OutputDict(result=str):
    """
    publish trained tflite model to Firebase ML, this component assumes that 
    trained model and Firebase credential files are stored in GCS locations.
    
    Args:   
        pushed_model: The URI of pushed model obtained from previous component (i.e. Pusher)
        credential_uri: The URI of Firebase credential. In order to get one, go to Firebase dashboard 
            and on the Settings page, create a service account and download the service account key file. 
            Keep this file safe, since it grants administrator access to your project.
        firebase_dest_gcs_bucket: GCS bucket where the model is going to be temporarily stored.
            In order to create one, go to Firebase dashboard and on the Storage page, enable Cloud Storage. 
            Take note of your bucket name.
        model_display_name: The name to be appeared on Firebase ML dashboard
        model_tag: The tage name to be appeared on Firebase ML dashboard
    """
    
    model_uri = f"{pushed_model.uri}/model.tflite"
    
    assert model_uri.split("://")[0] == "gs"
    assert credential_uri.split("://")[0] == "gs"
    # create gcs client instance
    gcs_client = gcs_storage.Client()
    # get credential for firebase
    credential_gcs_bucket = credential_uri.split("//")[1].split("/")[0]
    credential_blob_path = "/".join(credential_uri.split("//")[1].split("/")[1:])
    bucket = gcs_client.bucket(credential_gcs_bucket)
    blob = bucket.blob(credential_blob_path)
    blob.download_to_filename("credential.json")
    logging.info(f"download credential.json from {credential_uri} is completed")
    # get tflite model file
    tflite_gcs_bucket = model_uri.split("//")[1].split("/")[0]
    tflite_blob_path = "/".join(model_uri.split("//")[1].split("/")[1:])
    bucket = gcs_client.bucket(tflite_gcs_bucket)
    blob = bucket.blob(tflite_blob_path)
    blob.download_to_filename("model.tflite")
    logging.info(f"download model.tflite from {model_uri} is completed")
    firebase_admin.initialize_app(
        credentials.Certificate("credential.json"),
        options={"storageBucket": firebase_dest_gcs_bucket},
    )
    logging.info("firebase_admin initialize app is completed")
    model_list = ml.list_models(list_filter=f"display_name={model_display_name}")
    # update
    if len(model_list.models) > 0:
        # get the first match model
        model = model_list.models[0]
        source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
        model.model_format = ml.TFLiteFormat(model_source=source)
        updated_model = ml.update_model(model)
        ml.publish_model(updated_model.model_id)
        logging.info("model exists, so update it in FireBase ML")
        return {"result": "model updated"}
    # create
    else:
        # load a tflite file and upload it to Cloud Storage
        source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
        # create the model object
        tflite_format = ml.TFLiteFormat(model_source=source)
        model = ml.Model(
            display_name=model_display_name,
            tags=[model_tag],
            model_format=tflite_format,
        )
        # Add the model to your Firebase project and publish it
        new_model = ml.create_model(model)
        ml.publish_model(new_model.model_id)
        logging.info("model doesn exists, so create one in FireBase ML")
        return {"result": "model created"}
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	custom_components/flower_densenet_trainer.py | 
	from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "class": tf.io.FixedLenFeature([], tf.int64),
    "one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_CONCRETE_INPUT = "numpy_inputs"
_INPUT_SHAPE = (224, 224, 3)
_TRAIN_BATCH_SIZE = 64
_EVAL_BATCH_SIZE = 64
_EPOCHS = 2
def _parse_fn(example):
    example = tf.io.parse_single_example(example, _IMAGE_FEATURES)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    class_label = tf.cast(example["class"], tf.int32)
    return image, class_label
def _input_fn(file_pattern: List[str], batch_size: int) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch.
    Returns:
        A dataset that contains (features, indices) tuple where features is a
            dictionary of Tensors, and indices is a single Tensor of label indices.
    """
    logging.info(f"Reading data from: {file_pattern}")
    tfrecord_filenames = tf.io.gfile.glob(file_pattern[0] + ".gz")
    dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
    dataset = dataset.map(_parse_fn).batch(batch_size)
    return dataset.repeat()
def _make_keras_model() -> tf.keras.Model:
    """Creates a DenseNet121-based model for classifying flowers data.
    Returns:
    A Keras Model.
    """
    inputs = keras.Input(shape=_INPUT_SHAPE)
    base_model = keras.applications.DenseNet121(
        include_top=False, input_shape=_INPUT_SHAPE, pooling="avg"
    )
    base_model.trainable = False
    x = keras.applications.densenet.preprocess_input(inputs)
    x = base_model(
        x, training=False
    )  # Ensures BatchNorm runs in inference mode in this model
    outputs = keras.layers.Dense(5, activation="softmax")(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
def _preprocess(bytes_input):
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    resized = tf.image.resize(decoded, size=(224, 224))
    return resized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {_CONCRETE_INPUT: decoded_images}
def _model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        [
            tf.TensorSpec(
                shape=[None, 224, 224, 3], dtype=tf.float32, name=_CONCRETE_INPUT
            )
        ]
    )
    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(bytes_inputs):
        # This function comes from the Computer Vision book from O'Reilly.
        labels = tf.constant(
            ["daisy", "dandelion", "roses", "sunflowers", "tulips"], dtype=tf.string
        )
        images = preprocess_fn(bytes_inputs)
        probs = m_call(**images)
        indices = tf.argmax(probs, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)
        pred_confidence = tf.reduce_max(probs, axis=1)
        return {"label": pred_source, "confidence": pred_confidence}
    return serving_fn
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
        fn_args: Holds args used to train the model as name/value pairs.
    """
    train_dataset = _input_fn(fn_args.train_files, batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, batch_size=_EVAL_BATCH_SIZE)
    model = _make_keras_model()
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=_EPOCHS,
    )
    _, acc = model.evaluate(eval_dataset, steps=fn_args.eval_steps)
    logging.info(f"Validation accuracy: {round(acc * 100, 2)}%")
    # The result of the training should be saved in `fn_args.serving_model_dir`
    # directory.
    tf.saved_model.save(
        model,
        fn_args.serving_model_dir,
        signatures={"serving_default": _model_exporter(model)},
    )
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	custom_components/flower_mobilenet_trainer.py | 
	from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "class": tf.io.FixedLenFeature([], tf.int64),
    "one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_INPUT_SHAPE = (224, 224, 3)
_TRAIN_BATCH_SIZE = 64
_EVAL_BATCH_SIZE = 64
_EPOCHS = 2
def _parse_fn(example):
    example = tf.io.parse_single_example(example, _IMAGE_FEATURES)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    class_label = tf.cast(example["class"], tf.int32)
    return image, class_label
def _input_fn(file_pattern: List[str], batch_size: int) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch.
    Returns:
        A dataset that contains (features, indices) tuple where features is a
            dictionary of Tensors, and indices is a single Tensor of label indices.
    """
    logging.info(f"Reading data from: {file_pattern}")
    tfrecord_filenames = tf.io.gfile.glob(file_pattern[0] + ".gz")
    dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
    dataset = dataset.map(_parse_fn).batch(batch_size)
    return dataset.repeat()
def _make_keras_model() -> tf.keras.Model:
    """Creates a MobileNetV3-based model for classifying flowers data.
    Returns:
    A Keras Model.
    """
    inputs = keras.Input(shape=_INPUT_SHAPE)
    base_model = keras.applications.MobileNetV3Small(
        include_top=False, input_shape=_INPUT_SHAPE, pooling="avg"
    )
    base_model.trainable = False
    x = keras.applications.mobilenet_v3.preprocess_input(inputs)
    x = base_model(
        x, training=False
    )  # Ensures BatchNorm runs in inference mode in this model
    outputs = keras.layers.Dense(5, activation="softmax")(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
        fn_args: Holds args used to train the model as name/value pairs.
    """
    train_dataset = _input_fn(fn_args.train_files, batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, batch_size=_EVAL_BATCH_SIZE)
    model = _make_keras_model()
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=_EPOCHS,
    )
    _, acc = model.evaluate(eval_dataset, steps=fn_args.eval_steps)
    logging.info(f"Validation accuracy: {round(acc * 100, 2)}%")
    # Convert the model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # Save the model.
    # The result of the training should be saved in `fn_args.serving_model_dir` directory.
    with tf.io.gfile.GFile(fn_args.serving_model_dir + "/model.tflite", "wb") as f:
        f.write(tflite_model)
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	custom_components/vertex_deployer.py | 
	"""
Custom TFX component for deploying a model to a Vertex AI Endpoint.
Author: Sayak Paul
Reference: https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/build/utils.py#L97
"""
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging
@component
def VertexDeployer(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    deployed_model_display_name: Parameter[str],
):
    logging.info(f"Endpoint display: {deployed_model_display_name}")
    vertex_ai.init(project=project, location=region)
    endpoints = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )
    if len(endpoints) > 0:
        logging.info(f"Endpoint {deployed_model_display_name} already exists.")
        endpoint = endpoints[-1]
    else:
        endpoint = vertex_ai.Endpoint.create(deployed_model_display_name)
    model = vertex_ai.Model.list(
        filter=f"display_name={model_display_name}", order_by="update_time"
    )[-1]
    endpoint = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )[-1]
    deployed_model = endpoint.deploy(
        model=model,
        # Syntax from here: https://git.io/JBQDP
        traffic_split={"0": 100},
        machine_type="n1-standard-4",
        min_replica_count=1,
        max_replica_count=1,
    )
    logging.info(f"Model deployed to: {deployed_model}")
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	custom_components/vertex_uploader.py | 
	"""
Custom TFX component for importing a model into Vertex AI.
Author: Sayak Paul
Reference: https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/src/tfx_pipelines/components.py#L74
"""
import os
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging
@component
def VertexUploader(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    pushed_model_location: Parameter[str],
    serving_image_uri: Parameter[str],
    uploaded_model: tfx.dsl.components.OutputArtifact[String],
):
    vertex_ai.init(project=project, location=region)
    pushed_model_dir = os.path.join(
        pushed_model_location, tf.io.gfile.listdir(pushed_model_location)[-1]
    )
    logging.info(f"Model registry location: {pushed_model_dir}")
    vertex_model = vertex_ai.Model.upload(
        display_name=model_display_name,
        artifact_uri=pushed_model_dir,
        serving_container_image_uri=serving_image_uri,
        parameters_schema_uri=None,
        instance_schema_uri=None,
        explanation_metadata=None,
        explanation_parameters=None,
    )
    uploaded_model.set_string_custom_property(
        "model_resource_name", str(vertex_model.resource_name)
    )
    logging.info(f"Model resource: {str(vertex_model.resource_name)}")
 | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	notebooks/Custom_Model_TFX.ipynb | 
	from google.colab import auth
auth.authenticate_user()import tensorflow as tf
print('TensorFlow version: {}'.format(tf.__version__))
from tfx import v1 as tfx
print('TFX version: {}'.format(tfx.__version__))
import kfp
print('KFP version: {}'.format(kfp.__version__))
from google.cloud import aiplatform as vertex_ai
import osGOOGLE_CLOUD_PROJECT = 'fast-ai-exploration'    #@param {type:"string"}
GOOGLE_CLOUD_REGION = 'us-central1'             #@param {type:"string"}
GCS_BUCKET_NAME = 'vertex-tfx-mlops'            #@param {type:"string"}
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
    from absl import logging
    logging.error('Please set all required parameters.')PIPELINE_NAME = 'two-way-vertex-pipelines5'
# Path to various pipeline artifact.
PIPELINE_ROOT = 'gs://{}/pipeline_root/{}'.format(
    GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for users' Python module.
MODULE_ROOT = 'gs://{}/pipeline_module/{}'.format(
    GCS_BUCKET_NAME, PIPELINE_NAME)
# Paths for input data.
DATA_ROOT = 'gs://flowers-public/tfrecords-jpeg-224x224'
# This is the path where your model will be pushed for serving.
SERVING_MODEL_DIR = 'gs://{}/serving_model/{}'.format(
    GCS_BUCKET_NAME, PIPELINE_NAME)
print('PIPELINE_ROOT: {}'.format(PIPELINE_ROOT))FIREBASE_CREDENTIAL_PATH = 'gs://credential-csp/gcp-ml-172005-firebase-adminsdk-5gdtb-38c6644f1e.json'
FIREBASE_GCS_BUCKET = 'gcp-ml-172005.appspot.com'_trainer_densenet_module_file = 'flower_densenet_trainer.py'
_trainer_mobilenet_module_file = 'flower_mobilenet_trainer.py'%%writefile {_trainer_densenet_module_file}
from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "class": tf.io.FixedLenFeature([], tf.int64),
    "one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_CONCRETE_INPUT = "numpy_inputs"
_INPUT_SHAPE = (224, 224, 3)
_TRAIN_BATCH_SIZE = 64
_EVAL_BATCH_SIZE = 64
_EPOCHS = 2
def _parse_fn(example):
    example = tf.io.parse_single_example(example, _IMAGE_FEATURES)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    class_label = tf.cast(example["class"], tf.int32)
    return image, class_label
def _input_fn(file_pattern: List[str], batch_size: int) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch.
    Returns:
        A dataset that contains (features, indices) tuple where features is a
            dictionary of Tensors, and indices is a single Tensor of label indices.
    """
    logging.info(f"Reading data from: {file_pattern}")
    tfrecord_filenames = tf.io.gfile.glob(file_pattern[0] + ".gz")
    dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
    dataset = dataset.map(_parse_fn).batch(batch_size)
    return dataset.repeat()
def _make_keras_model() -> tf.keras.Model:
    """Creates a DenseNet121-based model for classifying flowers data.
    Returns:
    A Keras Model.
    """
    inputs = keras.Input(shape=_INPUT_SHAPE)
    base_model = keras.applications.DenseNet121(
        include_top=False, input_shape=_INPUT_SHAPE, pooling="avg"
    )
    base_model.trainable = False
    x = keras.applications.densenet.preprocess_input(inputs)
    x = base_model(
        x, training=False
    )  # Ensures BatchNorm layers run in inference mode in this model
    outputs = keras.layers.Dense(5, activation="softmax")(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
def _preprocess(bytes_input):
    decoded = tf.io.decode_jpeg(bytes_input, channels=3)
    resized = tf.image.resize(decoded, size=(224, 224))
    return resized
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
    decoded_images = tf.map_fn(
        _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
    )
    return {_CONCRETE_INPUT: decoded_images}
def _model_exporter(model: tf.keras.Model):
    m_call = tf.function(model.call).get_concrete_function(
        [
            tf.TensorSpec(
                shape=[None, 224, 224, 3], dtype=tf.float32, name=_CONCRETE_INPUT
            )
        ]
    )
    @tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
    def serving_fn(bytes_inputs):
        labels = tf.constant(
            ["daisy", "dandelion", "roses", "sunflowers", "tulips"], dtype=tf.string
        )
        images = preprocess_fn(bytes_inputs)
        probs = m_call(**images)
        indices = tf.argmax(probs, axis=1)
        pred_source = tf.gather(params=labels, indices=indices)
        pred_confidence = tf.reduce_max(probs, axis=1)
        return {"label": pred_source, "confidence": pred_confidence}
    return serving_fn
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
        fn_args: Holds args used to train the model as name/value pairs.
    """
    train_dataset = _input_fn(fn_args.train_files, batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, batch_size=_EVAL_BATCH_SIZE)
    model = _make_keras_model()
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=_EPOCHS,
    )
    _, acc = model.evaluate(eval_dataset, steps=fn_args.eval_steps)
    logging.info(f"Validation accuracy: {round(acc * 100, 2)}%")
    # The result of the training should be saved in `fn_args.serving_model_dir`
    # directory.
    tf.saved_model.save(
        model,
        fn_args.serving_model_dir,
        signatures={"serving_default": _model_exporter(model)},
    )
%%writefile {_trainer_mobilenet_module_file}
from typing import List
from absl import logging
from tensorflow import keras
from tfx import v1 as tfx
import tensorflow as tf
_IMAGE_FEATURES = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "class": tf.io.FixedLenFeature([], tf.int64),
    "one_hot_class": tf.io.VarLenFeature(tf.float32),
}
_INPUT_SHAPE = (224, 224, 3)
_TRAIN_BATCH_SIZE = 64
_EVAL_BATCH_SIZE = 64
_EPOCHS = 2
def _parse_fn(example):
    example = tf.io.parse_single_example(example, _IMAGE_FEATURES)
    image = tf.image.decode_jpeg(example["image"], channels=3)
    class_label = tf.cast(example["class"], tf.int32)
    return image, class_label
def _input_fn(file_pattern: List[str], batch_size: int) -> tf.data.Dataset:
    """Generates features and label for training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch.
    Returns:
        A dataset of (image, label) tuples, where image is a decoded JPEG tensor
            and label is a single int32 class index.
    """
    logging.info(f"Reading data from: {file_pattern}")
    tfrecord_filenames = tf.io.gfile.glob(file_pattern[0] + ".gz")
    dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
    dataset = dataset.map(_parse_fn).batch(batch_size)
    return dataset.repeat()
def _make_keras_model() -> tf.keras.Model:
    """Creates a MobileNetV3-based model for classifying flowers data.
    Returns:
    A Keras Model.
    """
    inputs = keras.Input(shape=_INPUT_SHAPE)
    base_model = keras.applications.MobileNetV3Small(
        include_top=False, input_shape=_INPUT_SHAPE, pooling="avg"
    )
    base_model.trainable = False
    x = keras.applications.mobilenet_v3.preprocess_input(inputs)
    x = base_model(
        x, training=False
    )  # Ensures BatchNorm layers run in inference mode in this model
    outputs = keras.layers.Dense(5, activation="softmax")(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    model.summary(print_fn=logging.info)
    return model
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train the model based on given args.
    Args:
        fn_args: Holds args used to train the model as name/value pairs.
    """
    train_dataset = _input_fn(fn_args.train_files, batch_size=_TRAIN_BATCH_SIZE)
    eval_dataset = _input_fn(fn_args.eval_files, batch_size=_EVAL_BATCH_SIZE)
    model = _make_keras_model()
    model.fit(
        train_dataset,
        steps_per_epoch=fn_args.train_steps,
        validation_data=eval_dataset,
        validation_steps=fn_args.eval_steps,
        epochs=_EPOCHS,
    )
    _, acc = model.evaluate(eval_dataset, steps=fn_args.eval_steps)
    logging.info(f"Validation accuracy: {round(acc * 100, 2)}%")
    # Convert the model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # Save the model.
    # The result of the training should be saved in `fn_args.serving_model_dir` directory.
    with tf.io.gfile.GFile(fn_args.serving_model_dir + "/model.tflite", "wb") as f:
        f.write(tflite_model)
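    # NOTE: the file name "model.tflite" matters; the FirebasePublisher component
    # defined later in this notebook resolves the model as {pushed_model.uri}/model.tflite.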
_vertex_uploader_module_file = 'vertex_uploader.py'
_vertex_deployer_module_file = 'vertex_deployer.py'
_firebase_publisher_module_file = 'firebase_publisher.py'
%%writefile {_vertex_uploader_module_file}
import os
import tensorflow as tf
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging
@component
def VertexUploader(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    pushed_model_location: Parameter[str],
    serving_image_uri: Parameter[str],
    uploaded_model: tfx.dsl.components.OutputArtifact[String],
):
    vertex_ai.init(project=project, location=region)
    pushed_model_dir = os.path.join(
        pushed_model_location, tf.io.gfile.listdir(pushed_model_location)[-1]
    )
    logging.info(f"Model registry location: {pushed_model_dir}")
    vertex_model = vertex_ai.Model.upload(
        display_name=model_display_name,
        artifact_uri=pushed_model_dir,
        serving_container_image_uri=serving_image_uri,
        parameters_schema_uri=None,
        instance_schema_uri=None,
        explanation_metadata=None,
        explanation_parameters=None,
    )
    uploaded_model.set_string_custom_property(
        "model_resource_name", str(vertex_model.resource_name)
    )
    logging.info(f"Model resource: {str(vertex_model.resource_name)}")
%%writefile {_vertex_deployer_module_file}
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.types.standard_artifacts import String
from google.cloud import aiplatform as vertex_ai
from tfx import v1 as tfx
from absl import logging
@component
def VertexDeployer(
    project: Parameter[str],
    region: Parameter[str],
    model_display_name: Parameter[str],
    deployed_model_display_name: Parameter[str],
):
    logging.info(f"Endpoint display: {deployed_model_display_name}")
    vertex_ai.init(project=project, location=region)
    endpoints = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )
    if len(endpoints) > 0:
        logging.info(f"Endpoint {deployed_model_display_name} already exists.")
        endpoint = endpoints[-1]
    else:
        endpoint = vertex_ai.Endpoint.create(deployed_model_display_name)
    model = vertex_ai.Model.list(
        filter=f"display_name={model_display_name}", order_by="update_time"
    )[-1]
    endpoint = vertex_ai.Endpoint.list(
        filter=f"display_name={deployed_model_display_name}", order_by="update_time"
    )[-1]
    deployed_model = endpoint.deploy(
        model=model,
        # Syntax from here: https://git.io/JBQDP
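        # The special key "0" refers to the model being deployed in this call,
        # so it receives 100% of the endpoint's traffic.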
        traffic_split={"0": 100},
        machine_type="n1-standard-4",
        min_replica_count=1,
        max_replica_count=1,
    )
    logging.info(f"Model deployed to: {deployed_model}")
%%writefile {_firebase_publisher_module_file}
from tfx import types
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.component.experimental.annotations import Parameter
from tfx import v1 as tfx
from absl import logging
import firebase_admin
from firebase_admin import ml
from firebase_admin import storage
from firebase_admin import credentials
from google.cloud import storage as gcs_storage
@component
def FirebasePublisher(
    pushed_model: tfx.dsl.components.InputArtifact[
        tfx.types.standard_artifacts.PushedModel
    ],
    credential_uri: Parameter[str],
    firebase_dest_gcs_bucket: Parameter[str],
    model_display_name: Parameter[str],
    model_tag: Parameter[str],
) -> tfx.dsl.components.OutputDict(result=str):
    model_uri = f"{pushed_model.uri}/model.tflite"
    assert model_uri.split("://")[0] == "gs"
    assert credential_uri.split("://")[0] == "gs"
    # create gcs client instance
    gcs_client = gcs_storage.Client()
    # get credential for firebase
    credential_gcs_bucket = credential_uri.split("//")[1].split("/")[0]
    credential_blob_path = "/".join(credential_uri.split("//")[1].split("/")[1:])
    bucket = gcs_client.bucket(credential_gcs_bucket)
    blob = bucket.blob(credential_blob_path)
    blob.download_to_filename("credential.json")
    logging.info(f"download credential.json from {credential_uri} is completed")
    # get tflite model file
    tflite_gcs_bucket = model_uri.split("//")[1].split("/")[0]
    tflite_blob_path = "/".join(model_uri.split("//")[1].split("/")[1:])
    bucket = gcs_client.bucket(tflite_gcs_bucket)
    blob = bucket.blob(tflite_blob_path)
    blob.download_to_filename("model.tflite")
    logging.info(f"download model.tflite from {model_uri} is completed")
    firebase_admin.initialize_app(
        credentials.Certificate("credential.json"),
        options={"storageBucket": firebase_dest_gcs_bucket},
    )
    logging.info("firebase_admin initialize app is completed")
    model_list = ml.list_models(list_filter=f"display_name={model_display_name}")
    # update
    if len(model_list.models) > 0:
        # get the first match model
        model = model_list.models[0]
        source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
        model.model_format = ml.TFLiteFormat(model_source=source)
        updated_model = ml.update_model(model)
        ml.publish_model(updated_model.model_id)
        logging.info("model exists, so update it in FireBase ML")
        return {"result": "model updated"}
    # create
    else:
        # load a tflite file and upload it to Cloud Storage
        source = ml.TFLiteGCSModelSource.from_tflite_model_file("model.tflite")
        # create the model object
        tflite_format = ml.TFLiteFormat(model_source=source)
        model = ml.Model(
            display_name=model_display_name,
            tags=[model_tag],
            model_format=tflite_format,
        )
        # Add the model to your Firebase project and publish it
        new_model = ml.create_model(model)
        ml.publish_model(new_model.model_id)
        logging.info("model doesn exists, so create one in FireBase ML")
        return {"result": "model created"}
DATASET_DISPLAY_NAME = "flowers"
VERSION = "tfx-1-0-0"
TFX_IMAGE_URI = f"gcr.io/{GOOGLE_CLOUD_PROJECT}/{DATASET_DISPLAY_NAME}:{VERSION}"
print(f"URI of the custom image: {TFX_IMAGE_URI}")%%writefile Dockerfile
FROM gcr.io/tfx-oss-public/tfx:1.0.0
RUN mkdir -p custom_components
COPY custom_components/* ./custom_components/
RUN pip install --upgrade google-cloud-aiplatform google-cloud-storage firebase-admin
# Specify training worker configurations. To minimize costs we could even specify two
# different configurations: a beefier machine for the Endpoint model and a slightly
# less powerful machine for the mobile model (a hypothetical lighter spec is sketched
# after TRAINING_JOB_SPEC below).
TRAINING_JOB_SPEC = {
    'project': GOOGLE_CLOUD_PROJECT,
    'worker_pool_specs': [{
        'machine_spec': {
            'machine_type': 'n1-standard-4',
            'accelerator_type': 'NVIDIA_TESLA_K80',
            'accelerator_count': 1
        },
        'replica_count': 1,
        'container_spec': {
            'image_uri': 'gcr.io/tfx-oss-public/tfx:{}'.format(tfx.__version__),
        },
    }],
}
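# A cheaper worker-pool spec for the mobile trainer could look like the sketch
# below. This is a hypothetical example only; the pipeline as written reuses
# TRAINING_JOB_SPEC for both trainers, and the machine type chosen here is an assumption.
MOBILE_TRAINING_JOB_SPEC = {
    'project': GOOGLE_CLOUD_PROJECT,
    'worker_pool_specs': [{
        'machine_spec': {
            'machine_type': 'n1-standard-2',  # smaller, CPU-only machine
        },
        'replica_count': 1,
        'container_spec': {
            'image_uri': 'gcr.io/tfx-oss-public/tfx:{}'.format(tfx.__version__),
        },
    }],
}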
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
from custom_components.vertex_uploader import VertexUploader
from custom_components.vertex_deployer import VertexDeployer
from custom_components.firebase_publisher import FirebasePublisher
def _create_pipeline(
    pipeline_name: str,
    pipeline_root: str,
    data_root: str,
    densenet_module_file: str,
    mobilenet_module_file: str,
    serving_model_dir: str,
    firebase_credential_path: str,
    firebase_gcs_bucket: str,
    project_id: str,
    region: str,
) -> tfx.dsl.Pipeline:
    """Creates a three component flowers pipeline with TFX."""
    # Brings data into the pipeline.
    # input_base: gs://flowers-public/tfrecords-jpeg-224x224
    example_gen = tfx.components.ImportExampleGen(input_base=data_root)
    # Uses user-provided Python function that trains a model.
    densenet_trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=densenet_module_file,
        examples=example_gen.outputs["examples"],
        train_args=tfx.proto.TrainArgs(num_steps=52),
        eval_args=tfx.proto.EvalArgs(num_steps=5),
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_UCAIP_KEY: True,
            tfx.extensions.google_cloud_ai_platform.UCAIP_REGION_KEY: region,
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: TRAINING_JOB_SPEC,
            "use_gpu": True,
        },
    ).with_id("densenet_trainer")
    # Pushes the model to a filesystem destination.
    pushed_model_location = os.path.join(serving_model_dir, "densenet")
    densenet_pusher = tfx.components.Pusher(
        model=densenet_trainer.outputs["model"],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=pushed_model_location
            )
        ),
    ).with_id("densnet_pusher")
    # Vertex AI upload.
    model_display_name = "densenet_flowers_latest"
    uploader = VertexUploader(
        project=project_id,
        region=region,
        model_display_name=model_display_name,
        pushed_model_location=pushed_model_location,
        serving_image_uri="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-5:latest",
    ).with_id("vertex_uploader")
    uploader.add_upstream_node(densenet_pusher)
    # Create an endpoint.
    deployer = VertexDeployer(
        project=project_id,
        region=region,
        model_display_name=model_display_name,
        deployed_model_display_name=model_display_name + "_" + TIMESTAMP,
    ).with_id("vertex_deployer")
    deployer.add_upstream_node(uploader)
    # We repeat the steps for the MobileNet model too but this time we won't
    # be creating an Endpoint. We will first convert the Keras model to TFLite
    # and then push it to Firebase for better operability. 
    mobilenet_trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
        module_file=mobilenet_module_file,
        examples=example_gen.outputs["examples"],
        train_args=tfx.proto.TrainArgs(num_steps=52),
        eval_args=tfx.proto.EvalArgs(num_steps=5),
        custom_config={
            tfx.extensions.google_cloud_ai_platform.ENABLE_UCAIP_KEY: True,
            tfx.extensions.google_cloud_ai_platform.UCAIP_REGION_KEY: region,
            tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: TRAINING_JOB_SPEC,
            "use_gpu": True,
        },
    ).with_id("mobilenet_trainer")
    pushed_location_mobilenet = os.path.join(serving_model_dir, "mobilenet")
    mobilenet_pusher = tfx.components.Pusher(
        model=mobilenet_trainer.outputs["model"],
        push_destination=tfx.proto.PushDestination(
            filesystem=tfx.proto.PushDestination.Filesystem(
                base_directory=pushed_location_mobilenet
            )
        ),
    ).with_id("mobilenet_pusher")
    firebase_publisher = FirebasePublisher(
        pushed_model=mobilenet_pusher.outputs["pushed_model"],
        credential_uri=firebase_credential_path,
        firebase_dest_gcs_bucket=firebase_gcs_bucket,
        model_display_name=model_display_name,
        model_tag="mobilenet",
    ).with_id("firebase_publisher")
    # Following components will be included in the pipeline.
    components = [
        example_gen,
        densenet_trainer,
        densenet_pusher,
        uploader,
        deployer,
        mobilenet_trainer,
        mobilenet_pusher,
        firebase_publisher,
    ]
    return tfx.dsl.Pipeline(
        pipeline_name=pipeline_name, pipeline_root=pipeline_root, components=components
    )
PIPELINE_DEFINITION_FILE = PIPELINE_NAME + '_pipeline.json'
# Important: We need to pass the custom Docker image URI to the
# `KubeflowV2DagRunnerConfig` to take effect.
runner = tfx.orchestration.experimental.KubeflowV2DagRunner(
    config=tfx.orchestration.experimental.KubeflowV2DagRunnerConfig(default_image=TFX_IMAGE_URI),
    output_filename=PIPELINE_DEFINITION_FILE)
_ = runner.run(
    _create_pipeline(
        pipeline_name=PIPELINE_NAME,
        pipeline_root=PIPELINE_ROOT,
        data_root=DATA_ROOT,
        densenet_module_file=os.path.join(MODULE_ROOT, _trainer_densenet_module_file),
        mobilenet_module_file=os.path.join(MODULE_ROOT, _trainer_mobilenet_module_file),
        serving_model_dir=SERVING_MODEL_DIR,
        firebase_credential_path=FIREBASE_CREDENTIAL_PATH,
        firebase_gcs_bucket=FIREBASE_GCS_BUCKET, 
        project_id=GOOGLE_CLOUD_PROJECT,
        region=GOOGLE_CLOUD_REGION
    )
)
from kfp.v2.google import client
pipelines_client = client.AIPlatformClient(
    project_id=GOOGLE_CLOUD_PROJECT,
    region=GOOGLE_CLOUD_REGION,
)
_ = pipelines_client.create_run_from_job_spec(PIPELINE_DEFINITION_FILE, enable_caching=True)
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
import base64
vertex_ai.init(project=GOOGLE_CLOUD_PROJECT, location=GOOGLE_CLOUD_REGION)
model_display_name = "densenet_flowers_latest"
deployed_model_display_name = model_display_name + "_" + TIMESTAMP
endpoint = vertex_ai.Endpoint.list(
    filter=f'display_name={deployed_model_display_name}',
    order_by="update_time"
)[-1]
endpoint_id = endpoint.name
endpoint_id
image_path = tf.keras.utils.get_file("image.jpg", 
                                            "https://m.economictimes.com/thumb/msid-71307470,width-1201,height-900,resizemode-4,imgsize-1040796/roses.jpg")
bytes = tf.io.read_file(image_path)
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
pushed_model_location = os.path.join(SERVING_MODEL_DIR, "densenet")
model_path_to_deploy = os.path.join(
    pushed_model_location, tf.io.gfile.listdir(pushed_model_location)[-1]
)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
    loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)def predict_image(image, endpoint, parameters_dict):
    # The format of each instance should conform to the deployed model's prediction input schema.
    instances_list = [{serving_input: {"b64": image}}]
    instances = [json_format.ParseDict(s, Value()) for s in instances_list]
    endpoint = vertex_ai.Endpoint(endpoint)
    print(endpoint.predict(instances=instances))
predict_image(b64str, endpoint_id, None) | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	notebooks/Dataset_Prep.ipynb | 
	#@title GCS
#@markdown You should change these values as per your preferences. The copy operation can take ~5 minutes. 
BUCKET_PATH = "gs://flowers-experimental" #@param {type:"string"}
REGION = "us-central1" #@param {type:"string"}
!gsutil mb -l {REGION} {BUCKET_PATH}
!gsutil -m cp -r flower_photos {BUCKET_PATH}
import random
random.seed(666)
from google.cloud import storage
from pprint import pprint
import pandas as pd
import os
from google.colab import auth
auth.authenticate_user()
gs_uris = []
storage_client = storage.Client(project="fast-ai-exploration") # Change it accordingly.
blobs = storage_client.list_blobs(BUCKET_PATH.split("/")[-1])
for blob in blobs:
    if ".txt" in blob.name.split("/")[-1]:
        continue
    gs_uri = os.path.join(BUCKET_PATH, blob.name)
    gs_uris.append(gs_uri)
pprint(gs_uris[:5])
# Create splits.
random.shuffle(gs_uris)
i = int(len(gs_uris) * 0.9)
train_paths = gs_uris[:i]
test_paths = gs_uris[i:]
i = int(len(train_paths) * 0.05)
valid_paths = train_paths[:i]
train_paths = train_paths[i:]
print(len(train_paths), len(valid_paths), len(test_paths))
def derive_labels(gcs_paths, split="training"):
    labels = []
    for gcs_path in gcs_paths:
        label = gcs_path.split("/")[4]
        labels.append(label)
    return labels, [split] * len(gcs_paths)
# The CSV file format follows https://cloud.google.com/vertex-ai/docs/datasets/prepare-image#csv
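# For reference, each row of the import CSV written later in this notebook uses
# the `ml_use,gcs_uri,label` layout from that page; these URIs are made-up placeholders:
#   training,gs://flowers-experimental/flower_photos/roses/001.jpg,roses
#   validation,gs://flowers-experimental/flower_photos/tulips/002.jpg,tulips
#   test,gs://flowers-experimental/flower_photos/daisy/003.jpg,daisy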
train_labels, train_use = derive_labels(train_paths)
val_labels, val_use = derive_labels(valid_paths, split="validation")
test_labels, test_use = derive_labels(test_paths, split="test")
gcs_uris = []
labels = []
use = []
gcs_uris.extend(train_paths)
gcs_uris.extend(valid_paths)
gcs_uris.extend(test_paths)
labels.extend(train_labels)
labels.extend(val_labels)
labels.extend(test_labels)
use.extend(train_use)
use.extend(val_use)
use.extend(test_use)
import csv
with open("flowers_vertex.csv", "w") as csvfile: 
    csvwriter = csv.writer(csvfile)
    
    for ml_use, gcs_uri, label in zip(use, gcs_uris, labels):
        row = [ml_use, gcs_uri, label]
        csvwriter.writerow(row)   | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	notebooks/Dual_Deployments_With_AutoML.ipynb | 
	import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
    USER_FLAG = "--user"# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
import os
PROJECT_ID = "grounded-atrium-320207"
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
    shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://vertexai_dual_example"
REGION      = "us-central1"
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "chansung"
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT
import kfp
from google.cloud import aiplatform
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
from kfp.v2 import dsl
from kfp.v2.dsl import component
@component(
    packages_to_install=["google-cloud-storage", "firebase-admin", "tensorflow"]
)
def push_to_firebase(
    credential_uri: str,
    model_bucket: str,
    firebase_dest_gcs_bucket: str,
    model_display_name: str,
    model_tag: str
):
    import firebase_admin
    from firebase_admin import ml
    from firebase_admin import storage
    from firebase_admin import credentials    
    from google.cloud import storage as gcs_storage
    
    gcs_client = gcs_storage.Client()
    
    # get credential for firebase  
    credential_gcs_bucket = credential_uri.split('//')[1].split('/')[0]
    credential_blob_path = '/'.join(credential_uri.split('//')[1].split('/')[1:])
    
    bucket = gcs_client.bucket(credential_gcs_bucket)
    blob = bucket.blob(credential_blob_path)
    blob.download_to_filename('credential.json')
    
    # get the latest model    
    tflite_blobs = gcs_client.get_bucket(model_bucket).list_blobs()
    tflite_blob = sorted(tflite_blobs, reverse=True, key=lambda blob: blob.name.split('/')[-2])[0]
    tflite_blob.download_to_filename('model.tflite')            
            
    firebase_admin.initialize_app(
        credentials.Certificate('credential.json'),
        options={
            'storageBucket': firebase_dest_gcs_bucket
        }
    )
    model_list = ml.list_models(list_filter=f'display_name={model_display_name}')
    # update
    if len(model_list.models) > 0:
        # get the first match model
        model = model_list.models[0]
        
        source = ml.TFLiteGCSModelSource.from_tflite_model_file('model.tflite')
        model.model_format = ml.TFLiteFormat(model_source=source)
        
        updated_model = ml.update_model(model)
        ml.publish_model(updated_model.model_id)
    # create
    else:    
        # Load a tflite file and upload it to Cloud Storage
        source = ml.TFLiteGCSModelSource.from_tflite_model_file('model.tflite')
        # Create the model object
        tflite_format = ml.TFLiteFormat(model_source=source)
        model = ml.Model(
            display_name=model_display_name,  # This is the name you use from your app to load the model.
            tags=[model_tag],             # Optional tags for easier management.
            model_format=tflite_format)
        # Add the model to your Firebase project and publish it
        new_model = ml.create_model(model)
        ml.publish_model(new_model.model_id)
@kfp.dsl.pipeline(name="cloud-mobile-dual-deployment")
def pipeline(project: str = PROJECT_ID):
    ds_op = gcc_aip.ImageDatasetCreateOp(
       project=project,
       display_name="flowers-dataset",
       gcs_source="gs://dataset-meta-gde-csp/flowers_vertex.csv",
       import_schema_uri=aiplatform.schema.dataset.ioformat.image.multi_label_classification,
    )
    configs = [
       {
          "type": "CLOUD",
          "model_type": "CLOUD",
          "display_name": "train-cloud-model",
          "model_display_name": "cloud-model",
          "budget_milli_node_hours": 8000,
       },
       {
          "type": "MOBILE",
          "model_type": "MOBILE_TF_VERSATILE_1",
          "display_name": "train-mobile-model",
          "model_display_name": "mobile-model",
          "budget_milli_node_hours": 1000,
       }
    ]
    with kfp.dsl.ParallelFor(configs) as config:
        training_job_run_op = gcc_aip.AutoMLImageTrainingJobRunOp(
            project=project,
            display_name=config.display_name,
            prediction_type="classification",
            multi_label=True,
            model_type=config.model_type,
            base_model=None,
            dataset=ds_op.outputs["dataset"],
            model_display_name=config.model_display_name,
            budget_milli_node_hours=config.budget_milli_node_hours,
        )
        training_job_run_op.after(ds_op)
        with kfp.dsl.Condition(config.type=='CLOUD'):
            endpoint_op = gcc_aip.ModelDeployOp(
                project=project,
                model=training_job_run_op.outputs["model"]
            )
            endpoint_op.after(training_job_run_op)
            
        with kfp.dsl.Condition(config.type=='MOBILE'):
            export_op = gcc_aip.ModelExportOp( 
                project=project,
                model=training_job_run_op.outputs["model"],
                # tflite, edgetpu-tflite, tf-saved-model, tf-js, core-ml, custom-trained
                export_format_id="tflite",
                artifact_destination="gs://output-model-gde-csp/flower-models/"
            )
            export_op.after(training_job_run_op)
            credential_uri="gs://firebase-ml-bucket-gde-csp/grounded-atrium-320207-firebase-adminsdk-5n9sn-20dbda9947.json"
            model_bucket="output-model-gde-csp"
            firebase_bucket="grounded-atrium-320207.appspot.com"
            
            firebase_op = push_to_firebase(
                credential_uri=credential_uri,
                model_bucket=model_bucket,
                firebase_dest_gcs_bucket=firebase_bucket,
                model_display_name="custom_model",
                model_tag="from_dual_deployment"
            )
            firebase_op.after(export_op)
from kfp.v2 import compiler
compiler.Compiler().compile(
    pipeline_func=pipeline, package_path="cloud-mobile-dual-deployment.json"
)
from kfp.v2.google.client import AIPlatformClient
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
response = api_client.create_run_from_job_spec(
    "cloud-mobile-dual-deployment.json",
    pipeline_root=PIPELINE_ROOT,
    parameter_values={"project": PROJECT_ID},
) | 
| 
	sayakpaul/Dual-Deployments-on-Vertex-AI | 
	notebooks/Model_Tests.ipynb | 
	from io import BytesIO
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import requests
import base64
from google.cloud.aiplatform.gapic.schema import predict
from google.cloud import aiplatform
import tensorflow as tf
def preprocess_image(image):
    """Preprocesses an image."""
    image = np.array(image)
    image = tf.image.resize(image, (224, 224))
    image = tf.cast(image, tf.uint8)
    return image[None, ...]
def load_image_from_url(url):
    """Loads an image from a URL. Please provide a URL of a valid RGB image."""
    response = requests.get(url)
    image = Image.open(BytesIO(response.content))
    image = preprocess_image(image)
    return image
def tflite_inference(tflite_model_path, image):
    """Runs inference with a TFLite model."""
    # Load the TFLite model from its path and allocate tensors into memory.
    interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
    interpreter.allocate_tensors()
    # Get the indices of input and output of the model.
    input_details = interpreter.get_input_details()[0]
    input_index = input_details["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    # Scale if needed. 
    if input_details["dtype"] == np.uint8:
        input_scale, input_zero_point = input_details["quantization"]
        image = tf.cast(image, tf.float32)
        image = image / input_scale + input_zero_point
        image = tf.cast(image, tf.uint8)
    # Run inference.
    interpreter.set_tensor(input_index, image)
    interpreter.invoke()
    # Post-processing: remove the batch dimension and find the flower class with
    # the highest probability.
    probability = interpreter.get_tensor(output_index)
    flower_id = np.argmax(probability[0])
    return flower_id
image = load_image_from_url("https://m.economictimes.com/thumb/msid-71307470,width-1201,height-900,resizemode-4,imgsize-1040796/roses.jpg")
plt.imshow(image[0])
plt.axis("off")
plt.show()
tflite_model_path = tf.keras.utils.get_file("model.tflite", 
                                            "https://storage.googleapis.com/output-model-gde-csp/flower-models/model-8119506343032782848/tflite/2021-07-28T15%3A48%3A15.623623Z/model.tflite")
CLASSES = ["daisy", "dandelion", "roses", "sunflowers", "tulips"]
CLASSES[tflite_inference(tflite_model_path, image)]
from google.colab import auth
auth.authenticate_user()
# Reference:
# https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-automl
def predict_image_classification_sample(
    project: str,
    endpoint_id: str,
    filename: str,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
    with open(filename, "rb") as f:
        file_content = f.read()
    # The format of each instance should conform to the deployed model's prediction input schema.
    encoded_content = base64.b64encode(file_content).decode("utf-8")
    instance = predict.instance.ImageClassificationPredictionInstance(
        content=encoded_content,
    ).to_value()
    instances = [instance]
    # See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters.
    parameters = predict.params.ImageClassificationPredictionParams(
        confidence_threshold=0.5, max_predictions=5,
    ).to_value()
    endpoint = client.endpoint_path(
        project=project, location=location, endpoint=endpoint_id
    )
    response = client.predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml for the format of the predictions.
    predictions = response.predictions
    for prediction in predictions:
        print(" prediction:", dict(prediction))
image_path = tf.keras.utils.get_file("image.jpg", 
                                            "https://m.economictimes.com/thumb/msid-71307470,width-1201,height-900,resizemode-4,imgsize-1040796/roses.jpg")
predict_image_classification_sample(
    project="881543627888",
    endpoint_id="2892911849701900288",
    location="us-central1",
    filename=image_path
) | 