## @package checkpoint
# Module caffe2.python.checkpoint

import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import (
    final_output,
    Node,
    Task,
    TaskGroup,
    TaskOutput,
    WorkspaceType,
)

logger = logging.getLogger(__name__)


class Job(context.Managed):
    """
    A Job defines four TaskGroups: the `init_group`, the `epoch_group`, the
    `download_group` and the `exit_group`, which will be run by a JobRunner.

    The `init_group` will be run only once at startup. Its role is to
    initialize globally persistent blobs such as model weights, accumulators
    and data file lists.

    The `epoch_group` will be run in a loop after init_group. The loop will
    exit when any of the stop signals added with `add_stop_condition` is True
    at the end of an epoch.

    The `download_group` will be run only once, after all executions of the
    `epoch_group` finish. Its role is to collect the distributed, scattered
    parameters back after training.

    The `exit_group` will be run only once at the very end of the job; its
    role is to save the results of training.

    Jobs are context-driven, so that Tasks can be added to the active Job
    without having to explicitly pass the job object around.

    Example of usage:

    def build_reader(partitions):
        with Job.current().init_group:
            reader = HiveReader(init_reader, ..., partitions)
            Task(step=init_reader)
        with Job.current().epoch_group:
            limited_reader = ReaderWithLimit(reader, num_iter=10000)
            data_queue = pipe(limited_reader, num_threads=8)
            Job.current().add_stop_condition(limited_reader.data_finished())
        return data_queue

    def build_hogwild_trainer(reader, model):
        with Job.current().init_group:
            Task(step=model.param_init_net)
        with Job.current().epoch_group:
            pipe(reader, processor=model, num_threads=8)
        with Job.current().exit_group:
            Task(step=model.save_model_net)

    with Job() as job:
        reader = build_reader(partitions)
        model = build_model(params)
        build_hogwild_trainer(reader, model)
    """
    def __init__(self,
                 init_group=None, epoch_group=None,
                 download_group=None, exit_group=None,
                 stop_conditions=None, nodes_to_checkpoint=None):
        self.init_group = init_group or TaskGroup(
            workspace_type=WorkspaceType.GLOBAL)
        self.epoch_group = epoch_group or TaskGroup()
        self.download_group = download_group or TaskGroup()
        self.exit_group = exit_group or TaskGroup()
        self.stop_conditions = stop_conditions or []
        self._nodes_to_checkpoint = nodes_to_checkpoint

    def nodes_to_checkpoint(self):
        if self._nodes_to_checkpoint:
            return self._nodes_to_checkpoint
        else:
            return self.init_group.used_nodes()

    def compile(self, session_class):
        self._nodes_to_checkpoint = self.nodes_to_checkpoint()
        self.init_group = session_class.compile(self.init_group)
        self.epoch_group = session_class.compile(self.epoch_group)
        self.download_group = session_class.compile(self.download_group)
        self.exit_group = session_class.compile(self.exit_group)

    def __enter__(self):
        super(Job, self).__enter__()
        self.epoch_group.__enter__()
        return self

    def __exit__(self, *args):
        self.epoch_group.__exit__()
        super(Job, self).__exit__(*args)

    def add_stop_condition(self, output):
        if isinstance(output, core.BlobReference):
            t = Task(outputs=[output], group=self.epoch_group)
            output = t.outputs()[0]
        assert isinstance(output, TaskOutput)
        self.stop_conditions.append(output)


def get_ckpt_filename(node_name, epoch):
    """Returns the checkpoint filename.

    Args:
        node_name: A string. The name of the node.
        epoch: An integer. The checkpoint epoch.

    Returns:
        ckpt_filename: A string. The filename of the checkpoint.
    """
    return node_name + '.' + str(epoch)


def db_name(epoch, node_name, db_prefix, path_prefix=None):
    """Returns the full db name where checkpoint files are saved.

    Args:
        epoch: An integer. The checkpoint epoch.
        node_name: A string. The name of the node.
        db_prefix: A string. The prefix used to construct the full db name.
        path_prefix: A string. Optional param used to construct the db name or
            path where checkpoint files are stored.
    Returns:
        db_name: A string. The absolute path of the full db name where
            checkpoint files are saved.
    """
    if path_prefix:
        db_name = path_prefix + get_ckpt_filename(node_name, epoch)
    else:
        ckpt_filename = get_ckpt_filename(node_name, epoch)
        db_name = os.path.join(db_prefix, ckpt_filename)
    return db_name
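

# Example of how get_ckpt_filename and db_name compose a checkpoint path
# (illustrative only; the prefix, node name and epoch below are hypothetical).
# Note that `path_prefix`, when given, is concatenated directly, so it should
# end with a separator:
#
#     get_ckpt_filename('trainer_0', 5)        -> 'trainer_0.5'
#     db_name(5, 'trainer_0', '/checkpoints')  -> '/checkpoints/trainer_0.5'
#     db_name(5, 'trainer_0', '/checkpoints',
#             path_prefix='gs://bucket/ckpt/') -> 'gs://bucket/ckpt/trainer_0.5'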


class CheckpointManager(object):
    """
    Controls saving and loading of workspaces on every epoch boundary of a job.
    If a CheckpointManager instance is passed to JobRunner, then JobRunner will
    call `init`, `load` and `save` at different moments in between epoch runs.

    Args:
        db_prefix: The prefix used to construct full db name. Since `absolute_path`
            is set to True, this will be used as db_name in SaveOp.
        node_name: Name of the node where this checkpoint_manager is used.
        db_type: Type of database to use for storing checkpoint.
        metadata_handler: An optional object capable of reading/writing
            checkpoint info in storage of choice.
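
    Example of usage (an illustrative sketch; the db prefix, node name,
    db type and the JobRunner wiring shown here are assumptions):

    checkpoint_manager = CheckpointManager(
        db_prefix='/checkpoints/',
        node_name='trainer_0',
        db_type='leveldb',
    )
    job_runner = JobRunner(job, checkpoint_manager=checkpoint_manager)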
    """

    BLOB_NAMES = "blob_names"

    def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
        self._db_prefix = db_prefix
        self._node_name = node_name
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        # make sure these blobs are the first in the checkpoint file.
        self._net = core.Net('!!checkpoint_mngr')
        self._blob_names = self._net.AddExternalInput(self.BLOB_NAMES)
        self._names_output = None
        self._path_prefix = None
        self._path_type = None
        self._current_db_name = None
        self._current_checkpoint_duration = None

    """
    Initialize the checkpoint manager. Determines all blobs that need to be saved
    or loads from a checkpoint.

    Args:
        nodes: An array of nodes where this checkpoint manager is running. Should
            only contain a single node.
        retrieve_from_epoch: Set to a number to load blobs from this epoch.
        path_prefix: Used to construct db name or path where checkpoint files are
            stored.
        path_type: Indicate the type of path where checkpoint files are stored.
    """
    def init(
        self,
        nodes=None,
        retrieve_from_epoch=None,
        path_prefix=None,
        path_type=None
    ):
        """
        Build a Task that will be run once after the job's `init_group` is run.
        This task will determine which blobs need to be checkpointed.
        If retrieve_from_epoch is not None, then the checkpoint metadata is
        retrieved from a previously saved checkpoint.
        """
        assert nodes is None or len(nodes) == 1, (
            'CheckpointManager only supports single node.')

        with Task(outputs=[self._blob_names]) as task:
            if retrieve_from_epoch is None:
                ops.GetAllBlobNames(
                    [],
                    self._blob_names,
                    include_shared=False)
            else:
                full_db_name = db_name(retrieve_from_epoch,
                                        self._node_name, self._db_prefix, path_prefix)
                db_type = path_type or self._db_type
                logger.info("Initializing checkpoints from = %s"
                            % full_db_name)
                ops.Load(
                    [], self._blob_names,
                    db=full_db_name,
                    db_type=db_type,
                    absolute_path=True,
                    keep_device=True,
                )
        self._names_output = task.outputs()[0]
        return task

    def blob_list(self):
        assert self._names_output
        return self._names_output.fetch().tolist()
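
    # Example (an illustrative sketch; the node name is hypothetical):
    #
    #     init_task = checkpoint_manager.init(nodes=['trainer_0'])
    #     # Once a session has run `init_task`, blob_list() returns the names
    #     # of all blobs that will be saved to / loaded from checkpoints.
    #     names = checkpoint_manager.blob_list()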

    def _timed_task(self, cp_op_name, add_op):
        """
        Build a Task that will measure the time span of a checkpoint operation;
        once the operation is done, the duration can be read from
        _current_checkpoint_duration.

        Args:
            cp_op_name: A string name of the checkpoint operation.
            add_op: A functor to add the checkpoint operation.

        Returns:
            A task with timer.
        """
        with Task(name=cp_op_name) as task:
            with ops.task_init():
                timer = ops.TimerBegin([], counter_name=self._node_name)
            add_op()
            with ops.task_exit():
                time_span_blob = ops.TimerGetAndEnd(timer)
            self._current_checkpoint_duration = final_output(time_span_blob)
        return task

    def collect_checkpoint_stats(self, stats):
        """
        Add the stats of one checkpoint operation into the given stats dict.

        Args:
            stats: A dict of checkpoint stats that will be reported.
        """
        if self._current_db_name and self._current_checkpoint_duration:
            stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
        else:
            logger.info(
                "Failed to collect checkpoint stats: {}".format(
                    self._current_db_name
                )
            )
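
    # Example (illustrative; the db name and duration shown are hypothetical):
    #
    #     stats = {}
    #     checkpoint_manager.collect_checkpoint_stats(stats)
    #     # stats now maps the db name of the last checkpoint operation to its
    #     # measured duration, e.g. {'/checkpoints/trainer_0.5': 2.7}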

    def load(self, epoch, path_prefix=None, path_type=None):
        """
        Build a Task that will be run by JobRunner when the job is to be
        resumed from a given epoch. This task will run a Load op that will
        load and deserialize all relevant blobs from a persistent storage.
        """
        self._current_db_name = db_name(
            epoch, self._node_name, self._db_prefix, path_prefix
        )
        db_type = path_type or self._db_type
        logger.info("Loading checkpoints from = %s" % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                self.blob_list(),
                db=self._current_db_name,
                db_type=db_type,
                absolute_path=True,
                keep_device=True,
            )

        return self._timed_task('checkpoint_load', add_op)
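
    # Example (an illustrative sketch; the epoch value is hypothetical):
    #
    #     load_task = checkpoint_manager.load(epoch=3)
    #     # JobRunner runs a task like this before resuming the epoch loop
    #     # when a job is restarted from a previously saved checkpoint.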

    def load_blobs_from_checkpoint(self, blob_names, epoch):
        """
        Builds a Task that loads only the necessary blobs from a checkpoint of
        the given epoch. The necessary blobs are given in the blob_names
        argument.

        Args:
            blob_names: A list of strings. Each string is the name of a
                blob.
            epoch: The checkpoint epoch to load from.

        Returns:
            A Task which loads the specified blobs from the checkpoint of the
            given epoch.
        """
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info('Load from %s' % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                blob_names,
                db=self._current_db_name,
                db_type=self._db_type,
                absolute_path=True,
                allow_incomplete=True)

        return self._timed_task('checkpoint_partial_load', add_op)
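
    # Example (an illustrative sketch; the blob names and epoch are hypothetical):
    #
    #     load_task = checkpoint_manager.load_blobs_from_checkpoint(
    #         blob_names=['fc_w', 'fc_b'], epoch=3)
    #     # Missing blobs are tolerated because the Load op above is built
    #     # with allow_incomplete=True.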

    def check_db_exists(self, epoch):
        logger.info('Check existence of %s' %
                    db_name(epoch, self._node_name, self._db_prefix))
        with Task() as task:
            existence = ops.Const(False)
            ops.DBExists(
                [],
                [existence],
                db_name=db_name(epoch, self._node_name, self._db_prefix),
                db_type=self._db_type,
                absolute_path=True)
            task.add_output(existence)
        return task
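
    # Example (an illustrative sketch; the epoch value is hypothetical):
    #
    #     exists_task = checkpoint_manager.check_db_exists(epoch=3)
    #     # After a session runs the task, its single output holds a boolean
    #     # indicating whether the checkpoint db for that epoch exists:
    #     #     exists = exists_task.outputs()[0].fetch()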

    def report_checkpoint_stats(self, action_name):
        """
        Report checkpoint operation stats for current node.

        Args:
            action_name: A string. The name of the checkpoint operation.
        """
        all_stats = {}