# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.static import (
    default_main_program,
    default_startup_program,
    program_guard,
)

from .common import OP_ROLE_KEY, CollectiveHelper, OpRole
from .meta_optimizer_base import MetaOptimizerBase

__all__ = []


class LocalSGDOptimizer(MetaOptimizerBase):
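    """Meta optimizer implementing Local SGD for collective training.

    Each worker takes ``k_steps`` local optimizer steps, then all workers
    synchronize by averaging their parameters; until ``begin_step`` is
    reached, parameters are averaged after every single step instead. Only
    plain SGD and Momentum inner optimizers are supported (see
    ``_can_apply``).

    A sketch of the usual static-graph wiring through
    ``paddle.distributed.fleet`` (the two config keys are the ones this
    class reads; ``loss`` stands for a scalar loss built elsewhere)::

        import paddle
        import paddle.distributed.fleet as fleet

        fleet.init(is_collective=True)
        strategy = fleet.DistributedStrategy()
        strategy.localsgd = True
        strategy.localsgd_configs = {"k_steps": 4, "begin_step": 30}

        optimizer = paddle.optimizer.SGD(learning_rate=0.01)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(loss)
    """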
    def __init__(self, optimizer):
        super().__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = ['AMPOptimizer']
        self.meta_optimizers_black_list = [
            "AdaptiveLocalSGDOptimizer",
        ]
        self.snapshot_key = '@SNAPSHOT'

    def _can_apply(self):
        if not self.role_maker._is_collective:
            return False

        if not self.user_defined_strategy.localsgd:
            return False

        if self.role_maker._worker_num() <= 1:
            return False

        return isinstance(
            self.inner_opt,
            (
                paddle.optimizer.Momentum,
                paddle.optimizer.SGD,
            ),
        )

    def _disable_strategy(self, dist_strategy):
        dist_strategy.localsgd = False
        dist_strategy.localsgd_configs = {}

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.localsgd = True
        dist_strategy.localsgd_configs = {"k_steps": 1, "begin_step": 1}

    def snapshot_name(self, param_name):
        return param_name + self.snapshot_key

    def create_snapshot_vars(self, program):
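        """Create a persistable ``<param>@SNAPSHOT`` variable for every
        non-distributed parameter and return a list of ``[param, snapshot]``
        pairs; each snapshot holds its parameter's value as of the last
        synchronization."""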
        block = program.global_block()

        non_dist_params = []
        for param in block.iter_parameters():
            if not param.is_distributed:
                non_dist_params.append(param)

        p2s = []
        for param in non_dist_params:
            snapshot = block.create_var(
                name=self.snapshot_name(param.name),
                shape=param.shape,
                persistable=True,
                stop_gradient=True,
                dtype=param.dtype,
            )
            p2s.append([param, snapshot])
        return p2s

    def init_snapshot_vars(self, startup_program, param2snapshot):
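        """Seed every snapshot with its parameter's startup value."""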
        with program_guard(startup_program):
            for param, snapshot in param2snapshot:
                paddle.assign(param, snapshot)

    def minimize_impl(
        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
    ):
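        # Let the inner optimizer append the usual backward and update ops;
        # the synchronization logic below is layered on top of them.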
        minimized = self.inner_opt.minimize(
            loss, startup_program=startup_program
        )

        k_steps_value = self.user_defined_strategy.localsgd_configs['k_steps']
        begin_step_value = self.user_defined_strategy.localsgd_configs[
            'begin_step'
        ]

        if startup_program is None:
            startup_program = default_startup_program()
        main_block = loss.block

        self.nrings = 2
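        # communicate() spreads its all-reduce ops round-robin across these
        # rings.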
        collective_helper = CollectiveHelper(self.role_maker, self.nrings)
        collective_helper.update_startup_program(startup_program)
        p2s = self.create_snapshot_vars(startup_program)
        self.init_snapshot_vars(startup_program, p2s)

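        # Snapshots must exist in both programs: the startup program
        # initializes them, the main program reads and updates them.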
        p2s = self.create_snapshot_vars(main_block.program)
        with program_guard(main_block.program, startup_program):
            step = paddle.optimizer.lr.autoincreased_step_counter(begin=1)
            k_steps = paddle.static.create_global_var(
                name="k_steps",
                shape=[1],
                value=k_steps_value,
                dtype='int64',
                persistable=True,
            )

            begin_step = paddle.static.create_global_var(
                name="begin_step",
                shape=[1],
                value=begin_step_value,
                dtype='int64',
                persistable=True,
            )

            last_step = paddle.static.create_global_var(
                name="last_step",
                shape=[1],
                value=begin_step_value,
                dtype='int64',
                persistable=True,
            )

            def communicate():
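                # Average parameters across workers: all-reduce the local
                # drift (snapshot - param), divide by the worker count, and
                # subtract the mean drift from the common snapshot, leaving
                # param equal to the mean of all workers' parameters.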
                sub_block = default_main_program().current_block()
                ring_id = -1
                for param, snapshot in p2s:
                    sub_block.append_op(
                        type='elementwise_sub',
                        inputs={'X': [snapshot], 'Y': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    sub_block.append_op(
                        type='c_sync_calc_stream',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    ring_id = (ring_id + 1) % self.nrings
                    sub_block.append_op(
                        type='all_reduce',
                        inputs={'x': [param]},
                        outputs={'out': [param]},
                        attrs={
                            'ring_id': ring_id,
                            'reduce_type': paddle.distributed.ReduceOp.SUM,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )

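                # Drain every communication ring before consuming the
                # all-reduced tensors.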
                for ring_id in range(self.nrings):
                    sub_block.append_op(
                        type='c_sync_comm_stream',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={
                            'ring_id': ring_id,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )

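                # Turn the summed drift into a mean, recover the averaged
                # parameter, and refresh the snapshot for the next interval.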
                for param, snapshot in p2s:
                    sub_block.append_op(
                        type='scale',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={
                            'scale': 1.0 / self.role_maker._worker_num(),
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )
                    sub_block.append_op(
                        type='elementwise_sub',
                        inputs={'X': [snapshot], 'Y': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    sub_block.append_op(
                        type='assign',
                        inputs={'X': [param]},
                        outputs={'Out': [snapshot]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                paddle.assign(step, last_step)

            def begin_localsgd():
                paddle.static.nn.cond(step - last_step == k_steps, communicate)

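            # Until begin_step, average after every step (plain synchronous
            # SGD); afterwards, only once k_steps local updates have
            # accumulated since the last synchronization.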
            paddle.static.nn.cond(
                step > begin_step, begin_localsgd, communicate
            )
        return minimized


class AdaptiveLocalSGDOptimizer(MetaOptimizerBase):
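    """Meta optimizer implementing Local SGD with an adaptive number of
    local steps.

    Behaves like ``LocalSGDOptimizer``, except that after every
    synchronization the interval until the next one is recomputed from the
    current learning rate and the worker-averaged loss::

        k_next = clip(ceil(sqrt(init_k_steps * lr_0 * loss_t
                                / (lr_t * loss_0))), 1, 16)

    where ``lr_0`` and ``loss_0`` are recorded on the first step and
    ``lr_t``, ``loss_t`` are the current values, so the interval grows as
    the learning rate decays and shrinks as the loss falls.

    Configuration sketch (keys as read by this class; the fleet wiring is
    the same as for ``LocalSGDOptimizer``)::

        strategy.adaptive_localsgd = True
        strategy.adaptive_localsgd_configs = {
            "init_k_steps": 1,
            "begin_step": 30,
        }
    """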
    def __init__(self, optimizer):
        super().__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = ['AMPOptimizer']
        self.meta_optimizers_black_list = [
            "LocalSGDOptimizer",
        ]
        self.snapshot_key = '@SNAPSHOT'

    def _can_apply(self):
        if not self.role_maker._is_collective:
            return False

        if not self.user_defined_strategy.adaptive_localsgd:
            return False

        if self.role_maker._worker_num() <= 1:
            return False

        return isinstance(
            self.inner_opt,
            (
                paddle.optimizer.Momentum,
                paddle.optimizer.SGD,
            ),
        )

    def _disable_strategy(self, dist_strategy):
        dist_strategy.adaptive_localsgd = False
        dist_strategy.adaptive_localsgd_configs = {}

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.adaptive_localsgd = True
        dist_strategy.adaptive_localsgd_configs = {
            "init_k_steps": 1,
            "begin_step": 1,
        }

    def snapshot_name(self, param_name):
        return param_name + self.snapshot_key

    def create_snapshot_vars(self, program):
        block = program.global_block()

        non_dist_params = []
        for param in block.iter_parameters():
            if not param.is_distributed:
                non_dist_params.append(param)

        p2s = []
        for param in non_dist_params:
            snapshot = block.create_var(
                name=self.snapshot_name(param.name),
                shape=param.shape,
                persistable=True,
                stop_gradient=True,
                dtype=param.dtype,
            )
            p2s.append([param, snapshot])
        return p2s

    def init_snapshot_vars(self, startup_program, param2snapshot):
        with program_guard(startup_program):
            for param, snapshot in param2snapshot:
                paddle.assign(param, snapshot)

    def _generate_avg_loss(self, program_block, loss, avg_loss):
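        """Append ops that all-reduce ``loss`` into ``avg_loss`` and scale it
        by ``1 / worker_num``, producing the loss averaged over workers."""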
        program_block.append_op(
            type='all_reduce',
            inputs={'x': [loss]},
            outputs={'out': [avg_loss]},
            attrs={
                'ring_id': 0,
                OP_ROLE_KEY: OpRole.Optimize,
                'reduce_type': paddle.distributed.ReduceOp.SUM,
            },
        )
        program_block.append_op(
            type='c_sync_calc_stream',
            inputs={'X': [avg_loss]},
            outputs={'Out': [avg_loss]},
            attrs={OP_ROLE_KEY: OpRole.Optimize},
        )

        program_block.append_op(
            type='scale',
            inputs={'X': [avg_loss]},
            outputs={'Out': [avg_loss]},
            attrs={
                'scale': 1.0 / self.role_maker._worker_num(),
                OP_ROLE_KEY: OpRole.Optimize,
            },
        )

    def minimize_impl(
        self, loss, startup_program=None, parameter_list=None, no_grad_set=None
    ):
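        # Let the inner optimizer append the usual backward and update ops;
        # the adaptive synchronization logic below is layered on top.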
        minimized = self.inner_opt.minimize(
            loss, startup_program=startup_program
        )

        init_k_steps = self.user_defined_strategy.adaptive_localsgd_configs[
            'init_k_steps'
        ]
        begin_step_value = self.user_defined_strategy.adaptive_localsgd_configs[
            'begin_step'
        ]

        if startup_program is None:
            startup_program = default_startup_program()
        main_block = loss.block

        self.nrings = 2
        collective_helper = CollectiveHelper(self.role_maker, self.nrings)
        collective_helper.update_startup_program(startup_program)
        p2s = self.create_snapshot_vars(startup_program)
        self.init_snapshot_vars(startup_program, p2s)

        p2s = self.create_snapshot_vars(main_block.program)
        with program_guard(main_block.program, startup_program):
            step = paddle.optimizer.lr.autoincreased_step_counter(begin=1)

            k_steps = paddle.static.create_global_var(
                name="k_steps",
                shape=[1],
                value=int(init_k_steps),
                dtype='int64',
                persistable=True,
            )

            begin_step = paddle.static.create_global_var(
                name="begin_step",
                shape=[1],
                value=int(begin_step_value),
                dtype='int64',
                persistable=True,
            )

            last_step = paddle.static.create_global_var(
                name="last_step",
                shape=[1],
                value=0,
                dtype='int64',
                persistable=True,
            )

            avg_loss = paddle.static.create_global_var(
                name="avg_loss",
                shape=[1],
                value=float(0),
                dtype=loss.dtype,
                persistable=True,
            )

            lr_0 = paddle.static.create_global_var(
                name="lr_0",
                shape=[1],
                value=float(0),
                dtype='float32',
                persistable=True,
            )

            loss_0 = paddle.static.create_global_var(
                name="loss_0",
                shape=[1],
                value=float(0),
                dtype='float32',
                persistable=True,
            )

            global_lr = self.inner_opt._global_learning_rate()

            def initialize():
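                # Record the baseline loss and learning rate; the adaptive
                # k_steps rule is expressed relative to these.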
                self._generate_avg_loss(main_block, loss, avg_loss)
                paddle.assign(avg_loss, loss_0)
                paddle.assign(global_lr, lr_0)

            paddle.static.nn.cond(step == 1, initialize)

            def communicate():
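                # Same parameter-averaging routine as
                # LocalSGDOptimizer.communicate().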
                sub_block = default_main_program().current_block()
                ring_id = -1
                for param, snapshot in p2s:
                    sub_block.append_op(
                        type='elementwise_sub',
                        inputs={'X': [snapshot], 'Y': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    sub_block.append_op(
                        type='c_sync_calc_stream',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    ring_id = (ring_id + 1) % self.nrings
                    sub_block.append_op(
                        type='all_reduce',
                        inputs={'x': [param]},
                        outputs={'out': [param]},
                        attrs={
                            'ring_id': ring_id,
                            'reduce_type': paddle.distributed.ReduceOp.SUM,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )

                for ring_id in range(self.nrings):
                    sub_block.append_op(
                        type='c_sync_comm_stream',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={
                            'ring_id': ring_id,
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )

                for param, snapshot in p2s:
                    sub_block.append_op(
                        type='scale',
                        inputs={'X': [param]},
                        outputs={'Out': [param]},
                        attrs={
                            'scale': 1.0 / self.role_maker._worker_num(),
                            OP_ROLE_KEY: OpRole.Optimize,
                        },
                    )
                    sub_block.append_op(
                        type='elementwise_sub',
                        inputs={'X': [snapshot], 'Y': [param]},
                        outputs={'Out': [param]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                    sub_block.append_op(
                        type='assign',
                        inputs={'X': [param]},
                        outputs={'Out': [snapshot]},
                        attrs={OP_ROLE_KEY: OpRole.Optimize},
                    )
                paddle.assign(step, last_step)

            def communicate_avg_loss():
                communicate()
                self._generate_avg_loss(main_block, loss, avg_loss)

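                # Recompute the synchronization interval (clipped to [1, 16]):
                #   k_next = ceil(sqrt(init_k_steps * lr_0 * loss_t
                #                      / (lr_t * loss_0)))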
                next_local_steps = paddle.cast(
                    paddle.ceil(
                        paddle.sqrt(
                            lr_0
                            * avg_loss
                            / (global_lr * loss_0)
                            * float(init_k_steps)
                        )
                    ),
                    dtype='int64',
                )
                max_local_steps = paddle.full(
                    shape=[1], dtype='int64', fill_value=16
                )
                min_local_steps = paddle.full(
                    shape=[1], dtype='int64', fill_value=1
                )
                next_local_steps = paddle.minimum(
                    next_local_steps, max_local_steps
                )
                next_local_steps = paddle.maximum(
                    next_local_steps, min_local_steps
                )
                paddle.assign(next_local_steps, k_steps)

            def begin_localsgd():
                paddle.static.nn.cond(
                    step - last_step == k_steps, communicate_avg_loss
                )

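            # Until begin_step, average after every step; afterwards, only
            # every k_steps, re-deriving k_steps after each synchronization.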
            paddle.static.nn.cond(
                step > begin_step, begin_localsgd, communicate
            )

        return minimized
