/*******************************************************************************
* Copyright 2016-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_JIT_AVX2_1x1_CONVOLUTION_HPP
#define CPU_JIT_AVX2_1x1_CONVOLUTION_HPP

#include "c_types_map.hpp"
#include "memory_tracking.hpp"
#include "mkldnn_thread.hpp"
#include "utils.hpp"

#include "cpu_convolution_pd.hpp"
#include "cpu_primitive.hpp"
#include "cpu_reducer.hpp"

#include "jit_avx2_1x1_conv_kernel_f32.hpp"
#include "jit_uni_1x1_conv_utils.hpp"

namespace mkldnn {
namespace impl {
namespace cpu {

struct jit_avx2_1x1_convolution_fwd_t: public cpu_primitive_t {
    // TODO: (Roma) Code duplication duplication! Remove with templates
    //              (maybe...)!
    struct pd_t: public cpu_convolution_fwd_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(engine, adesc, attr, hint_fwd_pd)
            , jcp_(), rtus_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_1x1:", avx2, ""),
                jit_avx2_1x1_convolution_fwd_t);

        status_t init() {
            bool ok = true
                && is_fwd()
                && set_default_alg_kind(alg_kind::convolution_direct)
                && expect_data_types(data_type::f32, data_type::f32,
                        data_type::f32, data_type::f32, data_type::f32)
                && !has_zero_dim_memory()
                && set_default_formats();
            if (!ok) return status::unimplemented;

            const convolution_desc_t *conv_d = desc();
            const memory_desc_t *src_d = src_md();
            rtus_prepare(this, conv_d, src_d, dst_md());

            status_t status = jit_avx2_1x1_conv_kernel_f32::init_conf(jcp_,
                    *conv_d, *src_d, *weights_md(), *dst_md(), *attr());
            if (status != status::success) return status;

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx2_1x1_conv_kernel_f32::init_scratchpad(scratchpad, jcp_);

            rtus_prepare_space_info(this, scratchpad);

            return status::success;
        }

        jit_1x1_conv_conf_t jcp_;
        reduce_to_unit_stride_t rtus_;

    protected:
        bool set_default_formats() {
            using namespace format_tag;

            auto dat_tag = utils::pick(ndims() - 3, nCw8c, nChw8c, nCdhw8c);
            auto wei_tag = with_groups()
                ? utils::pick(ndims() - 3, gOIw8i8o, gOIhw8i8o)
                : utils::pick(ndims() - 3, OIw8i8o, OIhw8i8o);

            return set_default_formats_common(dat_tag, wei_tag, dat_tag);
        }
    };
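    // Note: the 1x1 kernel itself assumes unit spatial strides. For strided
    // convolutions the "reduce to unit stride" (rtus) driver created by
    // init_rtus_driver() first gathers the strided source into a dense
    // scratchpad buffer that the kernel then reads
    // (see jit_uni_1x1_conv_utils.hpp).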
    template <cpu_isa_t isa, typename conv_t>
    friend void init_rtus_driver(conv_t *self);

    jit_avx2_1x1_convolution_fwd_t(const pd_t *apd)
        : cpu_primitive_t(apd)
        , kernel_(nullptr), rtus_driver_(nullptr)
    {
        kernel_ = new jit_avx2_1x1_conv_kernel_f32(pd()->jcp_, *pd()->attr());
        init_rtus_driver<avx2>(this);
    }

    ~jit_avx2_1x1_convolution_fwd_t() {
        delete kernel_;
        delete rtus_driver_;
    }

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual status_t execute(const exec_ctx_t &ctx) const override {
        execute_forward(ctx);
        return status::success;
    }

private:
    void execute_forward(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }

    jit_avx2_1x1_conv_kernel_f32 *kernel_;
    rtus_driver_t<avx2> *rtus_driver_;
};

struct jit_avx2_1x1_convolution_bwd_data_t: public cpu_primitive_t {
    struct pd_t: public cpu_convolution_bwd_data_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_data_pd_t(engine, adesc, attr, hint_fwd_pd)
            , jcp_(), rtus_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_1x1:", avx2, ""),
                jit_avx2_1x1_convolution_bwd_data_t);

        status_t init() {
            bool ok = true
                && desc()->prop_kind == prop_kind::backward_data
                && set_default_alg_kind(alg_kind::convolution_direct)
                && expect_data_types(data_type::f32, data_type::f32,
                        data_type::undef, data_type::f32, data_type::f32)
                && !has_zero_dim_memory()
                && set_default_formats();
            if (!ok) return status::unimplemented;

            const convolution_desc_t *conv_d = desc();
            const memory_desc_t *diff_src_d = diff_src_md();
            rtus_prepare(this, conv_d, diff_src_d, diff_dst_md());

            status_t status = jit_avx2_1x1_conv_kernel_f32::init_conf(jcp_,
                    *conv_d, *diff_src_d, *weights_md(), *diff_dst_md(),
                    *attr());
            if (status != status::success) return status;

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx2_1x1_conv_kernel_f32::init_scratchpad(scratchpad, jcp_);

            rtus_prepare_space_info(this, scratchpad);

            return status::success;
        }

        jit_1x1_conv_conf_t jcp_;
        reduce_to_unit_stride_t rtus_;

    protected:
        bool set_default_formats() {
            using namespace format_tag;

            auto dat_tag = utils::pick(ndims() - 3, nCw8c, nChw8c, nCdhw8c);
            auto wei_tag = with_groups()
                ? utils::pick(ndims() - 3, gOIw8o8i, gOIhw8o8i)
                : utils::pick(ndims() - 3, OIw8o8i, OIhw8o8i);

            return set_default_formats_common(dat_tag, wei_tag, dat_tag);
        }
    };
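    // Backward data drives the same 1x1 kernel with diff_dst as its input and
    // diff_src as its output, which is why set_default_formats() above picks
    // the transposed 8o8i weight layout instead of 8i8o.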
    template <cpu_isa_t isa, typename conv_t>
    friend void init_rtus_driver(conv_t *self);

    jit_avx2_1x1_convolution_bwd_data_t(const pd_t *apd)
        : cpu_primitive_t(apd)
        , kernel_(nullptr)
        , rtus_driver_(nullptr)
    {
        kernel_ = new jit_avx2_1x1_conv_kernel_f32(pd()->jcp_, *pd()->attr());
        init_rtus_driver<avx2>(this);
    }

    ~jit_avx2_1x1_convolution_bwd_data_t() {
        delete kernel_;
        delete rtus_driver_;
    }

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual status_t execute(const exec_ctx_t &ctx) const override {
        execute_backward_data(ctx);
        return status::success;
    }

private:
    void execute_backward_data(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }

    jit_avx2_1x1_conv_kernel_f32 *kernel_;
    rtus_driver_t<avx2> *rtus_driver_;
};

struct jit_avx2_1x1_convolution_bwd_weights_t: public cpu_primitive_t {
    struct pd_t: public cpu_convolution_bwd_weights_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_weights_pd_t(engine, adesc, attr,
                    hint_fwd_pd)
            , jcp_(), rtus_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_1x1:", avx2, ""),
                jit_avx2_1x1_convolution_bwd_weights_t);

        status_t init() {
            bool ok = true
                && desc()->prop_kind == prop_kind::backward_weights
                && set_default_alg_kind(alg_kind::convolution_direct)
                && expect_data_types(data_type::f32, data_type::f32,
                        data_type::f32, data_type::f32, data_type::f32)
                && !has_zero_dim_memory()
                && set_default_formats();
            if (!ok) return status::unimplemented;

            const convolution_desc_t *conv_d = desc();
            const memory_desc_t *src_d = src_md();
            rtus_prepare(this, conv_d, src_d, diff_dst_md());

            status_t status = jit_avx2_1x1_conv_kernel_f32::init_conf(jcp_,
                    *conv_d, *src_d, *diff_weights_md(), *diff_dst_md(),
                    *attr());
            if (status != status::success) return status;

            init_balancers();

            auto scratchpad = scratchpad_registry().registrar();
            jit_avx2_1x1_conv_kernel_f32::init_scratchpad(scratchpad, jcp_);

            rtus_prepare_space_info(this, scratchpad);

            auto reducer_bia_scratchpad = memory_tracking::registrar_t(
                    scratchpad, memory_tracking::names::prefix_reducer_bia);
            reducer_bia_conf_.init_scratchpad(reducer_bia_scratchpad);

            auto reducer_wei_scratchpad = memory_tracking::registrar_t(
                    scratchpad, memory_tracking::names::prefix_reducer_wei);
            reducer_wei_conf_.init_scratchpad(reducer_wei_scratchpad);

            return status::success;
        }

        jit_1x1_conv_conf_t jcp_;
        cpu_reducer_t<data_type::f32>::conf_t reducer_bia_conf_;
        cpu_reducer_2d_t<data_type::f32>::conf_t reducer_wei_conf_;
        reduce_to_unit_stride_t rtus_;

    protected:
        bool set_default_formats() {
            using namespace format_tag;

            auto dat_tag = utils::pick(ndims() - 3, nCw8c, nChw8c, nCdhw8c);
            auto wei_tag = with_groups()
                ? utils::pick(ndims() - 3, gOIw8i8o, gOIhw8i8o)
                : utils::pick(ndims() - 3, OIw8i8o, OIhw8i8o);

            return set_default_formats_common(dat_tag, wei_tag, dat_tag);
        }

    private:
        void init_balancers() {
            const int ic_block = jcp_.bcast_block;
            const int nb_ic = jcp_.nb_bcast;
            const int nb_ic_blocking = jcp_.nb_bcast_blocking;
            const int bcast_work = utils::div_up(nb_ic, nb_ic_blocking);

            const int oc_block = jcp_.load_block;
            const int nb_oc = jcp_.nb_load;
            const int nb_oc_blocking = jcp_.nb_load_blocking;
            const int load_work = utils::div_up(nb_oc, nb_oc_blocking);

            const int job_size
                = nb_oc_blocking * nb_ic_blocking * ic_block * oc_block;
            const int njobs_x = bcast_work;
            const int njobs_y = jcp_.ngroups * load_work;

            const int max_threads = mkldnn_get_max_threads();
            const size_t max_buffer_size = max_threads * job_size * 8;

            if (with_bias()) {
                reducer_bia_conf_.init(reduce_balancer_t(max_threads,
                        oc_block, jcp_.ngroups * jcp_.oc / oc_block,
                        jcp_.mb, max_buffer_size));
            }

            reducer_wei_conf_.init(
                    reduce_balancer_t(max_threads, job_size,
                        njobs_y * njobs_x, jcp_.mb * jcp_.nb_reduce,
                        max_buffer_size),
                    job_size / nb_oc_blocking, nb_oc_blocking, ic_block,
                    nb_ic * ic_block * oc_block, nb_oc);
        }
    };
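    // Backward weights accumulates partial diff_weights / diff_bias results
    // across threads and combines them with cpu_reducer_2d_t / cpu_reducer_t;
    // the reduction work split and buffer sizes come from init_balancers()
    // above.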
    template <cpu_isa_t isa, typename conv_t>
    friend void init_rtus_driver(conv_t *self);

    jit_avx2_1x1_convolution_bwd_weights_t(const pd_t *apd);

    ~jit_avx2_1x1_convolution_bwd_weights_t() {
        delete kernel_;
        delete rtus_driver_;
        delete reducer_weights_;
        delete reducer_bias_;
    }

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual status_t execute(const exec_ctx_t &ctx) const override {
        execute_backward_weights(ctx);
        return status::success;
    }

private:
    void execute_backward_weights(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }

    jit_avx2_1x1_conv_kernel_f32 *kernel_;
    cpu_reducer_2d_t<data_type::f32> *reducer_weights_;
    cpu_reducer_t<data_type::f32> *reducer_bias_;
    rtus_driver_t<avx2> *rtus_driver_;
};

}
}
}

#endif