// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * IPD Support.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

cvmx_ipd_config_t cvmx_ipd_cfg = {
	.first_mbuf_skip = 184,
	.ipd_enable = 1,
	.cache_mode = CVMX_IPD_OPC_MODE_STT,
	.packet_pool = { 0, 2048, 0 },
	.wqe_pool = { 1, 128, 0 },
	.port_config = { CVMX_PIP_PORT_CFG_MODE_SKIPL2,
			 CVMX_POW_TAG_TYPE_ORDERED, CVMX_PIP_TAG_MODE_TUPLE,
			 .tag_fields = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }
};

#define IPD_RED_AVG_DLY 1000
#define IPD_RED_PRB_DLY 1000

/**
 * Configure IPD
 *
 * @param mbuff_size Packet buffer size in 8-byte words
 * @param first_mbuff_skip
 *                   Number of 8-byte words to skip in the first buffer
 * @param not_first_mbuff_skip
 *                   Number of 8-byte words to skip in each following buffer
 * @param first_back Number of 128-byte cache lines to step back from the
 *                   first data pointer to reach the start of the buffer
 * @param second_back
 *                   Same as @first_back, but for all following buffers
 * @param wqe_fpa_pool
 *                   FPA pool to get work queue entries from
 * @param cache_mode How packet data is written into L2/DRAM
 * @param back_pres_enable_flag
 *                   Enable or disable port back pressure at a global level
 */
void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip,
		     u64 not_first_mbuff_skip, u64 first_back, u64 second_back,
		     u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
		     u64 back_pres_enable_flag)
{
	cvmx_ipd_1st_mbuff_skip_t first_skip;
	cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
	cvmx_ipd_packet_mbuff_size_t size;
	cvmx_ipd_1st_next_ptr_back_t first_back_struct;
	cvmx_ipd_second_next_ptr_back_t second_back_struct;
	cvmx_ipd_wqe_fpa_queue_t wqe_pool;
	cvmx_ipd_ctl_status_t ipd_ctl_reg;

	/*
	 * Enforce the 1st-skip minimum if the WQE shares the buffer
	 * with the packet data (no separate work-queue-entry pointer).
	 */
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		union cvmx_ipd_ctl_status ctl_status;

		ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
		if (ctl_status.s.no_wptr != 0 && first_mbuff_skip < 16)
			first_mbuff_skip = 16;
	}

	first_skip.u64 = 0;
	first_skip.s.skip_sz = first_mbuff_skip;
	csr_wr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);

	not_first_skip.u64 = 0;
	not_first_skip.s.skip_sz = not_first_mbuff_skip;
	csr_wr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);

	size.u64 = 0;
	size.s.mb_size = mbuff_size;
	csr_wr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);

	first_back_struct.u64 = 0;
	first_back_struct.s.back = first_back;
	csr_wr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);

	second_back_struct.u64 = 0;
	second_back_struct.s.back = second_back;
	csr_wr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);

	wqe_pool.u64 = 0;
	wqe_pool.s.wqe_pool = wqe_fpa_pool;
	csr_wr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);

	ipd_ctl_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	ipd_ctl_reg.s.opc_mode = cache_mode;
	ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);

	/* Note: the example RED code is below */
}

/**
 * Enable IPD
 */
void cvmx_ipd_enable(void)
{
	cvmx_ipd_ctl_status_t ipd_reg;

	ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);

	/*
	 * Busy-wait while the IPD reset sequence is still in progress
	 * on CN68XX (rst_done reads non-zero until the reset completes).
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		while (ipd_reg.s.rst_done != 0)
			ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);

	if (ipd_reg.s.ipd_en)
		debug("Warning: Enabling IPD when IPD already enabled.\n");

	ipd_reg.s.ipd_en = 1;

	if (cvmx_ipd_cfg.enable_len_M8_fix)
		ipd_reg.s.len_m8 = 1;

	csr_wr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
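
/*
 * Example RED (Random Early Discard) configuration, as referenced in
 * cvmx_ipd_config() above. This is an illustrative sketch modeled on
 * the Octeon SDK's cvmx_ipd_setup_red_queue()/cvmx_ipd_setup_red();
 * it assumes the cvmx_ipd_qosx_red_marks, cvmx_ipd_red_quex_param and
 * cvmx_ipd_red_port_enable register layouts from <mach/cvmx-ipd-defs.h>
 * and is not a verified drop-in implementation.
 */
static void cvmx_ipd_setup_red_queue(int queue, int pass_thresh,
				     int drop_thresh)
{
	cvmx_ipd_qosx_red_marks_t red_marks;
	cvmx_ipd_red_quex_param_t red_param;

	/*
	 * Begin dropping packets when only pass_thresh buffers remain,
	 * ramping linearly up to a 100% drop rate at drop_thresh buffers.
	 */
	red_marks.u64 = 0;
	red_marks.s.drop = drop_thresh;
	red_marks.s.pass = pass_thresh;
	csr_wr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);

	/* Use the actual queue size, not the moving average */
	red_param.u64 = 0;
	red_param.s.prb_con = (255ull << 24) /
			      (red_marks.s.pass - red_marks.s.drop);
	red_param.s.avg_con = 1;
	red_param.s.new_con = 255;
	red_param.s.use_pcnt = 1;
	csr_wr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
}

void cvmx_ipd_setup_red(int pass_thresh, int drop_thresh)
{
	cvmx_ipd_red_port_enable_t red_port_enable;
	int queue;

	/* Program all eight QoS queues with the same thresholds */
	for (queue = 0; queue < 8; queue++)
		cvmx_ipd_setup_red_queue(queue, pass_thresh, drop_thresh);

	/* Enable RED on all ports, using the delay constants above */
	red_port_enable.u64 = 0;
	red_port_enable.s.prt_enb = 0xfffffffffull;
	red_port_enable.s.avg_dly = IPD_RED_AVG_DLY;
	red_port_enable.s.prb_dly = IPD_RED_PRB_DLY;
	csr_wr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
}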
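
/*
 * Illustrative bring-up sequence showing the unit conversions the
 * cvmx_ipd_config() parameters expect. The function name, buffer size,
 * skip values and pool number below are example values that simply
 * mirror cvmx_ipd_cfg above; they are not requirements.
 */
static void example_ipd_setup(void)
{
	const u64 packet_buf_size = 2048;	/* bytes, as in cvmx_ipd_cfg */
	const u64 first_skip = 184;		/* bytes, as in cvmx_ipd_cfg */
	const u64 not_first_skip = 0;		/* bytes */

	cvmx_ipd_config(packet_buf_size / 8,	/* buffer size, 8-byte words */
			first_skip / 8,		/* first skip, 8-byte words */
			not_first_skip / 8,	/* later skips, 8-byte words */
			(first_skip + 8) / 128,	/* back, 128-byte lines */
			(not_first_skip + 8) / 128,
			1,			/* WQE pool, as in cvmx_ipd_cfg */
			CVMX_IPD_OPC_MODE_STT,
			1);			/* global port back pressure on */

	cvmx_ipd_enable();
}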