// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>

#include "iris_core.h"
#include "iris_vpu_common.h"
#include "iris_vpu_register_defines.h"

#define WRAPPER_TZ_BASE_OFFS 0x000C0000
#define AON_BASE_OFFS 0x000E0000

#define CPU_IC_BASE_OFFS (CPU_BASE_OFFS)
#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE_OFFS + 0x1C)
#define CLEAR_XTENSA2HOST_INTR BIT(0)
#define CTRL_INIT (CPU_CS_BASE_OFFS + 0x48)
#define CTRL_STATUS (CPU_CS_BASE_OFFS + 0x4C)
#define CTRL_INIT_IDLE_MSG_BMSK 0x40000000
#define CTRL_ERROR_STATUS__M 0xfe
#define CTRL_STATUS_PC_READY 0x100
#define QTBL_INFO (CPU_CS_BASE_OFFS + 0x50)
#define QTBL_ENABLE BIT(0)
#define QTBL_ADDR (CPU_CS_BASE_OFFS + 0x54)
#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE_OFFS + 0x58)
#define SFR_ADDR (CPU_CS_BASE_OFFS + 0x5C)
#define UC_REGION_ADDR (CPU_CS_BASE_OFFS + 0x64)
#define UC_REGION_SIZE (CPU_CS_BASE_OFFS + 0x68)
#define CPU_CS_H2XSOFTINTEN (CPU_CS_BASE_OFFS + 0x148)
#define HOST2XTENSA_INTR_ENABLE BIT(0)
#define CPU_CS_X2RPMH (CPU_CS_BASE_OFFS + 0x168)
#define MSK_SIGNAL_FROM_TENSILICA BIT(0)
#define MSK_CORE_POWER_ON BIT(1)
#define CPU_IC_SOFTINT (CPU_IC_BASE_OFFS + 0x150)
#define CPU_IC_SOFTINT_H2A_SHFT 0x0
#define WRAPPER_INTR_STATUS (WRAPPER_BASE_OFFS + 0x0C)
#define WRAPPER_INTR_STATUS_A2HWD_BMSK BIT(3)
#define WRAPPER_INTR_STATUS_A2H_BMSK BIT(2)
#define WRAPPER_INTR_MASK (WRAPPER_BASE_OFFS + 0x10)
#define WRAPPER_INTR_MASK_A2HWD_BMSK BIT(3)
#define WRAPPER_INTR_MASK_A2HCPU_BMSK BIT(2)
#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x54)
#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS (WRAPPER_BASE_OFFS + 0x58)
#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
#define WRAPPER_TZ_CPU_STATUS (WRAPPER_TZ_BASE_OFFS + 0x10)
#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
#define CTL_AXI_CLK_HALT BIT(0)
#define CTL_CLK_HALT BIT(1)
#define WRAPPER_TZ_QNS4PDXFIFO_RESET (WRAPPER_TZ_BASE_OFFS + 0x18)
#define RESET_HIGH BIT(0)
#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS)
#define REQ_POWER_DOWN_PREP BIT(0)
#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4)
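
/*
 * Unmask the firmware-to-host (A2H) and watchdog (A2HWD) interrupts
 * in the wrapper interrupt mask register.
 */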
static void iris_vpu_interrupt_init(struct iris_core *core)
{
	u32 mask_val;

	mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
	mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
		      WRAPPER_INTR_MASK_A2HCPU_BMSK);
	writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
}
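
/*
 * Program the uncached region base address and size, the interface queue
 * table address and the SFR address into the VPU control registers ahead
 * of firmware boot, so the firmware can locate the shared HFI memory.
 */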
static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
	u32 queue_size, value;
	const struct vpu_ops *vpu_ops = core->iris_platform_data->vpu_ops;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
			   (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + UC_REGION_ADDR);

	/* Iris hardware requires 1M queue alignment */
	value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
	writel(value, core->reg_base + UC_REGION_SIZE);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + QTBL_ADDR);

	writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);

	if (core->sfr_daddr) {
		value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
		writel(value, core->reg_base + SFR_ADDR);
	}

	if (vpu_ops->program_bootup_registers)
		vpu_ops->program_bootup_registers(core);
}
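
/*
 * Boot the firmware: publish the shared memory map, write CTRL_INIT to
 * start the controller, then poll CTRL_STATUS until the firmware reports
 * ready or an error, giving up after max_tries iterations.
 */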
int iris_vpu_boot_firmware(struct iris_core *core)
{
	u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;

	iris_vpu_setup_ucregion_memory_map(core);

	writel(ctrl_init, core->reg_base + CTRL_INIT);
	writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);

	while (!ctrl_status && count < max_tries) {
		ctrl_status = readl(core->reg_base + CTRL_STATUS);
		if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
			dev_err(core->dev, "invalid setting for uc_region\n");
			break;
		}

		usleep_range(50, 100);
		count++;
	}

	if (count >= max_tries) {
		dev_err(core->dev, "error booting up iris firmware\n");
		return -ETIME;
	}

	writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
	writel(0x0, core->reg_base + CPU_CS_X2RPMH);

	return 0;
}
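
/* Raise the host-to-firmware (H2A) soft interrupt. */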
void iris_vpu_raise_interrupt(struct iris_core *core)
{
	writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
}
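
/*
 * Latch the pending wrapper interrupt status bits for the interrupt
 * handler, then clear the firmware-to-host soft interrupt.
 */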
void iris_vpu_clear_interrupt(struct iris_core *core)
{
	u32 intr_status, mask;

	intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
	mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
		WRAPPER_INTR_STATUS_A2HWD_BMSK |
		CTRL_INIT_IDLE_MSG_BMSK);

	if (intr_status & mask)
		core->intr_status |= intr_status;

	writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
}
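
/* Report a firmware watchdog timeout signalled via the A2HWD interrupt bit. */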
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
{
	if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
		dev_err(core->dev, "received watchdog interrupt\n");
		return -ETIME;
	}

	return 0;
}
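
/*
 * Prepare the firmware for power collapse: proceed only when the core is
 * idle and in WFI, send the HFI power-collapse-prepare command, then wait
 * for the firmware to report PC_READY and re-enter WFI.
 */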
int iris_vpu_prepare_pc(struct iris_core *core)
{
	u32 wfi_status, idle_status, pc_ready;
	u32 ctrl_status, val = 0;
	int ret;

	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
	idle_status = ctrl_status & BIT(30);
	if (pc_ready)
		return 0;

	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	if (!wfi_status || !idle_status)
		goto skip_power_off;

	ret = core->hfi_ops->sys_pc_prep(core);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
				 val & CTRL_STATUS_PC_READY, 250, 2500);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
				 val, val & BIT(0), 250, 2500);
	if (ret)
		goto skip_power_off;

	return 0;

skip_power_off:
	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	dev_err(core->dev, "skip power collapse (wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
		wfi_status, idle_status, pc_ready, ctrl_status);

	return -EAGAIN;
}
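
/*
 * Power off the controller: mask the Xtensa-to-RPMh signals, request
 * low-power state on the MVP NOC and CPU NOC interfaces and wait for the
 * handshakes, clear the debug bridge LPI control and wait for its status
 * to clear, halt the TZ AXI clocks and pulse the QNS4PDX FIFO reset, then
 * disable the controller clocks and power domain.
 */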
int iris_vpu_power_off_controller(struct iris_core *core)
{
	u32 val = 0;
	int ret;

	writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);

	writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
				 val, val == 0, 200, 2000);
	if (ret)
		goto disable_power;

	writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
	       core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
	writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);

disable_power:
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return 0;
}
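
/*
 * Power off the video hardware: return its power domain to software
 * control, then disable the domain and the hardware clock.
 */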
void iris_vpu_power_off_hw(struct iris_core *core)
{
	dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
}
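
/*
 * Power off the VPU: drop the clock rate, power off the hardware and the
 * controller, release the interconnect bandwidth vote and, unless a
 * watchdog interrupt is pending, disable the IRQ.
 */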
void iris_vpu_power_off(struct iris_core *core)
{
	dev_pm_opp_set_rate(core->dev, 0);
	core->iris_platform_data->vpu_ops->power_off_hw(core);
	core->iris_platform_data->vpu_ops->power_off_controller(core);
	iris_unset_icc_bw(core);

	if (!iris_vpu_watchdog(core, core->intr_status))
		disable_irq_nosync(core->irq);
}
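
/*
 * Power on the controller: enable its power domain, issue the bulk resets,
 * then enable the AXI and controller clocks.
 */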
int iris_vpu_power_on_controller(struct iris_core *core)
{
	u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
	if (ret)
		goto err_disable_clock;

	return 0;

err_disable_clock:
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return ret;
}
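
/*
 * Power on the video hardware: enable its power domain and clock, then
 * hand the power domain over to hardware control.
 */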
int iris_vpu_power_on_hw(struct iris_core *core)
{
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
	if (ret)
		goto err_disable_power;

	ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
	if (ret)
		goto err_disable_clock;

	return 0;

err_disable_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);

	return ret;
}
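
/*
 * Power on the VPU: vote interconnect bandwidth, power on the controller
 * and the hardware, set the clock rate (last requested frequency, falling
 * back to the maximum rate), program the preset registers, unmask the
 * wrapper interrupts and enable the IRQ.
 */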
int iris_vpu_power_on(struct iris_core *core)
{
	u32 freq;
	int ret;

	ret = iris_set_icc_bw(core, INT_MAX);
	if (ret)
		goto err;

	ret = core->iris_platform_data->vpu_ops->power_on_controller(core);
	if (ret)
		goto err_unvote_icc;

	ret = core->iris_platform_data->vpu_ops->power_on_hw(core);
	if (ret)
		goto err_power_off_ctrl;

	freq = core->power.clk_freq ? core->power.clk_freq :
				      (u32)ULONG_MAX;

	dev_pm_opp_set_rate(core->dev, freq);
	core->iris_platform_data->set_preset_registers(core);

	iris_vpu_interrupt_init(core);
	core->intr_status = 0;
	enable_irq(core->irq);

	return 0;

err_power_off_ctrl:
	core->iris_platform_data->vpu_ops->power_off_controller(core);
err_unvote_icc:
	iris_unset_icc_bw(core);
err:
	dev_err(core->dev, "power on failed\n");

	return ret;
}