// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MIXER_BORDER_OUT BIT(24)
#define CTL_FLUSH_MASK_CTL BIT(17)
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define DSC_IDX 22
#define INTF_IDX 31
#define WB_IDX 16
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
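/*
 * Map each SSPP index to its bit position in CTL_FETCH_PIPE_ACTIVE;
 * pipes marked CTL_INVALID_BIT have no fetch-active bit on this CTL.
 */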
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
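/*
 * Look up the catalog entry for @ctl and fill in the CTL block's
 * register map; returns an ERR_PTR if the CTL is not in the catalog.
 */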
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
struct dpu_hw_blk_reg_map *b)
{
int i;
for (i = 0; i < m->ctl_count; i++) {
if (ctl == m->ctl[i].id) {
b->blk_addr = addr + m->ctl[i].base;
b->log_mask = DPU_DBG_MASK_CTL;
return &m->ctl[i];
}
}
return ERR_PTR(-ENOMEM);
}
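/* Return the max blend stages of mixer @lm, or -EINVAL if it is unknown */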
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
enum dpu_lm lm)
{
int i;
int stages = -EINVAL;
for (i = 0; i < count; i++) {
if (lm == mixer[i].id) {
stages = mixer[i].sblk->maxblendstages;
break;
}
}
return stages;
}
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
return DPU_REG_READ(c, CTL_FLUSH);
}
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
trace_dpu_hw_ctl_update_pending_flush(flushbits,
ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
}
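/*
 * Active-CTL flush: program the per-block flush registers (merge_3d,
 * intf, wb) before kicking the top-level CTL_FLUSH mask.
 */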
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
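/* Set the fixed CTL_FLUSH bit assigned to @sspp in the pending flush mask */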
static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
switch (sspp) {
case SSPP_VIG0:
ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_CURSOR0:
ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
switch (lm) {
case LM_0:
ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
default:
break;
}
ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
switch (intf) {
case INTF_0:
ctx->pending_flush_mask |= BIT(31);
break;
case INTF_1:
ctx->pending_flush_mask |= BIT(30);
break;
case INTF_2:
ctx->pending_flush_mask |= BIT(29);
break;
case INTF_3:
ctx->pending_flush_mask |= BIT(28);
break;
default:
break;
}
}
static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
switch (wb) {
case WB_0:
case WB_1:
case WB_2:
ctx->pending_flush_mask |= BIT(WB_IDX);
break;
default:
break;
}
}
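/*
 * The v1 (active CTL) helpers track the per-block flush bits separately
 * and only set the aggregate WB/INTF/MERGE_3D bit in pending_flush_mask.
 */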
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
ctx->pending_flush_mask |= BIT(WB_IDX);
}
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
ctx->pending_flush_mask |= BIT(INTF_IDX);
}
static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d merge_3d)
{
ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp)
{
switch (dspp) {
case DSPP_0:
ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
ctx->pending_flush_mask |= BIT(21);
break;
default:
break;
}
}
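/*
 * Poll CTL_SW_RESET until the hardware clears the reset bit or
 * @timeout_us expires; returns the last status, i.e. non-zero on timeout.
 */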
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
ktime_t timeout;
u32 status;
timeout = ktime_add_us(ktime_get(), timeout_us);
	/*
	 * it takes around 30us for the mdp to finish resetting its ctl path;
	 * poll every 50us so that the reset should be complete by the first poll
	 */
do {
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x1;
if (status)
usleep_range(20, 50);
} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
return status;
}
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
return -EINVAL;
return 0;
}
static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 status;
status = DPU_REG_READ(c, CTL_SW_RESET);
status &= 0x01;
if (!status)
return 0;
pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
return -EINVAL;
}
return 0;
}
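/*
 * Clear the staging (LAYER/EXT*) registers of every mixer described in
 * the catalog and reset the fetch-active bitmap.
 */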
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int i;
for (i = 0; i < ctx->mixer_count; i++) {
enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}
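/*
 * Program the staging registers of mixer @lm from @stage_cfg.  Stage
 * numbers that do not fit in the 3-bit fields of CTL_LAYER overflow
 * into the EXT register; RECT_1 of multirect pipes is staged through
 * the EXT2/EXT3 registers.
 */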
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
if (stages < 0)
return;
if (test_bit(DPU_MIXER_SOURCESPLIT,
&ctx->mixer_hw_caps->features))
pipes_per_stage = PIPES_PER_STAGE;
else
pipes_per_stage = 1;
mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
if (!stage_cfg)
goto exit;
for (i = 0; i <= stages; i++) {
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
ext = i >= 7;
for (j = 0 ; j < pipes_per_stage; j++) {
enum dpu_sspp_multirect_index rect_index =
stage_cfg->multirect_index[i][j];
switch (stage_cfg->stage[i][j]) {
case SSPP_VIG0:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
} else {
mixercfg |= mix << 0;
mixercfg_ext |= ext << 0;
}
break;
case SSPP_VIG1:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
} else {
mixercfg |= mix << 3;
mixercfg_ext |= ext << 2;
}
break;
case SSPP_VIG2:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
} else {
mixercfg |= mix << 6;
mixercfg_ext |= ext << 4;
}
break;
case SSPP_VIG3:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
} else {
mixercfg |= mix << 26;
mixercfg_ext |= ext << 6;
}
break;
case SSPP_RGB0:
mixercfg |= mix << 9;
mixercfg_ext |= ext << 8;
break;
case SSPP_RGB1:
mixercfg |= mix << 12;
mixercfg_ext |= ext << 10;
break;
case SSPP_RGB2:
mixercfg |= mix << 15;
mixercfg_ext |= ext << 12;
break;
case SSPP_RGB3:
mixercfg |= mix << 29;
mixercfg_ext |= ext << 14;
break;
case SSPP_DMA0:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
} else {
mixercfg |= mix << 18;
mixercfg_ext |= ext << 16;
}
break;
case SSPP_DMA1:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
} else {
mixercfg |= mix << 21;
mixercfg_ext |= ext << 18;
}
break;
case SSPP_DMA2:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
} else {
mix |= (i + 1) & 0xF;
mixercfg_ext2 |= mix << 0;
}
break;
case SSPP_DMA3:
if (rect_index == DPU_SSPP_RECT_1) {
mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
} else {
mix |= (i + 1) & 0xF;
mixercfg_ext2 |= mix << 4;
}
break;
case SSPP_CURSOR0:
mixercfg_ext |= ((i + 1) & 0xF) << 20;
break;
case SSPP_CURSOR1:
mixercfg_ext |= ((i + 1) & 0xF) << 26;
break;
default:
break;
}
}
}
exit:
DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
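/*
 * Active-CTL interface configuration: mark the interface, writeback and
 * merge_3d blocks driven by this CTL path as active.
 */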
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
	/* CTL_TOP[31:28] carries group_id to collate CTL paths
	 * per VM. Explicitly disable it until VM support is
	 * added in SW, as the power-on reset value does not
	 * disable it.
	 */
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
if (cfg->dsc)
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
if (cfg->dsc) {
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}
}
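/* Legacy (pre active-CTL) interface configuration, written into CTL_TOP */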
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_cfg = 0;
intf_cfg |= (cfg->intf & 0xF) << 4;
if (cfg->mode_3d) {
intf_cfg |= BIT(19);
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
if (cfg->wb)
intf_cfg |= (cfg->wb & 0x3) + 2;
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
intf_cfg &= ~(0x3 << 15);
break;
case DPU_CTL_MODE_SEL_CMD:
intf_cfg |= BIT(17);
intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
break;
default:
pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
return;
}
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
u32 merge3d_active = 0;
	/*
	 * This API resets each portion of the CTL path: it clears the
	 * sspps staged on the lm, the merge_3d block, the interfaces,
	 * writeback etc., to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to get a proper
	 * teardown of the writeback session, but upon further
	 * validation it can be extended to all interfaces.
	 */
if (cfg->merge_3d) {
merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
merge3d_active);
}
dpu_hw_ctl_clear_all_blendstages(ctx);
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
}
}
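/*
 * Translate the SSPP bitmap in @fetch_active into CTL_FETCH_PIPE_ACTIVE
 * bits using fetch_tbl; a NULL bitmap disables all fetch pipes.
 */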
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
int i;
u32 val = 0;
if (fetch_active) {
for (i = 0; i < SSPP_MAX; i++) {
if (test_bit(i, fetch_active) &&
fetch_tbl[i] != CTL_INVALID_BIT)
val |= BIT(fetch_tbl[i]);
}
}
DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}
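/*
 * Populate the ops table; CTLs with DPU_CTL_ACTIVE_CFG use the v1
 * (active CTL) variants of the flush and interface configuration ops.
 */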
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
unsigned long cap)
{
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
ops->get_flush_register = dpu_hw_ctl_get_flush_register;
ops->trigger_start = dpu_hw_ctl_trigger_start;
ops->is_started = dpu_hw_ctl_is_started;
ops->trigger_pending = dpu_hw_ctl_trigger_pending;
ops->reset = dpu_hw_ctl_reset_control;
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}
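/*
 * Allocate and set up a hw ctl context for CTL block @idx using catalog
 * @m; returns an ERR_PTR on failure, so callers must check with IS_ERR().
 */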
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
{
struct dpu_hw_ctl *c;
const struct dpu_ctl_cfg *cfg;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
cfg = _ctl_offset(idx, m, addr, &c->hw);
if (IS_ERR_OR_NULL(cfg)) {
kfree(c);
pr_err("failed to create dpu_hw_ctl %d\n", idx);
return ERR_PTR(-EINVAL);
}
c->caps = cfg;
_setup_ctl_ops(&c->ops, c->caps->features);
c->idx = idx;
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
return c;
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
kfree(ctx);
}