// SPDX-License-Identifier: GPL-2.0+
#include "lan966x_main.h"
#define LAN966X_TAPRIO_TIMEOUT_MS 1000
#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
/* Minimum supported cycle time in nanoseconds */
#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
/* Maximum supported cycle time in nanoseconds */
#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
/* Total number of TAS GCL entries */
#define LAN966X_TAPRIO_NUM_GCL 256
/* TAPRIO link speeds for calculation of guard band */
enum lan966x_taprio_link_speed {
LAN966X_TAPRIO_SPEED_NO_GB,
LAN966X_TAPRIO_SPEED_10,
LAN966X_TAPRIO_SPEED_100,
LAN966X_TAPRIO_SPEED_1000,
LAN966X_TAPRIO_SPEED_2500,
};
/* TAPRIO list states */
enum lan966x_taprio_state {
LAN966X_TAPRIO_STATE_ADMIN,
LAN966X_TAPRIO_STATE_ADVANCING,
LAN966X_TAPRIO_STATE_PENDING,
LAN966X_TAPRIO_STATE_OPERATING,
LAN966X_TAPRIO_STATE_TERMINATING,
LAN966X_TAPRIO_STATE_MAX,
};
/* TAPRIO GCL command */
enum lan966x_taprio_gcl_cmd {
LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
};
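/* Each port owns LAN966X_TAPRIO_ENTRIES_PER_PORT consecutive TAS lists. Map a
 * (port, entry) pair to the global list index used by the HW.
 */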
static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
{
return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
}
static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
u32 val;
val = lan_rd(lan966x, QSYS_TAS_LST);
return QSYS_TAS_LST_LIST_STATE_GET(val);
}
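/* Select the given list in QSYS_TAS_CFG_CTRL and return its current state */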
static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
u32 list)
{
struct lan966x *lan966x = port->lan966x;
lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
QSYS_TAS_CFG_CTRL_LIST_NUM,
lan966x, QSYS_TAS_CFG_CTRL);
return lan966x_taprio_list_state_get(port);
}
static void lan966x_taprio_list_state_set(struct lan966x_port *port,
u32 state)
{
struct lan966x *lan966x = port->lan966x;
lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
QSYS_TAS_LST_LIST_STATE,
lan966x, QSYS_TAS_LST);
}
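/* Bring a list that is advancing, pending or operating back to the admin
 * state. The state is written repeatedly because the HW can move the list
 * between states while this runs. If the list was operating, all the gates of
 * the port are reopened once the list is stopped.
 */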
static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
u32 list)
{
struct lan966x *lan966x = port->lan966x;
bool pending, operating;
unsigned long end;
u32 state;
end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
	/* It is required to try multiple times to set the state of the list,
	 * because the HW can overwrite it.
	 */
do {
state = lan966x_taprio_list_state_get(port);
pending = false;
operating = false;
if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
state == LAN966X_TAPRIO_STATE_PENDING) {
lan966x_taprio_list_state_set(port,
LAN966X_TAPRIO_STATE_ADMIN);
pending = true;
}
if (state == LAN966X_TAPRIO_STATE_OPERATING) {
lan966x_taprio_list_state_set(port,
LAN966X_TAPRIO_STATE_TERMINATING);
operating = true;
}
		/* If the list was in pending and is now in admin, then there
		 * is nothing else to do, so just bail out
		 */
state = lan966x_taprio_list_state_get(port);
if (pending &&
state == LAN966X_TAPRIO_STATE_ADMIN)
return 0;
		/* If the list was in operating and is now in terminating or
		 * admin, then it is OK to exit, but it still needs to wait
		 * until the list reaches admin. It is not required to set the
		 * state again.
		 */
if (operating &&
(state == LAN966X_TAPRIO_STATE_TERMINATING ||
state == LAN966X_TAPRIO_STATE_ADMIN))
break;
} while (!time_after(jiffies, end));
end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
do {
state = lan966x_taprio_list_state_get(port);
if (state == LAN966X_TAPRIO_STATE_ADMIN)
break;
} while (!time_after(jiffies, end));
	/* If the list was in operating mode, it could be stopped while some
	 * queues were closed, so make sure to restore "all-queues-open"
	 */
if (operating) {
lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
lan966x, QSYS_TAS_GS_CTRL);
lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
lan966x, QSYS_TAS_GATE_STATE);
}
return 0;
}
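/* Shut down every list of this port that is not already in the admin state */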
static int lan966x_taprio_shutdown(struct lan966x_port *port)
{
u32 i, list, state;
int err;
for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
list = lan966x_taprio_list_index(port, i);
state = lan966x_taprio_list_index_state_get(port, list);
if (state == LAN966X_TAPRIO_STATE_ADMIN)
continue;
err = lan966x_taprio_list_shutdown(port, list);
if (err)
return err;
}
return 0;
}
/* Find a suitable list for a new schedule. First priority is a list in state
* pending. Second priority is a list in state admin.
*/
static int lan966x_taprio_find_list(struct lan966x_port *port,
struct tc_taprio_qopt_offload *qopt,
int *new_list, int *obs_list)
{
int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
int err, oper = -1;
u32 i;
*new_list = -1;
*obs_list = -1;
	/* If there is already a list in operating mode, return it in obs_list,
	 * such that when the new list gets activated the operating list is
	 * stopped. In this way it is possible to have smooth transitions
	 * between the lists
	 */
for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
list[i] = lan966x_taprio_list_index(port, i);
state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
oper = list[i];
}
for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
err = lan966x_taprio_shutdown(port);
if (err)
return err;
*new_list = list[i];
*obs_list = (oper == -1) ? *new_list : oper;
return 0;
}
}
for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
*new_list = list[i];
*obs_list = (oper == -1) ? *new_list : oper;
return 0;
}
}
return -ENOSPC;
}
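/* Validate the requested schedule against the HW limits before touching any
 * register: no cycle time extension, at most LAN966X_TAPRIO_NUM_GCL entries,
 * cycle time and every interval between 1 usec and 1 sec, only set-gate-states
 * commands, and a cycle time that covers the sum of all intervals.
 */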
static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
{
u64 total_time = 0;
u32 i;
	/* This is not supported by the HW */
if (qopt->cycle_time_extension)
return -EOPNOTSUPP;
	/* There is a limited number of gcl entries that can be used; they are
	 * shared by all ports
	 */
if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
return -EINVAL;
/* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
return -EINVAL;
for (i = 0; i < qopt->num_entries; ++i) {
struct tc_taprio_sched_entry *entry = &qopt->entries[i];
/* Don't allow intervals bigger than 1 sec or smaller than 1
* usec
*/
if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
return -EINVAL;
if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
return -EINVAL;
total_time += qopt->entries[i].interval;
}
	/* Don't allow the total time of the intervals to be bigger than 1 sec */
if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
return -EINVAL;
	/* The HW expects the cycle time to be at least as big as the sum of
	 * the gcl intervals
	 */
if (qopt->cycle_time < total_time)
return -EINVAL;
return 0;
}
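/* Build a bitmap of free gcl entries. Walk the gcl chain of every list that
 * is currently in use and clear the bits of the entries it occupies. Returns
 * the number of free entries.
 */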
static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
unsigned long *free_list)
{
struct lan966x *lan966x = port->lan966x;
u32 num_free, state, list;
u32 base, next, max_list;
/* By default everything is free */
bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
num_free = LAN966X_TAPRIO_NUM_GCL;
	/* Iterate over all the lists that are in use and mark the gcl entries
	 * they occupy as not free
	 */
max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
for (list = 0; list < max_list; ++list) {
state = lan966x_taprio_list_index_state_get(port, list);
if (state == LAN966X_TAPRIO_STATE_ADMIN)
continue;
base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
next = base;
do {
clear_bit(next, free_list);
num_free--;
lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
lan966x, QSYS_TAS_CFG_CTRL);
next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
} while (base != next);
}
return num_free;
}
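/* Program a single gcl entry: the gate states, the scheduler position and
 * port profile of this port, the link to the next gcl entry and the interval.
 */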
static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
struct tc_taprio_sched_entry *entry,
u32 next_entry)
{
struct lan966x *lan966x = port->lan966x;
/* Setup a single gcl entry */
lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
lan966x, QSYS_TAS_GCL_CT_CFG);
lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
lan966x, QSYS_TAS_GCL_CT_CFG2);
lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
}
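/* Program the gcl entries of a new schedule. The entries are taken from the
 * free pool and chained into a circular list whose first entry is written as
 * the base address of the given TAS list.
 */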
static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
struct tc_taprio_qopt_offload *qopt,
int list)
{
DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
struct lan966x *lan966x = port->lan966x;
u32 i, base, next;
if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
return -ENOSPC;
/* Select list */
lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
QSYS_TAS_CFG_CTRL_LIST_NUM,
lan966x, QSYS_TAS_CFG_CTRL);
/* Setup the address of the first gcl entry */
base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
lan966x, QSYS_TAS_LIST_CFG);
/* Iterate over entries and add them to the gcl list */
next = base;
for (i = 0; i < qopt->num_entries; ++i) {
lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
lan966x, QSYS_TAS_CFG_CTRL);
/* If the entry is last, point back to the start of the list */
if (i == qopt->num_entries - 1)
next = base;
else
next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
next + 1);
lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
}
return 0;
}
/* Calculate the new base_time based on cycle_time. The HW recommends the new
 * base time to be at least current time + 2 * cycle_time
 */
static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
const u32 cycle_time,
const ktime_t org_base_time,
ktime_t *new_base_time)
{
ktime_t current_time, threshold_time;
struct timespec64 ts;
/* Get the current time and calculate the threshold_time */
lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
current_time = timespec64_to_ktime(ts);
threshold_time = current_time + (2 * cycle_time);
	/* If the org_base_time is far enough in the future, just use it */
if (org_base_time >= threshold_time) {
*new_base_time = org_base_time;
return;
}
	/* If the org_base_time is smaller than the current_time, calculate the
	 * new base time as follows.
	 */
if (org_base_time <= current_time) {
u64 tmp = current_time - org_base_time;
u32 rem = 0;
if (tmp > cycle_time)
div_u64_rem(tmp, cycle_time, &rem);
rem = cycle_time - rem;
*new_base_time = threshold_time + rem;
return;
}
	/* The only remaining case is org_base_time between current_time and
	 * threshold_time. In this case the new_base_time is calculated as
	 * org_base_time + 2 * cycle_time
	 */
*new_base_time = org_base_time + 2 * cycle_time;
}
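/* Update the TAS profile of the port with the new link speed. The speed is
 * used by the HW when calculating the guard band.
 */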
int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
{
struct lan966x *lan966x = port->lan966x;
u8 taprio_speed;
switch (speed) {
case SPEED_10:
taprio_speed = LAN966X_TAPRIO_SPEED_10;
break;
case SPEED_100:
taprio_speed = LAN966X_TAPRIO_SPEED_100;
break;
case SPEED_1000:
taprio_speed = LAN966X_TAPRIO_SPEED_1000;
break;
case SPEED_2500:
taprio_speed = LAN966X_TAPRIO_SPEED_2500;
break;
default:
return -EINVAL;
}
lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
QSYS_TAS_PROFILE_CFG_LINK_SPEED,
lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
return 0;
}
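/* Offload a new taprio schedule: validate it, pick a free TAS list, program
 * the gcl, set a base time at least two cycle times in the future and start
 * the list, marking the currently operating list (if any) as obsolete so that
 * it is stopped when the new list becomes active.
 *
 * A schedule like the following reaches this callback when full offload is
 * requested (hypothetical interface name and queue mapping):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *      num_tc 8 map 0 1 2 3 4 5 6 7 7 7 7 7 7 7 7 7 \
 *      queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
 *      base-time 0 \
 *      sched-entry S 0x01 300000 \
 *      sched-entry S 0xfe 700000 \
 *      flags 0x2
 */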
int lan966x_taprio_add(struct lan966x_port *port,
struct tc_taprio_qopt_offload *qopt)
{
struct lan966x *lan966x = port->lan966x;
int err, new_list, obs_list;
struct timespec64 ts;
ktime_t base_time;
err = lan966x_taprio_check(qopt);
if (err)
return err;
err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
if (err)
return err;
err = lan966x_taprio_gcl_setup(port, qopt, new_list);
if (err)
return err;
lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
qopt->base_time, &base_time);
ts = ktime_to_timespec64(base_time);
lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
lan966x, QSYS_TAS_BT_NSEC);
lan_wr(lower_32_bits(ts.tv_sec),
lan966x, QSYS_TAS_BT_SEC_LSB);
lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
lan966x, QSYS_TAS_BT_SEC_MSB);
lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
lan966x, QSYS_TAS_STARTUP_CFG);
/* Start list processing */
lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
QSYS_TAS_LST_LIST_STATE,
lan966x, QSYS_TAS_LST);
return err;
}
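/* Remove the taprio configuration from the port by shutting down its lists */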
int lan966x_taprio_del(struct lan966x_port *port)
{
return lan966x_taprio_shutdown(port);
}
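/* One-time TAS setup: program the state machine revisit delay based on the
 * PTP clock period, set the total number of TAS lists, enable guard banding
 * on all queues and bind each port to its own TAS profile.
 */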
void lan966x_taprio_init(struct lan966x *lan966x)
{
int num_taprio_lists;
int p;
lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
lan966x_ptp_get_period_ps()),
lan966x, QSYS_TAS_STM_CFG);
num_taprio_lists = lan966x->num_phys_ports *
LAN966X_TAPRIO_ENTRIES_PER_PORT;
/* For now we always use guard band on all queues */
lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
lan966x, QSYS_TAS_CFG_CTRL);
for (p = 0; p < lan966x->num_phys_ports; p++)
lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
QSYS_TAS_PROFILE_CFG_PORT_NUM,
lan966x, QSYS_TAS_PROFILE_CFG(p));
}
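/* Remove the taprio configuration from all the ports that were initialized */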
void lan966x_taprio_deinit(struct lan966x *lan966x)
{
int p;
for (p = 0; p < lan966x->num_phys_ports; ++p) {
if (!lan966x->ports[p])
continue;
lan966x_taprio_del(lan966x->ports[p]);
}
}