/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_CONTROLQ_API_H_
#define _IDPF_CONTROLQ_API_H_

#include "idpf_mem.h"

struct idpf_hw;

/* Used for queue init, response and events */
enum idpf_ctlq_type {
	IDPF_CTLQ_TYPE_MAILBOX_TX	= 0,
	IDPF_CTLQ_TYPE_MAILBOX_RX	= 1,
	IDPF_CTLQ_TYPE_CONFIG_TX	= 2,
	IDPF_CTLQ_TYPE_CONFIG_RX	= 3,
	IDPF_CTLQ_TYPE_EVENT_RX		= 4,
	IDPF_CTLQ_TYPE_RDMA_TX		= 5,
	IDPF_CTLQ_TYPE_RDMA_RX		= 6,
	IDPF_CTLQ_TYPE_RDMA_COMPL	= 7
};

/* Generic Control Queue Structures */
struct idpf_ctlq_reg {
	/* used for queue tracking */
	u32 head;
	u32 tail;
	/* Below applies only to default mb (if present) */
	u32 len;
	u32 bah;
	u32 bal;
	u32 len_mask;
	u32 len_ena_mask;
	u32 head_mask;
};
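
/* A minimal sketch (not part of this API) of how a caller might fill
 * struct idpf_ctlq_reg for a default mailbox TX queue.  The register
 * offset/mask macros below are hypothetical placeholders standing in for
 * device-specific definitions; they are not provided by this header.
 *
 *	struct idpf_ctlq_reg mbx_tx_reg = {
 *		.head		= EXAMPLE_MBX_ATQH,		// hypothetical offsets
 *		.tail		= EXAMPLE_MBX_ATQT,
 *		.len		= EXAMPLE_MBX_ATQLEN,
 *		.bah		= EXAMPLE_MBX_ATQBAH,
 *		.bal		= EXAMPLE_MBX_ATQBAL,
 *		.len_mask	= EXAMPLE_MBX_ATQLEN_LEN_M,	// hypothetical masks
 *		.len_ena_mask	= EXAMPLE_MBX_ATQLEN_ENABLE_M,
 *		.head_mask	= EXAMPLE_MBX_ATQH_HEAD_M,
 *	};
 */
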
/* Generic queue msg structure */
struct idpf_ctlq_msg {
	u8 vmvf_type; /* represents the source of the message on recv */
#define IDPF_VMVF_TYPE_VF 0
#define IDPF_VMVF_TYPE_VM 1
#define IDPF_VMVF_TYPE_PF 2
	u8 host_id;
	/* 3-bit field used only when sending a message to CP - to be used in
	 * combination with target func_id to route the message
	 */
#define IDPF_HOST_ID_MASK 0x7
	u16 opcode;
	u16 data_len;	/* data_len = 0 when no payload is attached */
	union {
		u16 func_id;	/* when sending a message */
		u16 status;	/* when receiving a message */
	};
	union {
		struct {
			u32 chnl_opcode;
			u32 chnl_retval;
		} mbx;
	} cookie;
	union {
#define IDPF_DIRECT_CTX_SIZE	16
#define IDPF_INDIRECT_CTX_SIZE	8
		/* 16 bytes of context can be provided or 8 bytes of context
		 * plus the address of a DMA buffer
		 */
		u8 direct[IDPF_DIRECT_CTX_SIZE];
		struct {
			u8 context[IDPF_INDIRECT_CTX_SIZE];
			struct idpf_dma_mem *payload;
		} indirect;
		struct {
			u32 rsvd;
			u16 data;
			u16 flags;
		} sw_cookie;
	} ctx;
};
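
/* Illustrative sketch (not part of the API): preparing a mailbox message with
 * an indirect payload.  "len", "chnl_op" and "payload" are assumed to be
 * provided by the caller; payload must be DMA memory owned by the caller.
 *
 *	struct idpf_ctlq_msg msg = { };
 *
 *	msg.opcode = idpf_mbq_opc_send_msg_to_cp;
 *	msg.func_id = 0;			// target function id, assumed
 *	msg.data_len = len;			// bytes used in "payload"
 *	msg.cookie.mbx.chnl_opcode = chnl_op;	// hypothetical virtchnl opcode
 *	msg.ctx.indirect.payload = payload;	// struct idpf_dma_mem *, caller-owned
 *
 * For a message with no buffer, set data_len to 0 and place up to
 * IDPF_DIRECT_CTX_SIZE bytes of context in msg.ctx.direct instead.
 */
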
/* Generic queue info structures */
/* MB, CONFIG and EVENT q do not have extended info */
struct idpf_ctlq_create_info {
	enum idpf_ctlq_type type;
	int id; /* absolute queue offset passed as input
		 * -1 for default mailbox if present
		 */
	u16 len; /* Queue length passed as input */
	u16 buf_size; /* buffer size passed as input */
	u64 base_address; /* output, HPA of the Queue start */
	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */

	int ext_info_size;
	void *ext_info; /* Specific to q type */
};
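
/* Illustrative sketch (not part of the API): describing the default mailbox
 * pair for idpf_ctlq_init().  Ring length and buffer size values here are
 * assumptions chosen only for the example.
 *
 *	struct idpf_ctlq_create_info q_info[2] = {
 *		{
 *			.type		= IDPF_CTLQ_TYPE_MAILBOX_TX,
 *			.id		= -1,		// default mailbox
 *			.len		= 64,		// descriptors, assumed
 *			.buf_size	= 4096,		// bytes, assumed
 *		},
 *		{
 *			.type		= IDPF_CTLQ_TYPE_MAILBOX_RX,
 *			.id		= -1,
 *			.len		= 64,
 *			.buf_size	= 4096,
 *		},
 *	};
 *
 * The .reg member of each entry must also be filled with the queue's register
 * description before the array is handed to idpf_ctlq_init().
 */
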
/* Control Queue information */
struct idpf_ctlq_info {
	struct list_head cq_list;

	enum idpf_ctlq_type cq_type;
	int q_id;
	spinlock_t cq_lock;		/* control queue lock */
	/* used for interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_post;		/* starting descriptor to post buffers
					 * to after recv
					 */

	struct idpf_dma_mem desc_ring;	/* descriptor ring memory
					 * idpf_dma_mem is defined in idpf_mem.h
					 */
	union {
		struct idpf_dma_mem **rx_buff;
		struct idpf_ctlq_msg **tx_msg;
	} bi;

	u16 buf_size;			/* queue buffer size */
	u16 ring_size;			/* Number of descriptors */
	struct idpf_ctlq_reg reg;	/* registers accessed by ctlqs */
};
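
/* Illustrative sketch (not part of the API): next_to_use and next_to_clean
 * track the descriptor ring in the usual head/tail fashion, so the number of
 * descriptors currently outstanding can be derived from them.  This helper is
 * hypothetical and shown only to clarify the bookkeeping.
 *
 *	static u16 example_ctlq_in_use(const struct idpf_ctlq_info *cq)
 *	{
 *		// wrap-around safe: ring_size is the ring length in descriptors
 *		return (cq->next_to_use >= cq->next_to_clean) ?
 *		       cq->next_to_use - cq->next_to_clean :
 *		       cq->ring_size - cq->next_to_clean + cq->next_to_use;
 *	}
 */
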
/**
 * enum idpf_mbx_opc - PF/VF mailbox commands
 * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
 * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
 *				       any peer driver
 */
enum idpf_mbx_opc {
	idpf_mbq_opc_send_msg_to_cp		= 0x0801,
	idpf_mbq_opc_send_msg_to_peer_drv	= 0x0804,
};

/* API supported for control queue management */
/* Initializes all required queues, including the default mailbox.  "q_info"
 * is an array of create_info structs equal in size to the number of control
 * queues to be created.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
		   struct idpf_ctlq_create_info *q_info);
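
/* Usage sketch, assuming the q_info[2] array for the default mailbox shown
 * above has been prepared:
 *
 *	int err;
 *
 *	err = idpf_ctlq_init(hw, 2, q_info);
 *	if (err)
 *		return err;	// handle allocation/registration failure
 *
 * On success the queues described in q_info are available on the control
 * queue list maintained in hw.
 */
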
/* Allocates and initializes a single control queue, which is added to the
 * control queue list; returns a handle to the created control queue via "cq"
 */
int idpf_ctlq_add(struct idpf_hw *hw,
		  struct idpf_ctlq_create_info *qinfo,
		  struct idpf_ctlq_info **cq);

/* Deinitialize and deallocate a single control queue */
void idpf_ctlq_remove(struct idpf_hw *hw,
		      struct idpf_ctlq_info *cq);
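
/* Usage sketch: creating (and later removing) one additional queue outside of
 * idpf_ctlq_init().  "qinfo" is assumed to be a filled
 * struct idpf_ctlq_create_info.
 *
 *	struct idpf_ctlq_info *cq;
 *	int err;
 *
 *	err = idpf_ctlq_add(hw, &qinfo, &cq);
 *	if (err)
 *		return err;
 *
 *	// ... use cq ...
 *
 *	idpf_ctlq_remove(hw, cq);
 */
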
/* Sends messages to HW and will also free the buffer */
int idpf_ctlq_send(struct idpf_hw *hw,
		   struct idpf_ctlq_info *cq,
		   u16 num_q_msg,
		   struct idpf_ctlq_msg q_msg[]);
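
/* Usage sketch: sending a single message prepared as in the
 * struct idpf_ctlq_msg example above; cq is the mailbox TX queue handle.
 *
 *	int err;
 *
 *	err = idpf_ctlq_send(hw, cq, 1, &msg);
 *	if (err)
 *		return err;	// e.g. ring exhausted; clean the SQ and retry
 *
 * Completed send descriptors are reclaimed later with idpf_ctlq_clean_sq().
 */
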
/* Receives messages; called from the interrupt handler or from polling
 * initiated by the app/process. The caller is expected to free the buffers.
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		   struct idpf_ctlq_msg *q_msg);
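
/* Usage sketch: draining the mailbox RX queue from the interrupt handler or a
 * polling loop.  IDPF_EXAMPLE_RX_BATCH is a hypothetical batch size; num_q_msg
 * is assumed to be in/out (requested count in, received count out).
 *
 *	struct idpf_ctlq_msg q_msg[IDPF_EXAMPLE_RX_BATCH];
 *	u16 num = IDPF_EXAMPLE_RX_BATCH;
 *	int err;
 *
 *	err = idpf_ctlq_recv(cq, &num, q_msg);
 *	if (!err) {
 *		// process num messages; indirect payload buffers now belong to
 *		// the caller and must be returned afterwards, e.g. via
 *		// idpf_ctlq_post_rx_buffs() below
 *	}
 */
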
/* Reclaims send descriptors on HW write back */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
		       struct idpf_ctlq_msg *msg_status[]);
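
/* Usage sketch: reclaiming completed send descriptors, typically before
 * posting new messages.  IDPF_EXAMPLE_TX_BATCH is a hypothetical batch size;
 * clean_count is assumed to be in/out (requested in, reclaimed out).
 *
 *	struct idpf_ctlq_msg *msg_status[IDPF_EXAMPLE_TX_BATCH];
 *	u16 clean_count = IDPF_EXAMPLE_TX_BATCH;
 *	int err;
 *
 *	err = idpf_ctlq_clean_sq(cq, &clean_count, msg_status);
 *	if (!err) {
 *		// msg_status[] points at the reclaimed send messages so their
 *		// buffers can be reused or released by the caller
 *	}
 */
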
/* Indicate RX buffers are done being processed */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
			    struct idpf_ctlq_info *cq,
			    u16 *buff_count,
			    struct idpf_dma_mem **buffs);
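
/* Usage sketch: returning processed RX buffers to hardware after a receive
 * pass.  "bufs" is assumed to be an array of struct idpf_dma_mem pointers
 * recovered from the received messages; buff_count is an in/out parameter.
 *
 *	u16 nbufs = num;	// buffers recovered in the recv example
 *	int err;
 *
 *	err = idpf_ctlq_post_rx_buffs(hw, cq, &nbufs, bufs);
 *	// nbufs is updated on return to reflect the result of the post
 */
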
/* Will destroy all queues, including the default mailbox */
void idpf_ctlq_deinit(struct idpf_hw *hw);

#endif /* _IDPF_CONTROLQ_API_H_ */