"requirements/requirements.txt" did not exist on "97787881ee2af488f3f92c6889430a96a466a86c"
Commit d4666c97 authored by Antoine Kaufmann

Reformat closer to google style

parent eb125a88
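For reference, this is roughly the kind of .clang-format configuration that would produce the layout seen in the reformatted code below (the commit message only says "Reformat closer to google style"; the exact settings, and whether a .clang-format file exists in the repository, are assumptions for illustration):

  BasedOnStyle: Google
  IndentWidth: 2
  ColumnLimit: 80
  DerivePointerAlignment: false
  PointerAlignment: Right

The visible changes are consistent with this: opening braces on the same line as function signatures, two-space indentation, alphabetically sorted includes, and long expressions wrapped at 80 columns.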
@@ -24,9 +24,10 @@
#pragma once
#include <stdint.h>
#include <list>
#include <vector>
extern "C" {
#include <simbricks/proto/pcie.h>
}
@@ -36,92 +37,92 @@ typedef uint32_t reg_t;
typedef uint64_t addr_t;
typedef uint16_t ptr_t;
#define REG_FW_ID 0x0000
#define REG_FW_VER 0x0004
#define REG_BOARD_ID 0x0008
#define REG_BOARD_VER 0x000C
#define REG_PHC_COUNT 0x0010
#define REG_PHC_OFFSET 0x0014
#define REG_PHC_STRIDE 0x0018
#define REG_IF_COUNT 0x0020
#define REG_IF_STRIDE 0x0024
#define REG_IF_CSR_OFFSET 0x002C
#define IF_FEATURE_RSS (1 << 0)
#define IF_FEATURE_PTP_TS (1 << 4)
#define IF_FEATURE_TX_CSUM (1 << 8)
#define IF_FEATURE_RX_CSUM (1 << 9)
#define IF_FEATURE_RX_HASH (1 << 10)
#define PHC_REG_FEATURES 0x0200
#define PHC_REG_PTP_CUR_SEC_L 0x0218
#define PHC_REG_PTP_CUR_SEC_H 0x021C
#define PHC_REG_PTP_SET_FNS 0x0230
#define PHC_REG_PTP_SET_NS 0x0234
#define PHC_REG_PTP_SET_SEC_L 0x0238
#define PHC_REG_PTP_SET_SEC_H 0x023C
#define IF_REG_IF_ID 0x80000
#define IF_REG_IF_FEATURES 0x80004
#define IF_REG_EVENT_QUEUE_COUNT 0x80010
#define IF_REG_EVENT_QUEUE_OFFSET 0x80014
#define IF_REG_TX_QUEUE_COUNT 0x80020
#define IF_REG_TX_QUEUE_OFFSET 0x80024
#define IF_REG_TX_CPL_QUEUE_COUNT 0x80028
#define IF_REG_TX_CPL_QUEUE_OFFSET 0x8002C
#define IF_REG_RX_QUEUE_COUNT 0x80030
#define IF_REG_RX_QUEUE_OFFSET 0x80034
#define IF_REG_RX_CPL_QUEUE_COUNT 0x80038
#define IF_REG_RX_CPL_QUEUE_OFFSET 0x8003C
#define IF_REG_PORT_COUNT 0x80040
#define IF_REG_PORT_OFFSET 0x80044
#define IF_REG_PORT_STRIDE 0x80048
#define QUEUE_ACTIVE_MASK 0x80000000
#define QUEUE_ARM_MASK 0x80000000
#define QUEUE_CONT_MASK 0x40000000
#define EVENT_QUEUE_BASE_ADDR_REG 0x100000
#define EVENT_QUEUE_ACTIVE_LOG_SIZE_REG 0x100008
#define EVENT_QUEUE_INTERRUPT_INDEX_REG 0x10000C
#define EVENT_QUEUE_HEAD_PTR_REG 0x100010
#define EVENT_QUEUE_TAIL_PTR_REG 0x100018
#define TX_QUEUE_BASE_ADDR_REG 0x200000
#define TX_QUEUE_ACTIVE_LOG_SIZE_REG 0x200008
#define TX_QUEUE_CPL_QUEUE_INDEX_REG 0x20000C
#define TX_QUEUE_HEAD_PTR_REG 0x200010
#define TX_QUEUE_TAIL_PTR_REG 0x200018
#define TX_CPL_QUEUE_BASE_ADDR_REG 0x400000
#define TX_CPL_QUEUE_ACTIVE_LOG_SIZE_REG 0x400008
#define TX_CPL_QUEUE_INTERRUPT_INDEX_REG 0x40000C
#define TX_CPL_QUEUE_HEAD_PTR_REG 0x400010
#define TX_CPL_QUEUE_TAIL_PTR_REG 0x400018
#define RX_QUEUE_BASE_ADDR_REG 0x600000
#define RX_QUEUE_ACTIVE_LOG_SIZE_REG 0x600008
#define RX_QUEUE_CPL_QUEUE_INDEX_REG 0x60000C
#define RX_QUEUE_HEAD_PTR_REG 0x600010
#define RX_QUEUE_TAIL_PTR_REG 0x600018
#define RX_CPL_QUEUE_BASE_ADDR_REG 0x700000
#define RX_CPL_QUEUE_ACTIVE_LOG_SIZE_REG 0x700008
#define RX_CPL_QUEUE_INTERRUPT_INDEX_REG 0x70000C
#define RX_CPL_QUEUE_HEAD_PTR_REG 0x700010
#define RX_CPL_QUEUE_TAIL_PTR_REG 0x700018
#define PORT_REG_PORT_ID 0x800000
#define PORT_REG_PORT_FEATURES 0x800004
#define PORT_REG_PORT_MTU 0x800008
#define PORT_REG_SCHED_COUNT 0x800010
#define PORT_REG_SCHED_OFFSET 0x800014
#define PORT_REG_SCHED_STRIDE 0x800018
#define PORT_REG_SCHED_TYPE 0x80001C
#define PORT_REG_SCHED_ENABLE 0x800040
#define PORT_REG_RSS_MASK 0x800080
#define PORT_QUEUE_ENABLE 0x900000
namespace corundum {
@@ -133,209 +134,209 @@ namespace corundum {
class DescRing;
struct Desc {
uint16_t rsvd0;
uint16_t tx_csum_cmd;
uint32_t len;
uint64_t addr;
} __attribute__((packed));
struct Cpl {
uint16_t queue;
uint16_t index;
uint16_t len;
uint16_t rsvd0;
uint32_t ts_ns;
uint16_t ts_s;
uint16_t rx_csum;
uint32_t rx_hash;
uint8_t rx_hash_type;
uint8_t rsvd1;
uint8_t rsvd2;
uint8_t rsvd3;
uint32_t rsvd4;
uint32_t rsvd5;
} __attribute__((packed));
#define EVENT_TYPE_TX_CPL 0x0000
#define EVENT_TYPE_RX_CPL 0x0001
struct Event {
uint16_t type;
uint16_t source;
} __attribute__((packed));
struct RxData {
size_t len;
uint8_t data[MAX_DMA_LEN];
};
#define DMA_TYPE_DESC 0
#define DMA_TYPE_MEM 1
#define DMA_TYPE_TX_CPL 2
#define DMA_TYPE_RX_CPL 3
#define DMA_TYPE_EVENT 4
struct DMAOp : public nicbm::DMAOp {
DMAOp() {
data = databuf;
}
uint8_t type;
DescRing *ring;
RxData *rx_data;
uint64_t tag;
uint8_t databuf[MAX_DMA_LEN];
};
class DescRing {
public:
DescRing();
~DescRing();
addr_t dmaAddr();
size_t sizeLog();
unsigned index();
ptr_t headPtr();
ptr_t tailPtr();
void setDMALower(uint32_t addr);
void setDMAUpper(uint32_t addr);
void setSizeLog(size_t size_log);
void setIndex(unsigned index);
virtual void setHeadPtr(ptr_t ptr);
void setTailPtr(ptr_t ptr);
virtual void dmaDone(DMAOp *op) = 0;
protected:
bool empty();
bool full();
bool updatePtr(ptr_t ptr, bool head);
addr_t _dmaAddr;
size_t _sizeLog;
size_t _size;
size_t _sizeMask;
unsigned _index;
ptr_t _headPtr;
ptr_t _tailPtr;
ptr_t _currHead;
ptr_t _currTail;
bool active;
bool armed;
std::vector<bool> cplDma;
};
class EventRing : public DescRing {
public:
EventRing();
~EventRing();
void dmaDone(DMAOp *op) override;
void issueEvent(unsigned type, unsigned source);
};
class CplRing : public DescRing {
public:
CplRing(EventRing *eventRing);
~CplRing();
void dmaDone(DMAOp *op) override;
void complete(unsigned index, size_t len, bool tx);
private:
struct CplData {
unsigned index;
size_t len;
bool tx;
};
EventRing *eventRing;
std::list<CplData> pending;
};
class TxRing : public DescRing {
public:
TxRing(CplRing *cplRing);
~TxRing();
void setHeadPtr(ptr_t ptr) override;
void dmaDone(DMAOp *op) override;
private:
CplRing *txCplRing;
};
class RxRing : public DescRing {
public:
RxRing(CplRing *cplRing);
~RxRing();
void dmaDone(DMAOp *op) override;
void rx(RxData *rx_data);
private:
CplRing *rxCplRing;
};
class Port {
public:
Port();
~Port();
unsigned id();
unsigned features();
size_t mtu();
size_t schedCount();
addr_t schedOffset();
addr_t schedStride();
unsigned schedType();
unsigned rssMask();
void setId(unsigned id);
void setFeatures(unsigned features);
void setMtu(size_t mtu);
void setSchedCount(size_t count);
void setSchedOffset(addr_t offset);
void setSchedStride(addr_t stride);
void setSchedType(unsigned type);
void setRssMask(unsigned mask);
void schedEnable();
void schedDisable();
void queueEnable();
void queueDisable();
private:
unsigned _id;
unsigned _features;
size_t _mtu;
size_t _schedCount;
addr_t _schedOffset;
addr_t _schedStride;
unsigned _schedType;
unsigned _rssMask;
bool _schedEnable;
bool _queueEnable;
};
class Corundum : public nicbm::SimpleDevice<reg_t> {
public:
Corundum();
~Corundum();
virtual void setup_intro(struct cosim_pcie_proto_dev_intro &di);
virtual reg_t reg_read(uint8_t bar, addr_t addr);
virtual void reg_write(uint8_t bar, addr_t addr, reg_t val);
virtual void dma_complete(nicbm::DMAOp &op);
virtual void eth_rx(uint8_t port, const void *data, size_t len);
private:
EventRing eventRing;
TxRing txRing;
CplRing txCplRing;
RxRing rxRing;
CplRing rxCplRing;
Port port;
uint32_t features;
};
} // namespace corundum
@@ -22,19 +22,18 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <assert.h>
#include <cosim_pcie_proto.h>
#include <fcntl.h>
#include <nicsim.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
static uint8_t *d2h_queue;
static size_t d2h_pos;
@@ -46,179 +45,168 @@ static size_t h2d_pos;
static size_t h2d_elen;
static size_t h2d_enum;
static void sigint_handler(int dummy) {
exit(1);
}
static int uxsocket_init() {
int cfd;
if ((cfd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
return -1;
}
struct sockaddr_un saun;
memset(&saun, 0, sizeof(saun));
saun.sun_family = AF_UNIX;
memcpy(saun.sun_path, "/tmp/cosim-pci", strlen("/tmp/cosim-pci"));
if (connect(cfd, (struct sockaddr *)&saun, sizeof(saun)) == -1) {
close(cfd);
return -1;
}
return cfd;
}
static int queue_create(const struct cosim_pcie_proto_dev_intro di) {
int fd = -1;
if ((fd = open("/dev/shm/dummy_nic_shm", O_RDWR)) == -1) {
perror("Failed to open shm file");
goto error;
}
void *addr;
if ((addr = mmap(NULL, 32 * 1024 * 1024, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0)) == (void *)-1) {
perror("mmap failed");
goto error;
}
d2h_queue = (uint8_t *)addr + di.d2h_offset;
d2h_pos = 0;
d2h_elen = di.d2h_elen;
d2h_enum = di.d2h_nentries;
h2d_queue = (uint8_t *)addr + di.h2d_offset;
h2d_pos = 0;
h2d_elen = di.h2d_elen;
h2d_enum = di.h2d_nentries;
return 0;
error:
if (fd > 0) {
close(fd);
}
return -1;
}
volatile union cosim_pcie_proto_h2d *h2d_alloc() {
volatile union cosim_pcie_proto_h2d *msg =
(volatile union cosim_pcie_proto_h2d *)(h2d_queue + h2d_pos * h2d_elen);
if ((msg->dummy.own_type & COSIM_PCIE_PROTO_H2D_OWN_MASK) !=
COSIM_PCIE_PROTO_H2D_OWN_HOST) {
fprintf(stderr, "cosim: failed to allocate h2d message\n");
exit(1);
}
h2d_pos = (h2d_pos + 1) % h2d_enum;
return msg;
}
volatile union cosim_pcie_proto_d2h *d2h_poll() {
volatile union cosim_pcie_proto_d2h *msg;
msg = (volatile union cosim_pcie_proto_d2h *)(d2h_queue + d2h_pos * d2h_elen);
if ((msg->dummy.own_type & COSIM_PCIE_PROTO_D2H_OWN_MASK) ==
COSIM_PCIE_PROTO_D2H_OWN_DEV) {
return NULL;
}
return msg;
}
void d2h_done(volatile union cosim_pcie_proto_d2h *msg) {
msg->dummy.own_type = (msg->dummy.own_type & COSIM_PCIE_PROTO_D2H_MSG_MASK) |
COSIM_PCIE_PROTO_D2H_OWN_DEV;
d2h_pos = (d2h_pos + 1) % d2h_enum;
}
static void dev_read(uint64_t offset, uint16_t len) {
volatile union cosim_pcie_proto_h2d *h2d_msg = h2d_alloc();
volatile struct cosim_pcie_proto_h2d_read *read = &h2d_msg->read;
read->req_id = 0xF;
read->offset = offset;
read->len = len;
read->bar = 0;
read->own_type = COSIM_PCIE_PROTO_H2D_MSG_READ | COSIM_PCIE_PROTO_H2D_OWN_DEV;
volatile union cosim_pcie_proto_d2h *d2h_msg = NULL;
while (d2h_msg == NULL) {
d2h_msg = d2h_poll();
}
volatile struct cosim_pcie_proto_d2h_readcomp *rc;
rc = &d2h_msg->readcomp;
assert(rc->req_id == 0xF);
printf("received readcomp with data ");
for (int i = 0; i < read->len; i++) {
printf("%x ", ((const uint8_t *)rc->data)[i]);
}
printf("\n");
d2h_done(d2h_msg);
}
int main(int argc, char *argv[]) {
signal(SIGINT, sigint_handler);
int cfd;
if ((cfd = uxsocket_init()) < 0) {
fprintf(stderr, "Failed to open unix socket\n");
return -1;
}
struct cosim_pcie_proto_dev_intro di;
if (recv(cfd, &di, sizeof(di), 0) != sizeof(di)) {
perror("Failed to receive dev_intro");
close(cfd);
return -1;
}
if (queue_create(di) != 0) {
fprintf(stderr, "Failed to create shm queues\n");
close(cfd);
return -1;
}
struct cosim_pcie_proto_host_intro hi;
hi.flags = COSIM_PCIE_PROTO_FLAGS_HI_SYNC;
if (send(cfd, &hi, sizeof(hi), 0) != sizeof(hi)) {
perror("Failed to send host_intro");
close(cfd);
return -1;
}
while (1) {
int op_type;
uint64_t offset;
uint16_t len;
printf("op type (0-read): ");
scanf("%d", &op_type);
printf("offset: ");
scanf("%lx", &offset);
printf("len: ");
scanf("%hu", &len);
switch (op_type) {
case 0:
dev_read(offset, len);
break;
default:
fprintf(stderr, "Unimplemented type %u\n", op_type);
}
}
close(cfd);
return 0;
}
@@ -33,8 +33,8 @@ namespace headers {
#define ETH_ADDR_LEN 6
#define ETH_TYPE_IP 0x0800
#define ETH_TYPE_ARP 0x0806
struct eth_addr {
uint8_t addr[ETH_ADDR_LEN];
@@ -46,11 +46,10 @@ struct eth_hdr {
uint16_t type;
} __attribute__((packed));
/******************************************************************************/
/* IPv4 */
#define IPH_V(hdr) ((hdr)->_v_hl >> 4)
#define IPH_HL(hdr) ((hdr)->_v_hl & 0x0f)
#define IPH_TOS(hdr) ((hdr)->_tos)
#define IPH_ECN(hdr) ((hdr)->_tos & 0x3)
@@ -61,19 +60,19 @@ struct eth_hdr {
#define IP_HLEN 20
#define IP_PROTO_IP 0
#define IP_PROTO_ICMP 1
#define IP_PROTO_IGMP 2
#define IP_PROTO_IPENCAP 4
#define IP_PROTO_UDP 17
#define IP_PROTO_UDPLITE 136
#define IP_PROTO_TCP 6
#define IP_PROTO_DCCP 33
#define IP_ECN_NONE 0x0
#define IP_ECN_ECT0 0x2
#define IP_ECN_ECT1 0x1
#define IP_ECN_CE 0x3
struct ip_hdr {
/* version / header length */
@@ -97,7 +96,6 @@ struct ip_hdr {
uint32_t dest;
} __attribute__((packed));
/******************************************************************************/
/* ARP */
@@ -118,7 +116,6 @@ struct arp_hdr {
uint32_t tpa;
} __attribute__((packed));
/******************************************************************************/
/* TCP */
@@ -130,7 +127,7 @@ struct arp_hdr {
#define TCP_URG 0x20U
#define TCP_ECE 0x40U
#define TCP_CWR 0x80U
#define TCP_NS 0x100U
#define TCP_FLAGS 0x1ffU
@@ -138,23 +135,26 @@ struct arp_hdr {
#define TCP_HLEN 20
#define TCPH_HDRLEN(phdr) (ntohs((phdr)->_hdrlen_rsvd_flags) >> 12)
#define TCPH_FLAGS(phdr) (ntohs((phdr)->_hdrlen_rsvd_flags) & TCP_FLAGS)
#define TCPH_HDRLEN_SET(phdr, len) \
(phdr)->_hdrlen_rsvd_flags = htons(((len) << 12) | TCPH_FLAGS(phdr))
#define TCPH_FLAGS_SET(phdr, flags) \
(phdr)->_hdrlen_rsvd_flags = \
(((phdr)->_hdrlen_rsvd_flags & \
PP_HTONS((uint16_t)(~(uint16_t)(TCP_FLAGS)))) | \
htons(flags))
#define TCPH_HDRLEN_FLAGS_SET(phdr, len, flags) \
(phdr)->_hdrlen_rsvd_flags = htons(((len) << 12) | (flags))
#define TCPH_SET_FLAG(phdr, flags) \
(phdr)->_hdrlen_rsvd_flags = ((phdr)->_hdrlen_rsvd_flags | htons(flags))
#define TCPH_UNSET_FLAG(phdr, flags) \
(phdr)->_hdrlen_rsvd_flags = \
htons(ntohs((phdr)->_hdrlen_rsvd_flags) | (TCPH_FLAGS(phdr) & ~(flags)))
#define TCP_TCPLEN(seg) \
((seg)->len + ((TCPH_FLAGS((seg)->tcphdr) & (TCP_FIN | TCP_SYN)) != 0))
struct tcp_hdr {
uint16_t src;
@@ -167,7 +167,6 @@ struct tcp_hdr {
uint16_t urgp;
} __attribute__((packed));
/******************************************************************************/
/* UDP */
@@ -178,7 +177,6 @@ struct udp_hdr {
uint16_t chksum;
} __attribute__((packed));
/******************************************************************************/
/* whole packets */
@@ -189,18 +187,18 @@ struct pkt_arp {
struct pkt_ip {
struct eth_hdr eth;
struct ip_hdr ip;
} __attribute__((packed));
struct pkt_tcp {
struct eth_hdr eth;
struct ip_hdr ip;
struct tcp_hdr tcp;
} __attribute__((packed));
struct pkt_udp {
struct eth_hdr eth;
struct ip_hdr ip;
struct udp_hdr udp;
} __attribute__((packed));
@@ -24,409 +24,400 @@
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
#include "sims/nic/i40e_bm/i40e_bm.h"
using namespace i40e;
extern nicbm::Runner *runner;
queue_admin_tx::queue_admin_tx(i40e_bm &dev_, uint64_t &reg_base_,
uint32_t &reg_len_, uint32_t &reg_head_,
uint32_t &reg_tail_)
: queue_base("atx", reg_head_, reg_tail_),
dev(dev_),
reg_base(reg_base_),
reg_len(reg_len_) {
desc_len = 32;
ctxs_init();
}
queue_base::desc_ctx &queue_admin_tx::desc_ctx_create() {
return *new admin_desc_ctx(*this, dev);
}
void queue_admin_tx::reg_updated() {
base = reg_base;
len = (reg_len & I40E_GL_ATQLEN_ATQLEN_MASK) >> I40E_GL_ATQLEN_ATQLEN_SHIFT;
if (!enabled && (reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
#ifdef DEBUG_ADMINQ
log << " enable base=" << base << " len=" << len << logger::endl;
#endif
enabled = true;
} else if (enabled && !(reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
#ifdef DEBUG_ADMINQ
log << " disable" << logger::endl;
#endif
enabled = false;
}
queue_base::reg_updated();
}
queue_admin_tx::admin_desc_ctx::admin_desc_ctx(queue_admin_tx &queue_,
i40e_bm &dev_)
: i40e::queue_base::desc_ctx(queue_), aq(queue_), dev(dev_) {
d = reinterpret_cast<struct i40e_aq_desc *>(desc);
}
void queue_admin_tx::admin_desc_ctx::data_written(uint64_t addr, size_t len) {
processed();
}
void queue_admin_tx::admin_desc_ctx::desc_compl_prepare(uint16_t retval,
uint16_t extra_flags) {
d->flags &= ~0x1ff;
d->flags |= I40E_AQ_FLAG_DD | I40E_AQ_FLAG_CMP | extra_flags;
if (retval)
d->flags |= I40E_AQ_FLAG_ERR;
d->retval = retval;
#ifdef DEBUG_ADMINQ
queue.log << " desc_compl_prepare index=" << index << " retval=" << retval
<< logger::endl;
#endif
}
void queue_admin_tx::admin_desc_ctx::desc_complete(uint16_t retval,
uint16_t extra_flags) {
desc_compl_prepare(retval, extra_flags);
processed();
}
void queue_admin_tx::admin_desc_ctx::desc_complete_indir(uint16_t retval,
const void *data,
size_t len,
uint16_t extra_flags,
bool ignore_datalen) {
if (!ignore_datalen && len > d->datalen) {
queue.log << "queue_admin_tx::desc_complete_indir: data too long (" << len
<< ") got buffer for (" << d->datalen << ")" << logger::endl;
abort();
}
d->datalen = len;
desc_compl_prepare(retval, extra_flags);
uint64_t addr = d->params.external.addr_low |
(((uint64_t)d->params.external.addr_high) << 32);
data_write(addr, len, data);
}
void queue_admin_tx::admin_desc_ctx::prepare() {
if ((d->flags & I40E_AQ_FLAG_RD)) {
uint64_t addr = d->params.external.addr_low |
(((uint64_t)d->params.external.addr_high) << 32);
#ifdef DEBUG_ADMINQ
queue.log << " desc with buffer opc=" << d->opcode << " addr=" << addr
<< logger::endl;
#endif
data_fetch(addr, d->datalen);
} else {
prepared();
}
}
void queue_admin_tx::admin_desc_ctx::process() {
#ifdef DEBUG_ADMINQ
queue.log << " descriptor " << index << " fetched" << logger::endl;
#endif
if (d->opcode == i40e_aqc_opc_get_version) {
#ifdef DEBUG_ADMINQ
queue.log << " get version" << logger::endl;
#endif
struct i40e_aqc_get_version *gv =
reinterpret_cast<struct i40e_aqc_get_version *>(d->params.raw);
gv->rom_ver = 0;
gv->fw_build = 0;
gv->fw_major = 0;
gv->fw_minor = 0;
gv->api_major = I40E_FW_API_VERSION_MAJOR;
gv->api_minor = I40E_FW_API_VERSION_MINOR_X710;
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_request_resource) {
#ifdef DEBUG_ADMINQ
queue.log << " request resource" << logger::endl;
#endif
struct i40e_aqc_request_resource *rr =
reinterpret_cast<struct i40e_aqc_request_resource *>(d->params.raw);
rr->timeout = 180000;
#ifdef DEBUG_ADMINQ
queue.log << " res_id=" << rr->resource_id << logger::endl;
queue.log << " res_nu=" << rr->resource_number << logger::endl;
#endif
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_release_resource) {
#ifdef DEBUG_ADMINQ
queue.log << " release resource" << logger::endl;
#endif
#ifdef DEBUG_ADMINQ
struct i40e_aqc_request_resource *rr =
reinterpret_cast<struct i40e_aqc_request_resource *>(d->params.raw);
queue.log << " res_id=" << rr->resource_id << logger::endl;
queue.log << " res_nu=" << rr->resource_number << logger::endl;
#endif
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_clear_pxe_mode) {
#ifdef DEBUG_ADMINQ
queue.log << " clear PXE mode" << logger::endl;
#endif
dev.regs.gllan_rctl_0 &= ~I40E_GLLAN_RCTL_0_PXE_MODE_MASK;
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_list_func_capabilities ||
d->opcode == i40e_aqc_opc_list_dev_capabilities) {
#ifdef DEBUG_ADMINQ
queue.log << " get dev/fun caps" << logger::endl;
#endif
struct i40e_aqc_list_capabilites *lc =
reinterpret_cast<struct i40e_aqc_list_capabilites *>(d->params.raw);
struct i40e_aqc_list_capabilities_element_resp caps[] = {
{I40E_AQ_CAP_ID_RSS, 1, 0, 512, 6, 0, {}},
{I40E_AQ_CAP_ID_RXQ, 1, 0, dev.NUM_QUEUES, 0, 0, {}},
{I40E_AQ_CAP_ID_TXQ, 1, 0, dev.NUM_QUEUES, 0, 0, {}},
{I40E_AQ_CAP_ID_MSIX, 1, 0, dev.NUM_PFINTS, 0, 0, {}},
{I40E_AQ_CAP_ID_VSI, 1, 0, dev.NUM_VSIS, 0, 0, {}},
{I40E_AQ_CAP_ID_DCB, 1, 0, 1, 1, 1, {}},
};
size_t num_caps = sizeof(caps) / sizeof(caps[0]);
if (sizeof(caps) <= d->datalen) {
#ifdef DEBUG_ADMINQ
queue.log << " data fits" << logger::endl;
#endif
// data fits within the buffer
lc->count = num_caps;
desc_complete_indir(0, caps, sizeof(caps));
} else {
#ifdef DEBUG_ADMINQ
queue.log << " data doesn't fit" << logger::endl;
#endif
// data does not fit
d->datalen = sizeof(caps);
desc_complete(I40E_AQ_RC_ENOMEM);
}
} else if (d->opcode == i40e_aqc_opc_lldp_stop) {
#ifdef DEBUG_ADMINQ
queue.log << " lldp stop" << logger::endl;
#endif
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_mac_address_read) {
#ifdef DEBUG_ADMINQ
queue.log << " read mac" << logger::endl;
#endif
struct i40e_aqc_mac_address_read *ar =
reinterpret_cast<struct i40e_aqc_mac_address_read *>(d->params.raw);
struct i40e_aqc_mac_address_read_data ard;
uint64_t mac = runner->get_mac_addr();
#ifdef DEBUG_ADMINQ
queue.log << " mac = " << mac << logger::endl;
#endif
memcpy(ard.pf_lan_mac, &mac, 6);
memcpy(ard.port_mac, &mac, 6);
ar->command_flags = I40E_AQC_LAN_ADDR_VALID | I40E_AQC_PORT_ADDR_VALID;
desc_complete_indir(0, &ard, sizeof(ard));
} else if (d->opcode == i40e_aqc_opc_get_phy_abilities) {
#ifdef DEBUG_ADMINQ
queue.log << " get phy abilities" << logger::endl;
#endif
struct i40e_aq_get_phy_abilities_resp par;
memset(&par, 0, sizeof(par));
par.phy_type = (1ULL << I40E_PHY_TYPE_40GBASE_CR4_CU);
par.link_speed = I40E_LINK_SPEED_40GB;
par.abilities = I40E_AQ_PHY_LINK_ENABLED | I40E_AQ_PHY_AN_ENABLED;
par.eee_capability = 0;
d->params.external.param0 = 0;
d->params.external.param1 = 0;
desc_complete_indir(0, &par, sizeof(par), 0, true);
} else if (d->opcode == i40e_aqc_opc_get_link_status) {
#ifdef DEBUG_ADMINQ
queue.log << " link status" << logger::endl;
#endif
struct i40e_aqc_get_link_status *gls =
reinterpret_cast<struct i40e_aqc_get_link_status *>(d->params.raw);
gls->command_flags &= I40E_AQ_LSE_IS_ENABLED;  // should actually return
// status of link status
// notification
gls->phy_type = I40E_PHY_TYPE_40GBASE_CR4_CU;
gls->link_speed = I40E_LINK_SPEED_40GB;
gls->link_info = I40E_AQ_LINK_UP_FUNCTION | I40E_AQ_LINK_UP_PORT |
I40E_AQ_MEDIA_AVAILABLE | I40E_AQ_SIGNAL_DETECT;
// might need qualified module
gls->an_info = I40E_AQ_AN_COMPLETED | I40E_AQ_LP_AN_ABILITY;
gls->ext_info = 0;
gls->loopback = I40E_AQ_LINK_POWER_CLASS_4 << I40E_AQ_PWR_CLASS_SHIFT_LB;
gls->max_frame_size = dev.MAX_MTU;
gls->config = I40E_AQ_CONFIG_CRC_ENA;
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_get_switch_config) {
#ifdef DEBUG_ADMINQ
queue.log << " get switch config" << logger::endl;
#endif
struct i40e_aqc_switch_seid *sw =
reinterpret_cast<struct i40e_aqc_switch_seid *>(d->params.raw);
struct i40e_aqc_get_switch_config_header_resp hr;
/* Not sure why dpdk doesn't like this?
struct i40e_aqc_switch_config_element_resp els[] = {
// EMC
{ I40E_AQ_SW_ELEM_TYPE_EMP, I40E_AQ_SW_ELEM_REV_1, 1, 513, 0, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
// MAC
{ I40E_AQ_SW_ELEM_TYPE_MAC, I40E_AQ_SW_ELEM_REV_1, 2, 0, 0, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
// PF
{ I40E_AQ_SW_ELEM_TYPE_PF, I40E_AQ_SW_ELEM_REV_1, 16, 512, 0, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
// VSI PF
{ I40E_AQ_SW_ELEM_TYPE_VSI, I40E_AQ_SW_ELEM_REV_1, 512, 2, 16, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
// VSI PF
{ I40E_AQ_SW_ELEM_TYPE_VSI, I40E_AQ_SW_ELEM_REV_1, 513, 2, 1, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
};*/
struct i40e_aqc_switch_config_element_resp els[] = {
// VSI PF
{I40E_AQ_SW_ELEM_TYPE_VSI, I40E_AQ_SW_ELEM_REV_1, 512, 2, 16, {},
I40E_AQ_CONN_TYPE_REGULAR, 0, 0},
};
// find start idx
size_t cnt = sizeof(els) / sizeof(els[0]);
size_t first = 0;
for (first = 0; first < cnt && els[first].seid < sw->seid; first++) {
}
// figure out how many fit in the buffer
size_t max = (d->datalen - sizeof(hr)) / sizeof(els[0]);
size_t report = cnt - first;
if (report > max) {
report = max;
sw->seid = els[first + report].seid;
} else {
sw->seid = 0;
}
// prepare header
memset(&hr, 0, sizeof(hr));
hr.num_reported = report;
hr.num_total = cnt;
#ifdef DEBUG_ADMINQ
queue.log << " report=" << report << " cnt=" << cnt
<< " seid=" << sw->seid << logger::endl;
#endif
// create temporary contiguous buffer
size_t buflen = sizeof(hr) + sizeof(els[0]) * report;
uint8_t buf[buflen];
memcpy(buf, &hr, sizeof(hr));
memcpy(buf + sizeof(hr), els + first, sizeof(els[0]) * report);
desc_complete_indir(0, buf, buflen);
} else if (d->opcode == i40e_aqc_opc_set_switch_config) {
#ifdef DEBUG_ADMINQ
queue.log << " set switch config" << logger::endl;
#endif
/* TODO: lots of interesting things here like l2 filtering etc. that are
* relevant.
struct i40e_aqc_set_switch_config *sc =
reinterpret_cast<struct i40e_aqc_set_switch_config *>(
d->params.raw);
*/
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_get_vsi_parameters) {
#ifdef DEBUG_ADMINQ
queue.log << " get vsi parameters" << logger::endl;
#endif
/*struct i40e_aqc_add_get_update_vsi *v =
reinterpret_cast<struct i40e_aqc_add_get_update_vsi *>(
d->params.raw);*/
struct i40e_aqc_vsi_properties_data pd;
memset(&pd, 0, sizeof(pd));
pd.valid_sections |=
I40E_AQ_VSI_PROP_SWITCH_VALID | I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
I40E_AQ_VSI_PROP_QUEUE_OPT_VALID | I40E_AQ_VSI_PROP_SCHED_VALID;
desc_complete_indir(0, &pd, sizeof(pd));
} else if (d->opcode == i40e_aqc_opc_update_vsi_parameters) {
#ifdef DEBUG_ADMINQ
queue.log << " update vsi parameters" << logger::endl;
#endif
/* TODO */
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_set_dcb_parameters) {
#ifdef DEBUG_ADMINQ
queue.log << " set dcb parameters" << logger::endl;
#endif
/* TODO */
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_configure_vsi_bw_limit) {
#ifdef DEBUG_ADMINQ
queue.log << " configure vsi bw limit" << logger::endl;
#endif
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_query_vsi_bw_config) {
#ifdef DEBUG_ADMINQ
queue.log << " query vsi bw config" << logger::endl;
#endif
struct i40e_aqc_query_vsi_bw_config_resp bwc;
memset(&bwc, 0, sizeof(bwc));
for (size_t i = 0; i < 8; i++)
bwc.qs_handles[i] = 0xffff;
desc_complete_indir(0, &bwc, sizeof(bwc));
} else if (d->opcode == i40e_aqc_opc_query_vsi_ets_sla_config) {
#ifdef DEBUG_ADMINQ
queue.log << " query vsi ets sla config" << logger::endl;
#endif
struct i40e_aqc_query_vsi_ets_sla_config_resp sla;
memset(&sla, 0, sizeof(sla));
for (size_t i = 0; i < 8; i++)
sla.share_credits[i] = 127;
desc_complete_indir(0, &sla, sizeof(sla));
} else if (d->opcode == i40e_aqc_opc_remove_macvlan) {
#ifdef DEBUG_ADMINQ
queue.log << " remove macvlan" << logger::endl;
#endif
struct i40e_aqc_macvlan *m =
reinterpret_cast<struct i40e_aqc_macvlan *>(d->params.raw);
struct i40e_aqc_remove_macvlan_element_data *rve =
reinterpret_cast<struct i40e_aqc_remove_macvlan_element_data *>(data);
for (uint16_t i = 0; i < m->num_addresses; i++)
rve[i].error_code = I40E_AQC_REMOVE_MACVLAN_SUCCESS;
desc_complete_indir(0, data, d->datalen);
} else {
#ifdef DEBUG_ADMINQ
queue.log << " unknown opcode=" << d->opcode << logger::endl;
#endif
// desc_complete(I40E_AQ_RC_ESRCH);
desc_complete(0);
}
}
@@ -17,84 +17,84 @@ typedef uint16_t __le16;
typedef uint32_t __le32;
typedef uint64_t __le64;
#include "sims/nic/i40e_bm/base/i40e_adminq_cmd.h"
#include "sims/nic/i40e_bm/base/i40e_devids.h"
#include "sims/nic/i40e_bm/base/i40e_register.h"
#include "sims/nic/i40e_bm/base/i40e_rxtxq.h"
/* from i40e_types.h */
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
#define I40E_SR_OPTION_ROM_PTR 0x05
#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
#define I40E_SR_EMP_IMAGE_PTR 0x0B
#define I40E_SR_PE_IMAGE_PTR 0x0C
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
#define I40E_EMP_MODULE_PTR 0x0F
#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
#define I40E_SR_NVM_MAP_VERSION 0x29
#define I40E_SR_NVM_IMAGE_VERSION 0x2A
#define I40E_SR_NVM_STRUCTURE_VERSION 0x2B
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PXE_SETUP_PTR 0x30
#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
#define I40E_SR_NVM_ORIGINAL_EETRACK_LO 0x34
#define I40E_SR_NVM_ORIGINAL_EETRACK_HI 0x35
#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
#define I40E_SR_FEATURE_CONFIGURATION_PTR 0x49
#define I40E_SR_CONFIGURATION_METADATA_PTR 0x4D
#define I40E_SR_IMMEDIATE_VALUES_PTR 0x4E
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
#define I40E_PTR_TYPE BIT(15)
#define I40E_SR_OCP_CFG_WORD0 0x2B
#define I40E_SR_OCP_ENABLED BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
#define I40E_SR_BUF_ALIGNMENT 4096
#define I40E_SR_WORDS_IN_1KB 512
/* Checksum should be calculated such that after adding all the words,
* including the checksum word itself, the sum should be 0xBABA.
*/
#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
#endif // I40E_BASE_WRAPPER_H_
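The Shadow RAM checksum rule stated in the comment above (all words, including the checksum word itself, must sum to 0xBABA) means the checksum word is simply I40E_SR_SW_CHECKSUM_BASE minus the 16-bit sum of every other word. A small illustrative sketch of that calculation (sr_checksum is a hypothetical helper, not part of the simulator; it assumes <stdint.h> and <stddef.h> are available):

  static uint16_t sr_checksum(const uint16_t *words, size_t n_words,
                              size_t checksum_idx) {
    // Sum all Shadow RAM words except the checksum word itself.
    uint16_t sum = 0;
    for (size_t i = 0; i < n_words; i++) {
      if (i != checksum_idx)
        sum += words[i];
    }
    // Pick the checksum word so the total, including it, wraps to 0xBABA.
    return (uint16_t)(I40E_SR_SW_CHECKSUM_BASE - sum);
  }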
@@ -22,12 +22,14 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sims/nic/i40e_bm/i40e_bm.h"
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
nicbm::Runner *runner;
@@ -36,834 +38,798 @@ namespace i40e {
i40e_bm::i40e_bm()
: log("i40e"),
pf_atq(*this, regs.pf_atqba, regs.pf_atqlen, regs.pf_atqh, regs.pf_atqt),
hmc(*this),
shram(*this),
lanmgr(*this, NUM_QUEUES) {
reset(false);
}
i40e_bm::~i40e_bm() {
}
void i40e_bm::setup_intro(struct cosim_pcie_proto_dev_intro &di) {
di.bars[BAR_REGS].len = 4 * 1024 * 1024;
di.bars[BAR_REGS].flags = COSIM_PCIE_PROTO_BAR_64;
di.bars[BAR_IO].len = 32;
di.bars[BAR_IO].flags = COSIM_PCIE_PROTO_BAR_IO;
di.bars[BAR_MSIX].len = 32 * 1024;
di.bars[BAR_MSIX].flags =
COSIM_PCIE_PROTO_BAR_64 | COSIM_PCIE_PROTO_BAR_DUMMY;
di.pci_vendor_id = I40E_INTEL_VENDOR_ID;
di.pci_device_id = I40E_DEV_ID_QSFP_A;
di.pci_class = 0x02;
di.pci_subclass = 0x00;
di.pci_revision = 0x01;
di.pci_msi_nvecs = 32;
di.pci_msix_nvecs = 0x80;
di.pci_msix_table_bar = BAR_MSIX;
di.pci_msix_pba_bar = BAR_MSIX;
di.pci_msix_table_offset = 0x0;
di.pci_msix_pba_offset = 0x1000;
di.psi_msix_cap_offset = 0x70;
}
void i40e_bm::dma_complete(nicbm::DMAOp &op) {
dma_base &dma = dynamic_cast<dma_base &>(op);
#ifdef DEBUG_DEV
log << "dma_complete(" << &op << ")" << logger::endl;
#endif
dma.done();
}
void i40e_bm::eth_rx(uint8_t port, const void *data, size_t len) {
#ifdef DEBUG_DEV
log << "i40e: received packet len=" << len << logger::endl;
#endif
lanmgr.packet_received(data, len);
}
void i40e_bm::reg_read(uint8_t bar, uint64_t addr, void *dest, size_t len) {
uint32_t *dest_p = reinterpret_cast<uint32_t *>(dest);
if (len == 4) {
dest_p[0] = reg_read32(bar, addr);
} else if (len == 8) {
dest_p[0] = reg_read32(bar, addr);
dest_p[1] = reg_read32(bar, addr + 4);
} else {
log << "currently we only support 4/8B reads (got " << len << ")"
<< logger::endl;
abort();
}
}
uint32_t i40e_bm::reg_read32(uint8_t bar, uint64_t addr) {
if (bar == BAR_REGS) {
return reg_mem_read32(addr);
} else if (bar == BAR_IO) {
return reg_io_read(addr);
} else {
log << "invalid BAR " << (int)bar << logger::endl;
abort();
}
}
void i40e_bm::reg_write(uint8_t bar, uint64_t addr, const void *src, size_t len)
{
const uint32_t *src_p = reinterpret_cast<const uint32_t *> (src);
if (len == 4) {
reg_write32(bar, addr, src_p[0]);
} else if (len == 8) {
reg_write32(bar, addr, src_p[0]);
reg_write32(bar, addr + 4, src_p[1]);
} else {
log << "currently we only support 4/8B writes (got " << len << ")"
<< logger::endl;
abort();
}
void i40e_bm::reg_write(uint8_t bar, uint64_t addr, const void *src,
size_t len) {
const uint32_t *src_p = reinterpret_cast<const uint32_t *>(src);
if (len == 4) {
reg_write32(bar, addr, src_p[0]);
} else if (len == 8) {
reg_write32(bar, addr, src_p[0]);
reg_write32(bar, addr + 4, src_p[1]);
} else {
log << "currently we only support 4/8B writes (got " << len << ")"
<< logger::endl;
abort();
}
}
void i40e_bm::reg_write32(uint8_t bar, uint64_t addr, uint32_t val)
{
if (bar == BAR_REGS) {
reg_mem_write32(addr, val);
} else if (bar == BAR_IO) {
reg_io_write(addr, val);
} else {
log << "invalid BAR " << (int) bar << logger::endl;
abort();
}
void i40e_bm::reg_write32(uint8_t bar, uint64_t addr, uint32_t val) {
if (bar == BAR_REGS) {
reg_mem_write32(addr, val);
} else if (bar == BAR_IO) {
reg_io_write(addr, val);
} else {
log << "invalid BAR " << (int)bar << logger::endl;
abort();
}
}
uint32_t i40e_bm::reg_io_read(uint64_t addr)
{
log << "unhandled io read addr=" << addr << logger::endl;
return 0;
uint32_t i40e_bm::reg_io_read(uint64_t addr) {
log << "unhandled io read addr=" << addr << logger::endl;
return 0;
}
void i40e_bm::reg_io_write(uint64_t addr, uint32_t val)
{
log << "unhandled io write addr=" << addr << " val="
<< val << logger::endl;
void i40e_bm::reg_io_write(uint64_t addr, uint32_t val) {
log << "unhandled io write addr=" << addr << " val=" << val << logger::endl;
}
uint32_t i40e_bm::reg_mem_read32(uint64_t addr)
{
uint32_t val = 0;
if (addr >= I40E_PFINT_DYN_CTLN(0) &&
addr < I40E_PFINT_DYN_CTLN(NUM_PFINTS - 1)) {
val = regs.pfint_dyn_ctln[(addr - I40E_PFINT_DYN_CTLN(0)) / 4];
} else if (addr >= I40E_PFINT_LNKLSTN(0) &&
addr <= I40E_PFINT_LNKLSTN(NUM_PFINTS - 1)) {
val = regs.pfint_lnklstn[(addr - I40E_PFINT_LNKLSTN(0)) / 4];
} else if (addr >= I40E_PFINT_RATEN(0) &&
addr <= I40E_PFINT_RATEN(NUM_PFINTS - 1)) {
val = regs.pfint_raten[(addr - I40E_PFINT_RATEN(0)) / 4];
} else if (addr >= I40E_GLLAN_TXPRE_QDIS(0) &&
addr < I40E_GLLAN_TXPRE_QDIS(12)) {
val = regs.gllan_txpre_qdis[(addr - I40E_GLLAN_TXPRE_QDIS(0)) / 4];
} else if (addr >= I40E_QINT_TQCTL(0) &&
addr <= I40E_QINT_TQCTL(NUM_QUEUES - 1)) {
val = regs.qint_tqctl[(addr - I40E_QINT_TQCTL(0)) / 4];
} else if (addr >= I40E_QTX_ENA(0) &&
addr <= I40E_QTX_ENA(NUM_QUEUES - 1)) {
val = regs.qtx_ena[(addr - I40E_QTX_ENA(0)) / 4];
} else if (addr >= I40E_QTX_TAIL(0) &&
addr <= I40E_QTX_TAIL(NUM_QUEUES - 1)) {
val = regs.qtx_tail[(addr - I40E_QTX_TAIL(0)) / 4];
} else if (addr >= I40E_QTX_CTL(0) &&
addr <= I40E_QTX_CTL(NUM_QUEUES - 1)) {
val = regs.qtx_ctl[(addr - I40E_QTX_CTL(0)) / 4];
} else if (addr >= I40E_QINT_RQCTL(0) &&
addr <= I40E_QINT_RQCTL(NUM_QUEUES - 1)) {
val = regs.qint_rqctl[(addr - I40E_QINT_RQCTL(0)) / 4];
} else if (addr >= I40E_QRX_ENA(0) &&
addr <= I40E_QRX_ENA(NUM_QUEUES - 1)) {
val = regs.qrx_ena[(addr - I40E_QRX_ENA(0)) / 4];
} else if (addr >= I40E_QRX_TAIL(0) &&
addr <= I40E_QRX_TAIL(NUM_QUEUES - 1)) {
val = regs.qrx_tail[(addr - I40E_QRX_TAIL(0)) / 4];
} else if (addr >= I40E_GLHMC_LANTXBASE(0) &&
addr <= I40E_GLHMC_LANTXBASE(I40E_GLHMC_LANTXBASE_MAX_INDEX)) {
val = regs.glhmc_lantxbase[(addr - I40E_GLHMC_LANTXBASE(0)) / 4];
} else if (addr >= I40E_GLHMC_LANTXCNT(0) &&
addr <= I40E_GLHMC_LANTXCNT(I40E_GLHMC_LANTXCNT_MAX_INDEX)) {
val = regs.glhmc_lantxcnt[(addr - I40E_GLHMC_LANTXCNT(0)) / 4];
} else if (addr >= I40E_GLHMC_LANRXBASE(0) &&
addr <= I40E_GLHMC_LANRXBASE(I40E_GLHMC_LANRXBASE_MAX_INDEX)) {
val = regs.glhmc_lanrxbase[(addr - I40E_GLHMC_LANRXBASE(0)) / 4];
} else if (addr >= I40E_GLHMC_LANRXCNT(0) &&
addr <= I40E_GLHMC_LANRXCNT(I40E_GLHMC_LANRXCNT_MAX_INDEX)) {
val = regs.glhmc_lanrxcnt[(addr - I40E_GLHMC_LANRXCNT(0)) / 4];
} else if (addr >= I40E_PFQF_HKEY(0) &&
addr <= I40E_PFQF_HKEY(I40E_PFQF_HKEY_MAX_INDEX)) {
val = regs.pfqf_hkey[(addr - I40E_PFQF_HKEY(0)) / 128];
} else if (addr >= I40E_PFQF_HLUT(0) &&
addr <= I40E_PFQF_HLUT(I40E_PFQF_HLUT_MAX_INDEX)) {
val = regs.pfqf_hlut[(addr - I40E_PFQF_HLUT(0)) / 128];
} else if (addr >= I40E_PFINT_ITRN(0, 0) &&
addr <= I40E_PFINT_ITRN(0, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[0][(addr - I40E_PFINT_ITRN(0, 0)) / 4];
} else if (addr >= I40E_PFINT_ITRN(1, 0) &&
addr <= I40E_PFINT_ITRN(1, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[1][(addr - I40E_PFINT_ITRN(1, 0)) / 4];
} else if (addr >= I40E_PFINT_ITRN(2, 0) &&
addr <= I40E_PFINT_ITRN(2, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[2][(addr - I40E_PFINT_ITRN(2, 0)) / 4];
} else {
switch (addr) {
case I40E_PFGEN_CTRL:
val = 0; /* we always simulate immediate reset */
break;
case I40E_GL_FWSTS:
val = 0;
break;
case I40E_GLPCI_CAPSUP:
val = 0;
break;
case I40E_GLNVM_ULD:
val = 0xffffffff;
break;
case I40E_GLNVM_GENS:
val = I40E_GLNVM_GENS_NVM_PRES_MASK |
(6 << I40E_GLNVM_GENS_SR_SIZE_SHIFT); // shadow ram 64kb
break;
case I40E_GLNVM_FLA:
val = I40E_GLNVM_FLA_LOCKED_MASK; // normal flash programming
// mode
break;
case I40E_GLGEN_RSTCTL:
val = regs.glgen_rstctl;
break;
case I40E_GLGEN_STAT:
val = regs.glgen_stat;
break;
case I40E_GLVFGEN_TIMER:
val = runner->time_ps() / 1000000;
break;
case I40E_PFINT_LNKLST0:
val = regs.pfint_lnklst0;
break;
case I40E_PFINT_ICR0_ENA:
val = regs.pfint_icr0_ena;
break;
case I40E_PFINT_ICR0:
val = regs.pfint_icr0;
// read clears
regs.pfint_icr0 = 0;
break;
case I40E_PFINT_STAT_CTL0:
val = regs.pfint_stat_ctl0;
break;
case I40E_PFINT_DYN_CTL0:
val = regs.pfint_dyn_ctl0;
break;
case I40E_PFINT_ITR0(0):
val = regs.pfint_itr0[0];
break;
case I40E_PFINT_ITR0(1):
val = regs.pfint_itr0[1];
break;
case I40E_PFINT_ITR0(2):
val = regs.pfint_itr0[2];
break;
case I40E_GLPCI_CNF2:
// ugly, but the Linux driver needs this value to avoid crashing
val = ((NUM_PFINTS - 2) << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) |
(2 << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT);
break;
case I40E_GLNVM_SRCTL:
val = regs.glnvm_srctl;
break;
case I40E_GLNVM_SRDATA:
val = regs.glnvm_srdata;
break;
case I40E_PFLAN_QALLOC:
val = (0 << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) |
((NUM_QUEUES - 1) << I40E_PFLAN_QALLOC_LASTQ_SHIFT) |
(1 << I40E_PFLAN_QALLOC_VALID_SHIFT);
break;
case I40E_PF_VT_PFALLOC:
val = 0; // we don't currently support VFs
break;
case I40E_PFGEN_PORTNUM:
val = (0 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
break;
case I40E_GLLAN_RCTL_0:
val = regs.gllan_rctl_0;
break;
case I40E_GLHMC_LANTXOBJSZ:
val = 7; // 128 B
break;
case I40E_GLHMC_LANQMAX:
val = NUM_QUEUES;
break;
case I40E_GLHMC_LANRXOBJSZ:
val = 5; // 32 B
break;
case I40E_GLHMC_FCOEMAX:
val = 0;
break;
case I40E_GLHMC_FCOEDDPOBJSZ:
val = 0;
break;
case I40E_GLHMC_FCOEFMAX:
// needed to make linux driver happy
val = 0x1000 << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
break;
case I40E_GLHMC_FCOEFOBJSZ:
val = 0;
break;
case I40E_PFHMC_SDCMD:
val = regs.pfhmc_sdcmd;
break;
case I40E_PFHMC_SDDATALOW:
val = regs.pfhmc_sddatalow;
break;
case I40E_PFHMC_SDDATAHIGH:
val = regs.pfhmc_sddatahigh;
break;
case I40E_PFHMC_PDINV:
val = regs.pfhmc_pdinv;
break;
case I40E_PFHMC_ERRORINFO:
val = regs.pfhmc_errorinfo;
break;
case I40E_PFHMC_ERRORDATA:
val = regs.pfhmc_errordata;
break;
case I40E_PF_ATQBAL:
val = regs.pf_atqba;
break;
case I40E_PF_ATQBAH:
val = regs.pf_atqba >> 32;
break;
case I40E_PF_ATQLEN:
val = regs.pf_atqlen;
break;
case I40E_PF_ATQH:
val = regs.pf_atqh;
break;
case I40E_PF_ATQT:
val = regs.pf_atqt;
break;
case I40E_PF_ARQBAL:
val = regs.pf_arqba;
break;
case I40E_PF_ARQBAH:
val = regs.pf_arqba >> 32;
break;
case I40E_PF_ARQLEN:
val = regs.pf_arqlen;
break;
case I40E_PF_ARQH:
val = regs.pf_arqh;
break;
case I40E_PF_ARQT:
val = regs.pf_arqt;
break;
case I40E_PRTMAC_LINKSTA:
val = I40E_REG_LINK_UP | I40E_REG_SPEED_25_40GB;
break;
case I40E_PRTMAC_MACC:
val = 0;
break;
case I40E_PFQF_CTL_0:
val = regs.pfqf_ctl_0;
break;
case I40E_PRTDCB_FCCFG:
val = regs.prtdcb_fccfg;
break;
case I40E_PRTDCB_MFLCN:
val = regs.prtdcb_mflcn;
break;
case I40E_PRT_L2TAGSEN:
val = regs.prt_l2tagsen;
break;
case I40E_PRTQF_CTL_0:
val = regs.prtqf_ctl_0;
break;
case I40E_GLRPB_GHW:
val = regs.glrpb_ghw;
break;
case I40E_GLRPB_GLW:
val = regs.glrpb_glw;
break;
case I40E_GLRPB_PHW:
val = regs.glrpb_phw;
break;
case I40E_GLRPB_PLW:
val = regs.glrpb_plw;
break;
default:
uint32_t i40e_bm::reg_mem_read32(uint64_t addr) {
uint32_t val = 0;
if (addr >= I40E_PFINT_DYN_CTLN(0) &&
addr < I40E_PFINT_DYN_CTLN(NUM_PFINTS - 1)) {
val = regs.pfint_dyn_ctln[(addr - I40E_PFINT_DYN_CTLN(0)) / 4];
} else if (addr >= I40E_PFINT_LNKLSTN(0) &&
addr <= I40E_PFINT_LNKLSTN(NUM_PFINTS - 1)) {
val = regs.pfint_lnklstn[(addr - I40E_PFINT_LNKLSTN(0)) / 4];
} else if (addr >= I40E_PFINT_RATEN(0) &&
addr <= I40E_PFINT_RATEN(NUM_PFINTS - 1)) {
val = regs.pfint_raten[(addr - I40E_PFINT_RATEN(0)) / 4];
} else if (addr >= I40E_GLLAN_TXPRE_QDIS(0) &&
addr < I40E_GLLAN_TXPRE_QDIS(12)) {
val = regs.gllan_txpre_qdis[(addr - I40E_GLLAN_TXPRE_QDIS(0)) / 4];
} else if (addr >= I40E_QINT_TQCTL(0) &&
addr <= I40E_QINT_TQCTL(NUM_QUEUES - 1)) {
val = regs.qint_tqctl[(addr - I40E_QINT_TQCTL(0)) / 4];
} else if (addr >= I40E_QTX_ENA(0) && addr <= I40E_QTX_ENA(NUM_QUEUES - 1)) {
val = regs.qtx_ena[(addr - I40E_QTX_ENA(0)) / 4];
} else if (addr >= I40E_QTX_TAIL(0) &&
addr <= I40E_QTX_TAIL(NUM_QUEUES - 1)) {
val = regs.qtx_tail[(addr - I40E_QTX_TAIL(0)) / 4];
} else if (addr >= I40E_QTX_CTL(0) && addr <= I40E_QTX_CTL(NUM_QUEUES - 1)) {
val = regs.qtx_ctl[(addr - I40E_QTX_CTL(0)) / 4];
} else if (addr >= I40E_QINT_RQCTL(0) &&
addr <= I40E_QINT_RQCTL(NUM_QUEUES - 1)) {
val = regs.qint_rqctl[(addr - I40E_QINT_RQCTL(0)) / 4];
} else if (addr >= I40E_QRX_ENA(0) && addr <= I40E_QRX_ENA(NUM_QUEUES - 1)) {
val = regs.qrx_ena[(addr - I40E_QRX_ENA(0)) / 4];
} else if (addr >= I40E_QRX_TAIL(0) &&
addr <= I40E_QRX_TAIL(NUM_QUEUES - 1)) {
val = regs.qrx_tail[(addr - I40E_QRX_TAIL(0)) / 4];
} else if (addr >= I40E_GLHMC_LANTXBASE(0) &&
addr <= I40E_GLHMC_LANTXBASE(I40E_GLHMC_LANTXBASE_MAX_INDEX)) {
val = regs.glhmc_lantxbase[(addr - I40E_GLHMC_LANTXBASE(0)) / 4];
} else if (addr >= I40E_GLHMC_LANTXCNT(0) &&
addr <= I40E_GLHMC_LANTXCNT(I40E_GLHMC_LANTXCNT_MAX_INDEX)) {
val = regs.glhmc_lantxcnt[(addr - I40E_GLHMC_LANTXCNT(0)) / 4];
} else if (addr >= I40E_GLHMC_LANRXBASE(0) &&
addr <= I40E_GLHMC_LANRXBASE(I40E_GLHMC_LANRXBASE_MAX_INDEX)) {
val = regs.glhmc_lanrxbase[(addr - I40E_GLHMC_LANRXBASE(0)) / 4];
} else if (addr >= I40E_GLHMC_LANRXCNT(0) &&
addr <= I40E_GLHMC_LANRXCNT(I40E_GLHMC_LANRXCNT_MAX_INDEX)) {
val = regs.glhmc_lanrxcnt[(addr - I40E_GLHMC_LANRXCNT(0)) / 4];
} else if (addr >= I40E_PFQF_HKEY(0) &&
addr <= I40E_PFQF_HKEY(I40E_PFQF_HKEY_MAX_INDEX)) {
val = regs.pfqf_hkey[(addr - I40E_PFQF_HKEY(0)) / 128];
} else if (addr >= I40E_PFQF_HLUT(0) &&
addr <= I40E_PFQF_HLUT(I40E_PFQF_HLUT_MAX_INDEX)) {
val = regs.pfqf_hlut[(addr - I40E_PFQF_HLUT(0)) / 128];
} else if (addr >= I40E_PFINT_ITRN(0, 0) &&
addr <= I40E_PFINT_ITRN(0, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[0][(addr - I40E_PFINT_ITRN(0, 0)) / 4];
} else if (addr >= I40E_PFINT_ITRN(1, 0) &&
addr <= I40E_PFINT_ITRN(1, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[1][(addr - I40E_PFINT_ITRN(1, 0)) / 4];
} else if (addr >= I40E_PFINT_ITRN(2, 0) &&
addr <= I40E_PFINT_ITRN(2, NUM_PFINTS - 1)) {
val = regs.pfint_itrn[2][(addr - I40E_PFINT_ITRN(2, 0)) / 4];
} else {
switch (addr) {
case I40E_PFGEN_CTRL:
val = 0; /* we always simulate immediate reset */
break;
case I40E_GL_FWSTS:
val = 0;
break;
case I40E_GLPCI_CAPSUP:
val = 0;
break;
case I40E_GLNVM_ULD:
val = 0xffffffff;
break;
case I40E_GLNVM_GENS:
val = I40E_GLNVM_GENS_NVM_PRES_MASK |
(6 << I40E_GLNVM_GENS_SR_SIZE_SHIFT); // shadow ram 64kb
break;
case I40E_GLNVM_FLA:
val = I40E_GLNVM_FLA_LOCKED_MASK; // normal flash programming
// mode
break;
case I40E_GLGEN_RSTCTL:
val = regs.glgen_rstctl;
break;
case I40E_GLGEN_STAT:
val = regs.glgen_stat;
break;
case I40E_GLVFGEN_TIMER:
val = runner->time_ps() / 1000000;
break;
case I40E_PFINT_LNKLST0:
val = regs.pfint_lnklst0;
break;
case I40E_PFINT_ICR0_ENA:
val = regs.pfint_icr0_ena;
break;
case I40E_PFINT_ICR0:
val = regs.pfint_icr0;
// read clears
regs.pfint_icr0 = 0;
break;
case I40E_PFINT_STAT_CTL0:
val = regs.pfint_stat_ctl0;
break;
case I40E_PFINT_DYN_CTL0:
val = regs.pfint_dyn_ctl0;
break;
case I40E_PFINT_ITR0(0):
val = regs.pfint_itr0[0];
break;
case I40E_PFINT_ITR0(1):
val = regs.pfint_itr0[1];
break;
case I40E_PFINT_ITR0(2):
val = regs.pfint_itr0[2];
break;
case I40E_GLPCI_CNF2:
// ugly, but the Linux driver needs this value to avoid crashing
val = ((NUM_PFINTS - 2) << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) |
(2 << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT);
break;
case I40E_GLNVM_SRCTL:
val = regs.glnvm_srctl;
break;
case I40E_GLNVM_SRDATA:
val = regs.glnvm_srdata;
break;
case I40E_PFLAN_QALLOC:
val = (0 << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) |
((NUM_QUEUES - 1) << I40E_PFLAN_QALLOC_LASTQ_SHIFT) |
(1 << I40E_PFLAN_QALLOC_VALID_SHIFT);
break;
case I40E_PF_VT_PFALLOC:
val = 0; // we don't currently support VFs
break;
case I40E_PFGEN_PORTNUM:
val = (0 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
break;
case I40E_GLLAN_RCTL_0:
val = regs.gllan_rctl_0;
break;
case I40E_GLHMC_LANTXOBJSZ:
val = 7; // 128 B
break;
case I40E_GLHMC_LANQMAX:
val = NUM_QUEUES;
break;
case I40E_GLHMC_LANRXOBJSZ:
val = 5; // 32 B
break;
case I40E_GLHMC_FCOEMAX:
val = 0;
break;
case I40E_GLHMC_FCOEDDPOBJSZ:
val = 0;
break;
case I40E_GLHMC_FCOEFMAX:
// needed to make linux driver happy
val = 0x1000 << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
break;
case I40E_GLHMC_FCOEFOBJSZ:
val = 0;
break;
case I40E_PFHMC_SDCMD:
val = regs.pfhmc_sdcmd;
break;
case I40E_PFHMC_SDDATALOW:
val = regs.pfhmc_sddatalow;
break;
case I40E_PFHMC_SDDATAHIGH:
val = regs.pfhmc_sddatahigh;
break;
case I40E_PFHMC_PDINV:
val = regs.pfhmc_pdinv;
break;
case I40E_PFHMC_ERRORINFO:
val = regs.pfhmc_errorinfo;
break;
case I40E_PFHMC_ERRORDATA:
val = regs.pfhmc_errordata;
break;
case I40E_PF_ATQBAL:
val = regs.pf_atqba;
break;
case I40E_PF_ATQBAH:
val = regs.pf_atqba >> 32;
break;
case I40E_PF_ATQLEN:
val = regs.pf_atqlen;
break;
case I40E_PF_ATQH:
val = regs.pf_atqh;
break;
case I40E_PF_ATQT:
val = regs.pf_atqt;
break;
case I40E_PF_ARQBAL:
val = regs.pf_arqba;
break;
case I40E_PF_ARQBAH:
val = regs.pf_arqba >> 32;
break;
case I40E_PF_ARQLEN:
val = regs.pf_arqlen;
break;
case I40E_PF_ARQH:
val = regs.pf_arqh;
break;
case I40E_PF_ARQT:
val = regs.pf_arqt;
break;
case I40E_PRTMAC_LINKSTA:
val = I40E_REG_LINK_UP | I40E_REG_SPEED_25_40GB;
break;
case I40E_PRTMAC_MACC:
val = 0;
break;
case I40E_PFQF_CTL_0:
val = regs.pfqf_ctl_0;
break;
case I40E_PRTDCB_FCCFG:
val = regs.prtdcb_fccfg;
break;
case I40E_PRTDCB_MFLCN:
val = regs.prtdcb_mflcn;
break;
case I40E_PRT_L2TAGSEN:
val = regs.prt_l2tagsen;
break;
case I40E_PRTQF_CTL_0:
val = regs.prtqf_ctl_0;
break;
case I40E_GLRPB_GHW:
val = regs.glrpb_ghw;
break;
case I40E_GLRPB_GLW:
val = regs.glrpb_glw;
break;
case I40E_GLRPB_PHW:
val = regs.glrpb_phw;
break;
case I40E_GLRPB_PLW:
val = regs.glrpb_plw;
break;
default:
#ifdef DEBUG_DEV
log << "unhandled mem read addr=" << addr
<< logger::endl;
log << "unhandled mem read addr=" << addr << logger::endl;
#endif
break;
}
break;
}
}
return val;
return val;
}
void i40e_bm::reg_mem_write32(uint64_t addr, uint32_t val)
{
if (addr >= I40E_PFINT_DYN_CTLN(0) &&
addr <= I40E_PFINT_DYN_CTLN(NUM_PFINTS - 1)) {
regs.pfint_dyn_ctln[(addr - I40E_PFINT_DYN_CTLN(0)) / 4] = val;
} else if (addr >= I40E_PFINT_LNKLSTN(0) &&
addr <= I40E_PFINT_LNKLSTN(NUM_PFINTS - 1)) {
regs.pfint_lnklstn[(addr - I40E_PFINT_LNKLSTN(0)) / 4] = val;
} else if (addr >= I40E_PFINT_RATEN(0) &&
addr <= I40E_PFINT_RATEN(NUM_PFINTS - 1)) {
regs.pfint_raten[(addr - I40E_PFINT_RATEN(0)) / 4] = val;
} else if (addr >= I40E_GLLAN_TXPRE_QDIS(0) &&
addr <= I40E_GLLAN_TXPRE_QDIS(11)) {
regs.gllan_txpre_qdis[(addr - I40E_GLLAN_TXPRE_QDIS(0)) / 4] = val;
} else if (addr >= I40E_QINT_TQCTL(0) &&
addr <= I40E_QINT_TQCTL(NUM_QUEUES - 1)) {
regs.qint_tqctl[(addr - I40E_QINT_TQCTL(0)) / 4] = val;
} else if (addr >= I40E_QTX_ENA(0) &&
addr <= I40E_QTX_ENA(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QTX_ENA(0)) / 4;
regs.qtx_ena[idx] = val;
lanmgr.qena_updated(idx, false);
} else if (addr >= I40E_QTX_TAIL(0) &&
addr <= I40E_QTX_TAIL(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QTX_TAIL(0)) / 4;
regs.qtx_tail[idx] = val;
lanmgr.tail_updated(idx, false);
} else if (addr >= I40E_QTX_CTL(0) &&
addr <= I40E_QTX_CTL(NUM_QUEUES - 1)) {
regs.qtx_ctl[(addr - I40E_QTX_CTL(0)) / 4] = val;
} else if (addr >= I40E_QINT_RQCTL(0) &&
addr <= I40E_QINT_RQCTL(NUM_QUEUES - 1)) {
regs.qint_rqctl[(addr - I40E_QINT_RQCTL(0)) / 4] = val;
} else if (addr >= I40E_QRX_ENA(0) &&
addr <= I40E_QRX_ENA(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QRX_ENA(0)) / 4;
regs.qrx_ena[idx] = val;
lanmgr.qena_updated(idx, true);
} else if (addr >= I40E_QRX_TAIL(0) &&
addr <= I40E_QRX_TAIL(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QRX_TAIL(0)) / 4;
regs.qrx_tail[idx] = val;
lanmgr.tail_updated(idx, true);
} else if (addr >= I40E_GLHMC_LANTXBASE(0) &&
addr <= I40E_GLHMC_LANTXBASE(I40E_GLHMC_LANTXBASE_MAX_INDEX)) {
regs.glhmc_lantxbase[(addr - I40E_GLHMC_LANTXBASE(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANTXCNT(0) &&
addr <= I40E_GLHMC_LANTXCNT(I40E_GLHMC_LANTXCNT_MAX_INDEX)) {
regs.glhmc_lantxcnt[(addr - I40E_GLHMC_LANTXCNT(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANRXBASE(0) &&
addr <= I40E_GLHMC_LANRXBASE(I40E_GLHMC_LANRXBASE_MAX_INDEX)) {
regs.glhmc_lanrxbase[(addr - I40E_GLHMC_LANRXBASE(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANRXCNT(0) &&
addr <= I40E_GLHMC_LANRXCNT(I40E_GLHMC_LANRXCNT_MAX_INDEX)) {
regs.glhmc_lanrxcnt[(addr - I40E_GLHMC_LANRXCNT(0)) / 4] = val;
} else if (addr >= I40E_PFQF_HKEY(0) &&
addr <= I40E_PFQF_HKEY(I40E_PFQF_HKEY_MAX_INDEX)) {
regs.pfqf_hkey[(addr - I40E_PFQF_HKEY(0)) / 128] = val;
lanmgr.rss_key_updated();
} else if (addr >= I40E_PFQF_HLUT(0) &&
addr <= I40E_PFQF_HLUT(I40E_PFQF_HLUT_MAX_INDEX)) {
regs.pfqf_hlut[(addr - I40E_PFQF_HLUT(0)) / 128] = val;
} else if (addr >= I40E_PFINT_ITRN(0, 0) &&
addr <= I40E_PFINT_ITRN(0, NUM_PFINTS - 1)) {
regs.pfint_itrn[0][(addr - I40E_PFINT_ITRN(0, 0)) / 4] = val;
} else if (addr >= I40E_PFINT_ITRN(1, 0) &&
addr <= I40E_PFINT_ITRN(1, NUM_PFINTS - 1)) {
regs.pfint_itrn[1][(addr - I40E_PFINT_ITRN(1, 0)) / 4] = val;
} else if (addr >= I40E_PFINT_ITRN(2, 0) &&
addr <= I40E_PFINT_ITRN(2, NUM_PFINTS - 1)) {
regs.pfint_itrn[2][(addr - I40E_PFINT_ITRN(2, 0)) / 4] = val;
} else {
switch (addr) {
case I40E_PFGEN_CTRL:
if ((val & I40E_PFGEN_CTRL_PFSWR_MASK) ==
I40E_PFGEN_CTRL_PFSWR_MASK)
reset(true);
break;
case I40E_GL_FWSTS:
break;
case I40E_GLGEN_RSTCTL:
regs.glgen_rstctl = val;
break;
case I40E_GLLAN_RCTL_0:
if ((val & I40E_GLLAN_RCTL_0_PXE_MODE_MASK))
regs.gllan_rctl_0 &= ~I40E_GLLAN_RCTL_0_PXE_MODE_MASK;
break;
case I40E_GLNVM_SRCTL:
regs.glnvm_srctl = val;
shram.reg_updated();
break;
case I40E_GLNVM_SRDATA:
regs.glnvm_srdata = val;
shram.reg_updated();
break;
case I40E_PFINT_LNKLST0:
regs.pfint_lnklst0 = val;
break;
case I40E_PFINT_ICR0_ENA:
regs.pfint_icr0_ena = val;
break;
case I40E_PFINT_ICR0:
regs.pfint_icr0 = val;
break;
case I40E_PFINT_STAT_CTL0:
regs.pfint_stat_ctl0 = val;
break;
case I40E_PFINT_DYN_CTL0:
regs.pfint_dyn_ctl0 = val;
break;
case I40E_PFINT_ITR0(0):
regs.pfint_itr0[0] = val;
break;
case I40E_PFINT_ITR0(1):
regs.pfint_itr0[1] = val;
break;
case I40E_PFINT_ITR0(2):
regs.pfint_itr0[2] = val;
break;
case I40E_PFHMC_SDCMD:
regs.pfhmc_sdcmd = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_SDDATALOW:
regs.pfhmc_sddatalow = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_SDDATAHIGH:
regs.pfhmc_sddatahigh = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_PDINV:
regs.pfhmc_pdinv = val;
hmc.reg_updated(addr);
break;
case I40E_PF_ATQBAL:
regs.pf_atqba = val | (regs.pf_atqba & 0xffffffff00000000ULL);
pf_atq.reg_updated();
break;
case I40E_PF_ATQBAH:
regs.pf_atqba = ((uint64_t) val << 32) |
(regs.pf_atqba & 0xffffffffULL);
pf_atq.reg_updated();
break;
case I40E_PF_ATQLEN:
regs.pf_atqlen = val;
pf_atq.reg_updated();
break;
case I40E_PF_ATQH:
regs.pf_atqh = val;
pf_atq.reg_updated();
break;
case I40E_PF_ATQT:
regs.pf_atqt = val;
pf_atq.reg_updated();
break;
case I40E_PF_ARQBAL:
regs.pf_arqba = val | (regs.pf_arqba & 0xffffffff00000000ULL);
break;
case I40E_PF_ARQBAH:
regs.pf_arqba = ((uint64_t) val << 32) |
(regs.pf_arqba & 0xffffffffULL);
break;
case I40E_PF_ARQLEN:
regs.pf_arqlen = val;
break;
case I40E_PF_ARQH:
regs.pf_arqh = val;
break;
case I40E_PF_ARQT:
regs.pf_arqt = val;
break;
case I40E_PFQF_CTL_0:
regs.pfqf_ctl_0 = val;
break;
case I40E_PRTDCB_FCCFG:
regs.prtdcb_fccfg = val;
break;
case I40E_PRTDCB_MFLCN:
regs.prtdcb_mflcn = val;
break;
case I40E_PRT_L2TAGSEN:
regs.prt_l2tagsen = val;
break;
case I40E_PRTQF_CTL_0:
regs.prtqf_ctl_0 = val;
break;
case I40E_GLRPB_GHW:
regs.glrpb_ghw = val;
break;
case I40E_GLRPB_GLW:
regs.glrpb_glw = val;
break;
case I40E_GLRPB_PHW:
regs.glrpb_phw = val;
break;
case I40E_GLRPB_PLW:
regs.glrpb_plw = val;
break;
default:
void i40e_bm::reg_mem_write32(uint64_t addr, uint32_t val) {
if (addr >= I40E_PFINT_DYN_CTLN(0) &&
addr <= I40E_PFINT_DYN_CTLN(NUM_PFINTS - 1)) {
regs.pfint_dyn_ctln[(addr - I40E_PFINT_DYN_CTLN(0)) / 4] = val;
} else if (addr >= I40E_PFINT_LNKLSTN(0) &&
addr <= I40E_PFINT_LNKLSTN(NUM_PFINTS - 1)) {
regs.pfint_lnklstn[(addr - I40E_PFINT_LNKLSTN(0)) / 4] = val;
} else if (addr >= I40E_PFINT_RATEN(0) &&
addr <= I40E_PFINT_RATEN(NUM_PFINTS - 1)) {
regs.pfint_raten[(addr - I40E_PFINT_RATEN(0)) / 4] = val;
} else if (addr >= I40E_GLLAN_TXPRE_QDIS(0) &&
addr <= I40E_GLLAN_TXPRE_QDIS(11)) {
regs.gllan_txpre_qdis[(addr - I40E_GLLAN_TXPRE_QDIS(0)) / 4] = val;
} else if (addr >= I40E_QINT_TQCTL(0) &&
addr <= I40E_QINT_TQCTL(NUM_QUEUES - 1)) {
regs.qint_tqctl[(addr - I40E_QINT_TQCTL(0)) / 4] = val;
} else if (addr >= I40E_QTX_ENA(0) && addr <= I40E_QTX_ENA(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QTX_ENA(0)) / 4;
regs.qtx_ena[idx] = val;
lanmgr.qena_updated(idx, false);
} else if (addr >= I40E_QTX_TAIL(0) &&
addr <= I40E_QTX_TAIL(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QTX_TAIL(0)) / 4;
regs.qtx_tail[idx] = val;
lanmgr.tail_updated(idx, false);
} else if (addr >= I40E_QTX_CTL(0) && addr <= I40E_QTX_CTL(NUM_QUEUES - 1)) {
regs.qtx_ctl[(addr - I40E_QTX_CTL(0)) / 4] = val;
} else if (addr >= I40E_QINT_RQCTL(0) &&
addr <= I40E_QINT_RQCTL(NUM_QUEUES - 1)) {
regs.qint_rqctl[(addr - I40E_QINT_RQCTL(0)) / 4] = val;
} else if (addr >= I40E_QRX_ENA(0) && addr <= I40E_QRX_ENA(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QRX_ENA(0)) / 4;
regs.qrx_ena[idx] = val;
lanmgr.qena_updated(idx, true);
} else if (addr >= I40E_QRX_TAIL(0) &&
addr <= I40E_QRX_TAIL(NUM_QUEUES - 1)) {
size_t idx = (addr - I40E_QRX_TAIL(0)) / 4;
regs.qrx_tail[idx] = val;
lanmgr.tail_updated(idx, true);
} else if (addr >= I40E_GLHMC_LANTXBASE(0) &&
addr <= I40E_GLHMC_LANTXBASE(I40E_GLHMC_LANTXBASE_MAX_INDEX)) {
regs.glhmc_lantxbase[(addr - I40E_GLHMC_LANTXBASE(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANTXCNT(0) &&
addr <= I40E_GLHMC_LANTXCNT(I40E_GLHMC_LANTXCNT_MAX_INDEX)) {
regs.glhmc_lantxcnt[(addr - I40E_GLHMC_LANTXCNT(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANRXBASE(0) &&
addr <= I40E_GLHMC_LANRXBASE(I40E_GLHMC_LANRXBASE_MAX_INDEX)) {
regs.glhmc_lanrxbase[(addr - I40E_GLHMC_LANRXBASE(0)) / 4] = val;
} else if (addr >= I40E_GLHMC_LANRXCNT(0) &&
addr <= I40E_GLHMC_LANRXCNT(I40E_GLHMC_LANRXCNT_MAX_INDEX)) {
regs.glhmc_lanrxcnt[(addr - I40E_GLHMC_LANRXCNT(0)) / 4] = val;
} else if (addr >= I40E_PFQF_HKEY(0) &&
addr <= I40E_PFQF_HKEY(I40E_PFQF_HKEY_MAX_INDEX)) {
regs.pfqf_hkey[(addr - I40E_PFQF_HKEY(0)) / 128] = val;
lanmgr.rss_key_updated();
} else if (addr >= I40E_PFQF_HLUT(0) &&
addr <= I40E_PFQF_HLUT(I40E_PFQF_HLUT_MAX_INDEX)) {
regs.pfqf_hlut[(addr - I40E_PFQF_HLUT(0)) / 128] = val;
} else if (addr >= I40E_PFINT_ITRN(0, 0) &&
addr <= I40E_PFINT_ITRN(0, NUM_PFINTS - 1)) {
regs.pfint_itrn[0][(addr - I40E_PFINT_ITRN(0, 0)) / 4] = val;
} else if (addr >= I40E_PFINT_ITRN(1, 0) &&
addr <= I40E_PFINT_ITRN(1, NUM_PFINTS - 1)) {
regs.pfint_itrn[1][(addr - I40E_PFINT_ITRN(1, 0)) / 4] = val;
} else if (addr >= I40E_PFINT_ITRN(2, 0) &&
addr <= I40E_PFINT_ITRN(2, NUM_PFINTS - 1)) {
regs.pfint_itrn[2][(addr - I40E_PFINT_ITRN(2, 0)) / 4] = val;
} else {
switch (addr) {
case I40E_PFGEN_CTRL:
if ((val & I40E_PFGEN_CTRL_PFSWR_MASK) == I40E_PFGEN_CTRL_PFSWR_MASK)
reset(true);
break;
case I40E_GL_FWSTS:
break;
case I40E_GLGEN_RSTCTL:
regs.glgen_rstctl = val;
break;
case I40E_GLLAN_RCTL_0:
if ((val & I40E_GLLAN_RCTL_0_PXE_MODE_MASK))
regs.gllan_rctl_0 &= ~I40E_GLLAN_RCTL_0_PXE_MODE_MASK;
break;
case I40E_GLNVM_SRCTL:
regs.glnvm_srctl = val;
shram.reg_updated();
break;
case I40E_GLNVM_SRDATA:
regs.glnvm_srdata = val;
shram.reg_updated();
break;
case I40E_PFINT_LNKLST0:
regs.pfint_lnklst0 = val;
break;
case I40E_PFINT_ICR0_ENA:
regs.pfint_icr0_ena = val;
break;
case I40E_PFINT_ICR0:
regs.pfint_icr0 = val;
break;
case I40E_PFINT_STAT_CTL0:
regs.pfint_stat_ctl0 = val;
break;
case I40E_PFINT_DYN_CTL0:
regs.pfint_dyn_ctl0 = val;
break;
case I40E_PFINT_ITR0(0):
regs.pfint_itr0[0] = val;
break;
case I40E_PFINT_ITR0(1):
regs.pfint_itr0[1] = val;
break;
case I40E_PFINT_ITR0(2):
regs.pfint_itr0[2] = val;
break;
case I40E_PFHMC_SDCMD:
regs.pfhmc_sdcmd = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_SDDATALOW:
regs.pfhmc_sddatalow = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_SDDATAHIGH:
regs.pfhmc_sddatahigh = val;
hmc.reg_updated(addr);
break;
case I40E_PFHMC_PDINV:
regs.pfhmc_pdinv = val;
hmc.reg_updated(addr);
break;
case I40E_PF_ATQBAL:
regs.pf_atqba = val | (regs.pf_atqba & 0xffffffff00000000ULL);
pf_atq.reg_updated();
break;
case I40E_PF_ATQBAH:
regs.pf_atqba = ((uint64_t)val << 32) | (regs.pf_atqba & 0xffffffffULL);
pf_atq.reg_updated();
break;
case I40E_PF_ATQLEN:
regs.pf_atqlen = val;
pf_atq.reg_updated();
break;
case I40E_PF_ATQH:
regs.pf_atqh = val;
pf_atq.reg_updated();
break;
case I40E_PF_ATQT:
regs.pf_atqt = val;
pf_atq.reg_updated();
break;
case I40E_PF_ARQBAL:
regs.pf_arqba = val | (regs.pf_arqba & 0xffffffff00000000ULL);
break;
case I40E_PF_ARQBAH:
regs.pf_arqba = ((uint64_t)val << 32) | (regs.pf_arqba & 0xffffffffULL);
break;
case I40E_PF_ARQLEN:
regs.pf_arqlen = val;
break;
case I40E_PF_ARQH:
regs.pf_arqh = val;
break;
case I40E_PF_ARQT:
regs.pf_arqt = val;
break;
case I40E_PFQF_CTL_0:
regs.pfqf_ctl_0 = val;
break;
case I40E_PRTDCB_FCCFG:
regs.prtdcb_fccfg = val;
break;
case I40E_PRTDCB_MFLCN:
regs.prtdcb_mflcn = val;
break;
case I40E_PRT_L2TAGSEN:
regs.prt_l2tagsen = val;
break;
case I40E_PRTQF_CTL_0:
regs.prtqf_ctl_0 = val;
break;
case I40E_GLRPB_GHW:
regs.glrpb_ghw = val;
break;
case I40E_GLRPB_GLW:
regs.glrpb_glw = val;
break;
case I40E_GLRPB_PHW:
regs.glrpb_phw = val;
break;
case I40E_GLRPB_PLW:
regs.glrpb_plw = val;
break;
default:
#ifdef DEBUG_DEV
log << "unhandled mem write addr=" << addr
<< " val=" << val << logger::endl;
log << "unhandled mem write addr=" << addr << " val=" << val
<< logger::endl;
#endif
break;
}
break;
}
}
}
void i40e_bm::timed_event(nicbm::TimedEvent &ev)
{
int_ev &iev = *((int_ev *) &ev);
void i40e_bm::timed_event(nicbm::TimedEvent &ev) {
int_ev &iev = *((int_ev *)&ev);
#ifdef DEBUG_DEV
log << "timed_event: triggering interrupt (" << iev.vec << ")" <<
logger::endl;
log << "timed_event: triggering interrupt (" << iev.vec << ")"
<< logger::endl;
#endif
iev.armed = false;
if (int_msix_en) {
runner->msix_issue(iev.vec);
} else if (iev.vec > 0) {
log << "timed_event: MSI-X disabled, but vec != 0" << logger::endl;
abort();
} else {
runner->msi_issue(0);
}
iev.armed = false;
if (int_msix_en) {
runner->msix_issue(iev.vec);
} else if (iev.vec > 0) {
log << "timed_event: MSI-X disabled, but vec != 0" << logger::endl;
abort();
} else {
runner->msi_issue(0);
}
}
void i40e_bm::signal_interrupt(uint16_t vec, uint8_t itr)
{
int_ev &iev = intevs[vec];
uint64_t mindelay;
if (itr <= 2) {
// itr 0-2
if (vec == 0)
mindelay = regs.pfint_itr0[itr];
else
mindelay = regs.pfint_itrn[itr][vec];
mindelay *= 2000000ULL;  // ITR registers count in 2 us units -> picoseconds
} else if (itr == 3) {
// noitr
mindelay = 0;
} else {
log << "signal_interrupt() invalid itr (" << itr << ")" << logger::endl;
abort();
}
uint64_t curtime = runner->time_ps();
uint64_t newtime = curtime + mindelay;
if (iev.armed && iev.time <= newtime) {
// already armed and this is not scheduled sooner
void i40e_bm::signal_interrupt(uint16_t vec, uint8_t itr) {
int_ev &iev = intevs[vec];
uint64_t mindelay;
if (itr <= 2) {
// itr 0-2
if (vec == 0)
mindelay = regs.pfint_itr0[itr];
else
mindelay = regs.pfint_itrn[itr][vec];
mindelay *= 2000000ULL;  // ITR registers count in 2 us units -> picoseconds
} else if (itr == 3) {
// noitr
mindelay = 0;
} else {
log << "signal_interrupt() invalid itr (" << itr << ")" << logger::endl;
abort();
}
uint64_t curtime = runner->time_ps();
uint64_t newtime = curtime + mindelay;
if (iev.armed && iev.time <= newtime) {
// already armed and this is not scheduled sooner
#ifdef DEBUG_DEV
log << "signal_interrupt: vec " << vec << " already scheduled" <<
logger::endl;
log << "signal_interrupt: vec " << vec << " already scheduled"
<< logger::endl;
#endif
return;
} else if (iev.armed) {
// need to reschedule
runner->event_cancel(iev);
}
return;
} else if (iev.armed) {
// need to reschedule
runner->event_cancel(iev);
}
iev.armed = true;
iev.time = newtime;
iev.armed = true;
iev.time = newtime;
#ifdef DEBUG_DEV
log << "signal_interrupt: scheduled vec " << vec << " for time=" <<
newtime << " (itr " << itr << ")" << logger::endl;
log << "signal_interrupt: scheduled vec " << vec << " for time=" << newtime
<< " (itr " << itr << ")" << logger::endl;
#endif
runner->event_schedule(iev);
runner->event_schedule(iev);
}
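// Illustrative call pattern (sketch; the calling context is assumed, only
// signal_interrupt() itself is defined in this file): a queue that finished
// writing back descriptors would request a moderated interrupt with
//   signal_interrupt(vec, itr);  // itr 0-2 selects one of the ITR timers
//   signal_interrupt(vec, 3);    // itr 3 ("no ITR") fires immediately
// Calls that arrive while the event is already armed for an earlier or equal
// time are coalesced into the pending MSI/MSI-X.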
void i40e_bm::reset(bool indicate_done)
{
void i40e_bm::reset(bool indicate_done) {
#ifdef DEBUG_DEV
std::cout << "reset triggered" << logger::endl;
std::cout << "reset triggered" << logger::endl;
#endif
pf_atq.reset();
hmc.reset();
lanmgr.reset();
memset(&regs, 0, sizeof(regs));
if (indicate_done)
regs.glnvm_srctl = I40E_GLNVM_SRCTL_DONE_MASK;
for (uint16_t i = 0; i < NUM_PFINTS; i++) {
intevs[i].vec = i;
if (intevs[i].armed) {
runner->event_cancel(intevs[i]);
intevs[i].armed = false;
}
intevs[i].time = 0;
}
pf_atq.reset();
hmc.reset();
lanmgr.reset();
// add default hash key
regs.pfqf_hkey[0] = 0xda565a6d;
regs.pfqf_hkey[1] = 0xc20e5b25;
regs.pfqf_hkey[2] = 0x3d256741;
regs.pfqf_hkey[3] = 0xb08fa343;
regs.pfqf_hkey[4] = 0xcb2bcad0;
regs.pfqf_hkey[5] = 0xb4307bae;
regs.pfqf_hkey[6] = 0xa32dcb77;
regs.pfqf_hkey[7] = 0x0cf23080;
regs.pfqf_hkey[8] = 0x3bb7426a;
regs.pfqf_hkey[9] = 0xfa01acbe;
regs.pfqf_hkey[10] = 0x0;
regs.pfqf_hkey[11] = 0x0;
regs.pfqf_hkey[12] = 0x0;
regs.glrpb_ghw = 0xF2000;
regs.glrpb_phw = 0x1246;
regs.glrpb_plw = 0x0846;
memset(&regs, 0, sizeof(regs));
if (indicate_done)
regs.glnvm_srctl = I40E_GLNVM_SRCTL_DONE_MASK;
for (uint16_t i = 0; i < NUM_PFINTS; i++) {
intevs[i].vec = i;
if (intevs[i].armed) {
runner->event_cancel(intevs[i]);
intevs[i].armed = false;
}
intevs[i].time = 0;
}
// add default hash key
regs.pfqf_hkey[0] = 0xda565a6d;
regs.pfqf_hkey[1] = 0xc20e5b25;
regs.pfqf_hkey[2] = 0x3d256741;
regs.pfqf_hkey[3] = 0xb08fa343;
regs.pfqf_hkey[4] = 0xcb2bcad0;
regs.pfqf_hkey[5] = 0xb4307bae;
regs.pfqf_hkey[6] = 0xa32dcb77;
regs.pfqf_hkey[7] = 0x0cf23080;
regs.pfqf_hkey[8] = 0x3bb7426a;
regs.pfqf_hkey[9] = 0xfa01acbe;
regs.pfqf_hkey[10] = 0x0;
regs.pfqf_hkey[11] = 0x0;
regs.pfqf_hkey[12] = 0x0;
regs.glrpb_ghw = 0xF2000;
regs.glrpb_phw = 0x1246;
regs.glrpb_plw = 0x0846;
}
shadow_ram::shadow_ram(i40e_bm &dev_)
: dev(dev_), log("sram")
{
shadow_ram::shadow_ram(i40e_bm &dev_) : dev(dev_), log("sram") {
}
void shadow_ram::reg_updated()
{
uint32_t val = dev.regs.glnvm_srctl;
uint32_t addr;
bool is_write;
void shadow_ram::reg_updated() {
uint32_t val = dev.regs.glnvm_srctl;
uint32_t addr;
bool is_write;
if (!(val & I40E_GLNVM_SRCTL_START_MASK))
return;
if (!(val & I40E_GLNVM_SRCTL_START_MASK))
return;
addr = (val & I40E_GLNVM_SRCTL_ADDR_MASK)
>> I40E_GLNVM_SRCTL_ADDR_SHIFT;
is_write = (val & I40E_GLNVM_SRCTL_WRITE_MASK);
addr = (val & I40E_GLNVM_SRCTL_ADDR_MASK) >> I40E_GLNVM_SRCTL_ADDR_SHIFT;
is_write = (val & I40E_GLNVM_SRCTL_WRITE_MASK);
#ifdef DEBUG_DEV
log << "shadow ram op addr=" << addr << " w=" << is_write
<< logger::endl;
log << "shadow ram op addr=" << addr << " w=" << is_write << logger::endl;
#endif
if (is_write) {
write(addr,
(dev.regs.glnvm_srdata & I40E_GLNVM_SRDATA_WRDATA_MASK)
>> I40E_GLNVM_SRDATA_WRDATA_SHIFT);
} else {
dev.regs.glnvm_srdata &= ~I40E_GLNVM_SRDATA_RDDATA_MASK;
dev.regs.glnvm_srdata |= ((uint32_t) read(addr)) <<
I40E_GLNVM_SRDATA_RDDATA_SHIFT;
}
dev.regs.glnvm_srctl &= ~I40E_GLNVM_SRCTL_START_MASK;
dev.regs.glnvm_srctl |= I40E_GLNVM_SRCTL_DONE_MASK;
if (is_write) {
write(addr, (dev.regs.glnvm_srdata & I40E_GLNVM_SRDATA_WRDATA_MASK) >>
I40E_GLNVM_SRDATA_WRDATA_SHIFT);
} else {
dev.regs.glnvm_srdata &= ~I40E_GLNVM_SRDATA_RDDATA_MASK;
dev.regs.glnvm_srdata |= ((uint32_t)read(addr))
<< I40E_GLNVM_SRDATA_RDDATA_SHIFT;
}
dev.regs.glnvm_srctl &= ~I40E_GLNVM_SRCTL_START_MASK;
dev.regs.glnvm_srctl |= I40E_GLNVM_SRCTL_DONE_MASK;
}
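// Register-level protocol modeled above (summary derived from this code, not
// from the datasheet): the driver writes the word address plus the START bit
// (and, for writes, the WRITE bit together with the data in GLNVM_SRDATA) to
// GLNVM_SRCTL; the device performs the access, clears START and sets DONE.
// A read, as the driver would issue it through the MMIO path, roughly looks
// like:
//   reg_write32(BAR_REGS, I40E_GLNVM_SRCTL,
//               (addr << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
//                   I40E_GLNVM_SRCTL_START_MASK);
//   // ... poll GLNVM_SRCTL until DONE is set, then read GLNVM_SRDATA ...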
uint16_t shadow_ram::read(uint16_t addr)
{
switch (addr) {
/* for any of these, returning 0 should hopefully be fine */
/* they are read by drivers but not used */
case I40E_SR_NVM_DEV_STARTER_VERSION:
case I40E_SR_NVM_EETRACK_LO:
case I40E_SR_NVM_EETRACK_HI:
case I40E_SR_BOOT_CONFIG_PTR:
return 0;
uint16_t shadow_ram::read(uint16_t addr) {
switch (addr) {
/* for any of these, returning 0 should hopefully be fine */
/* they are read by drivers but not used */
case I40E_SR_NVM_DEV_STARTER_VERSION:
case I40E_SR_NVM_EETRACK_LO:
case I40E_SR_NVM_EETRACK_HI:
case I40E_SR_BOOT_CONFIG_PTR:
return 0;
case I40E_SR_NVM_CONTROL_WORD:
return (1 << I40E_SR_CONTROL_WORD_1_SHIFT);
case I40E_SR_NVM_CONTROL_WORD:
return (1 << I40E_SR_CONTROL_WORD_1_SHIFT);
case I40E_SR_SW_CHECKSUM_WORD:
return 0xbaba;
case I40E_SR_SW_CHECKSUM_WORD:
return 0xbaba;
default:
default:
#ifdef DEBUG_DEV
log << "TODO shadow memory read addr=" << addr
<< logger::endl;
log << "TODO shadow memory read addr=" << addr << logger::endl;
#endif
break;
}
break;
}
return 0;
return 0;
}
void shadow_ram::write(uint16_t addr, uint16_t val)
{
void shadow_ram::write(uint16_t addr, uint16_t val) {
#ifdef DEBUG_DEV
log << "TODO shadow memory write addr=" << addr <<
" val=" << val << logger::endl;
log << "TODO shadow memory write addr=" << addr << " val=" << val
<< logger::endl;
#endif
}
int_ev::int_ev()
{
armed = false;
time = 0;
int_ev::int_ev() {
armed = false;
time = 0;
}
} // namespace i40e
using namespace i40e;
int main(int argc, char *argv[])
{
i40e_bm dev;
runner = new nicbm::Runner(dev);
return runner->runMain(argc, argv);
int main(int argc, char *argv[]) {
i40e_bm dev;
runner = new nicbm::Runner(dev);
return runner->runMain(argc, argv);
}
......@@ -24,10 +24,11 @@
#pragma once
#include <stdint.h>
#include <deque>
#include <sstream>
#include <string>
#include <stdint.h>
extern "C" {
#include <simbricks/proto/pcie.h>
}
......@@ -48,39 +49,38 @@ class i40e_bm;
class lan;
class dma_base : public nicbm::DMAOp {
public:
/** i40e_bm will call this when dma is done */
virtual void done() = 0;
public:
/** i40e_bm will call this when dma is done */
virtual void done() = 0;
};
class int_ev : public nicbm::TimedEvent {
public:
uint16_t vec;
bool armed;
public:
uint16_t vec;
bool armed;
int_ev();
int_ev();
};
class logger : public std::ostream {
public:
static const char endl = '\n';
protected:
std::string label;
std::stringstream ss;
public:
logger(const std::string &label_);
logger &operator<<(char c);
logger &operator<<(int32_t c);
logger &operator<<(uint8_t i);
logger &operator<<(uint16_t i);
logger &operator<<(uint32_t i);
logger &operator<<(uint64_t i);
logger &operator<<(bool c);
logger &operator<<(const char *str);
logger &operator<<(void *str);
public:
static const char endl = '\n';
protected:
std::string label;
std::stringstream ss;
public:
logger(const std::string &label_);
logger &operator<<(char c);
logger &operator<<(int32_t c);
logger &operator<<(uint8_t i);
logger &operator<<(uint16_t i);
logger &operator<<(uint32_t i);
logger &operator<<(uint64_t i);
logger &operator<<(bool c);
logger &operator<<(const char *str);
logger &operator<<(void *str);
};
/**
......@@ -106,515 +106,516 @@ class logger : public std::ostream {
* capacity
*/
class queue_base {
protected:
static const uint32_t MAX_ACTIVE_DESCS = 128;
class desc_ctx {
friend class queue_base;
public:
enum state {
DESC_EMPTY,
DESC_FETCHING,
DESC_PREPARING,
DESC_PREPARED,
DESC_PROCESSING,
DESC_PROCESSED,
DESC_WRITING_BACK,
DESC_WRITTEN_BACK,
};
protected:
queue_base &queue;
public:
enum state state;
uint32_t index;
void *desc;
void *data;
size_t data_len;
size_t data_capacity;
virtual void prepared();
virtual void processed();
protected:
void data_fetch(uint64_t addr, size_t len);
virtual void data_fetched(uint64_t addr, size_t len);
void data_write(uint64_t addr, size_t len, const void *buf);
virtual void data_written(uint64_t addr, size_t len);
public:
desc_ctx(queue_base &queue_);
virtual ~desc_ctx();
virtual void prepare();
virtual void process() = 0;
};
class dma_fetch : public dma_base {
protected:
queue_base &queue;
public:
uint32_t pos;
dma_fetch(queue_base &queue_, size_t len);
virtual ~dma_fetch();
virtual void done();
};
class dma_wb : public dma_base {
protected:
queue_base &queue;
public:
uint32_t pos;
dma_wb(queue_base &queue_, size_t len);
virtual ~dma_wb();
virtual void done();
};
class dma_data_fetch : public dma_base {
protected:
desc_ctx &ctx;
public:
size_t total_len;
size_t part_offset;
dma_data_fetch(desc_ctx &ctx_, size_t len, void *buffer);
virtual ~dma_data_fetch();
virtual void done();
};
class dma_data_wb : public dma_base {
protected:
desc_ctx &ctx;
public:
size_t total_len;
size_t part_offset;
dma_data_wb(desc_ctx &ctx_, size_t len);
virtual ~dma_data_wb();
virtual void done();
};
public:
std::string qname;
logger log;
protected:
desc_ctx *desc_ctxs[MAX_ACTIVE_DESCS];
uint32_t active_first_pos;
uint32_t active_first_idx;
uint32_t active_cnt;
uint64_t base;
uint32_t len;
uint32_t &reg_head;
uint32_t &reg_tail;
bool enabled;
size_t desc_len;
void ctxs_init();
void trigger_fetch();
void trigger_process();
void trigger_writeback();
void trigger();
// returns the maximum number of descriptors the queue may fetch during the
// next fetch: default UINT32_MAX, but can be overridden by child classes
virtual uint32_t max_fetch_capacity();
virtual uint32_t max_writeback_capacity();
virtual uint32_t max_active_capacity();
virtual desc_ctx &desc_ctx_create() = 0;
// dummy function, needs to be overridden if interrupts are required
virtual void interrupt();
// this does the actual write-back. Can be overridden
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
// called by dma op when writeback has completed
void writeback_done(uint32_t first_pos, uint32_t cnt);
public:
queue_base(const std::string &qname_, uint32_t &reg_head_,
uint32_t &reg_tail_);
virtual void reset();
void reg_updated();
bool is_enabled();
protected:
static const uint32_t MAX_ACTIVE_DESCS = 128;
class desc_ctx {
friend class queue_base;
public:
enum state
{
DESC_EMPTY,
DESC_FETCHING,
DESC_PREPARING,
DESC_PREPARED,
DESC_PROCESSING,
DESC_PROCESSED,
DESC_WRITING_BACK,
DESC_WRITTEN_BACK,
};
protected:
queue_base &queue;
public:
enum state state;
uint32_t index;
void *desc;
void *data;
size_t data_len;
size_t data_capacity;
virtual void prepared();
virtual void processed();
protected:
void data_fetch(uint64_t addr, size_t len);
virtual void data_fetched(uint64_t addr, size_t len);
void data_write(uint64_t addr, size_t len, const void *buf);
virtual void data_written(uint64_t addr, size_t len);
public:
desc_ctx(queue_base &queue_);
virtual ~desc_ctx();
virtual void prepare();
virtual void process() = 0;
};
class dma_fetch : public dma_base {
protected:
queue_base &queue;
public:
uint32_t pos;
dma_fetch(queue_base &queue_, size_t len);
virtual ~dma_fetch();
virtual void done();
};
class dma_wb : public dma_base {
protected:
queue_base &queue;
public:
uint32_t pos;
dma_wb(queue_base &queue_, size_t len);
virtual ~dma_wb();
virtual void done();
};
class dma_data_fetch : public dma_base {
protected:
desc_ctx &ctx;
public:
size_t total_len;
size_t part_offset;
dma_data_fetch(desc_ctx &ctx_, size_t len, void *buffer);
virtual ~dma_data_fetch();
virtual void done();
};
class dma_data_wb : public dma_base {
protected:
desc_ctx &ctx;
public:
size_t total_len;
size_t part_offset;
dma_data_wb(desc_ctx &ctx_, size_t len);
virtual ~dma_data_wb();
virtual void done();
};
public:
std::string qname;
logger log;
protected:
desc_ctx *desc_ctxs[MAX_ACTIVE_DESCS];
uint32_t active_first_pos;
uint32_t active_first_idx;
uint32_t active_cnt;
uint64_t base;
uint32_t len;
uint32_t &reg_head;
uint32_t &reg_tail;
bool enabled;
size_t desc_len;
void ctxs_init();
void trigger_fetch();
void trigger_process();
void trigger_writeback();
void trigger();
// returns the maximum number of descriptors the queue may fetch during the
// next fetch: default UINT32_MAX, but can be overridden by child classes
virtual uint32_t max_fetch_capacity();
virtual uint32_t max_writeback_capacity();
virtual uint32_t max_active_capacity();
virtual desc_ctx &desc_ctx_create() = 0;
// dummy function, needs to be overridden if interrupts are required
virtual void interrupt();
// this does the actual write-back. Can be overridden
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
// called by dma op when writeback has completed
void writeback_done(uint32_t first_pos, uint32_t cnt);
public:
queue_base(const std::string &qname_, uint32_t &reg_head_,
uint32_t &reg_tail_);
virtual void reset();
void reg_updated();
bool is_enabled();
};
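// Descriptor lifecycle implied by desc_ctx::state: EMPTY -> FETCHING ->
// PREPARING -> PREPARED -> PROCESSING -> PROCESSED -> WRITING_BACK ->
// WRITTEN_BACK. A minimal concrete queue (sketch only; the queue_base
// constructor arguments and head/tail registers are elided) just supplies a
// descriptor context that implements process():
//
//   class my_queue : public queue_base {
//     class my_ctx : public desc_ctx {
//      public:
//       explicit my_ctx(queue_base &queue_) : desc_ctx(queue_) {
//       }
//       virtual void process() {
//         // inspect this->desc / this->data here, then report completion so
//         // the base class can write the descriptor back
//         processed();
//       }
//     };
//     virtual desc_ctx &desc_ctx_create() {
//       return *new my_ctx(*this);
//     }
//     // ... constructor forwarding qname/reg_head/reg_tail omitted ...
//   };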
class queue_admin_tx : public queue_base {
protected:
class admin_desc_ctx : public desc_ctx {
protected:
queue_admin_tx &aq;
i40e_bm &dev;
struct i40e_aq_desc *d;
virtual void data_written(uint64_t addr, size_t len);
// prepare completion descriptor (fills flags and return value)
void desc_compl_prepare(uint16_t retval, uint16_t extra_flags);
// complete direct response
void desc_complete(uint16_t retval, uint16_t extra_flags = 0);
// complete indirect response
void desc_complete_indir(uint16_t retval, const void *data,
size_t len, uint16_t extra_flags = 0,
bool ignore_datalen = false);
public:
admin_desc_ctx(queue_admin_tx &queue_, i40e_bm &dev);
virtual void prepare();
virtual void process();
};
i40e_bm &dev;
uint64_t &reg_base;
uint32_t &reg_len;
virtual desc_ctx &desc_ctx_create();
public:
queue_admin_tx(i40e_bm &dev_, uint64_t &reg_base_,
uint32_t &reg_len_, uint32_t &reg_head_, uint32_t &reg_tail_);
void reg_updated();
protected:
class admin_desc_ctx : public desc_ctx {
protected:
queue_admin_tx &aq;
i40e_bm &dev;
struct i40e_aq_desc *d;
virtual void data_written(uint64_t addr, size_t len);
// prepare completion descriptor (fills flags and return value)
void desc_compl_prepare(uint16_t retval, uint16_t extra_flags);
// complete direct response
void desc_complete(uint16_t retval, uint16_t extra_flags = 0);
// complete indirect response
void desc_complete_indir(uint16_t retval, const void *data, size_t len,
uint16_t extra_flags = 0,
bool ignore_datalen = false);
public:
admin_desc_ctx(queue_admin_tx &queue_, i40e_bm &dev);
virtual void prepare();
virtual void process();
};
i40e_bm &dev;
uint64_t &reg_base;
uint32_t &reg_len;
virtual desc_ctx &desc_ctx_create();
public:
queue_admin_tx(i40e_bm &dev_, uint64_t &reg_base_, uint32_t &reg_len_,
uint32_t &reg_head_, uint32_t &reg_tail_);
void reg_updated();
};
// host memory cache
class host_mem_cache {
protected:
static const uint16_t MAX_SEGMENTS = 0x1000;
struct segment {
uint64_t addr;
uint16_t pgcount;
bool valid;
bool direct;
};
i40e_bm &dev;
segment segs[MAX_SEGMENTS];
public:
class mem_op : public dma_base {
public:
bool failed;
};
host_mem_cache(i40e_bm &dev);
void reset();
void reg_updated(uint64_t addr);
// issue an HMC memory operation (address is in the context)
void issue_mem_op(mem_op &op);
protected:
static const uint16_t MAX_SEGMENTS = 0x1000;
struct segment {
uint64_t addr;
uint16_t pgcount;
bool valid;
bool direct;
};
i40e_bm &dev;
segment segs[MAX_SEGMENTS];
public:
class mem_op : public dma_base {
public:
bool failed;
};
host_mem_cache(i40e_bm &dev);
void reset();
void reg_updated(uint64_t addr);
// issue an HMC memory operation (address is in the context)
void issue_mem_op(mem_op &op);
};
class lan_queue_base : public queue_base {
protected:
class qctx_fetch : public host_mem_cache::mem_op {
public:
lan_queue_base &lq;
qctx_fetch(lan_queue_base &lq_);
virtual void done();
};
lan &lanmgr;
void ctx_fetched();
void ctx_written_back();
virtual void interrupt();
virtual void initialize() = 0;
public:
bool enabling;
size_t idx;
uint32_t &reg_ena;
uint32_t &fpm_basereg;
uint32_t &reg_intqctl;
size_t ctx_size;
void *ctx;
uint32_t reg_dummy_head;
lan_queue_base(lan &lanmgr_, const std::string &qtype,
uint32_t &reg_tail, size_t idx_, uint32_t &reg_ena_,
uint32_t &fpm_basereg, uint32_t &reg_intqctl,
uint16_t ctx_size);
virtual void reset();
void enable();
void disable();
protected:
class qctx_fetch : public host_mem_cache::mem_op {
public:
lan_queue_base &lq;
qctx_fetch(lan_queue_base &lq_);
virtual void done();
};
lan &lanmgr;
void ctx_fetched();
void ctx_written_back();
virtual void interrupt();
virtual void initialize() = 0;
public:
bool enabling;
size_t idx;
uint32_t &reg_ena;
uint32_t &fpm_basereg;
uint32_t &reg_intqctl;
size_t ctx_size;
void *ctx;
uint32_t reg_dummy_head;
lan_queue_base(lan &lanmgr_, const std::string &qtype, uint32_t &reg_tail,
size_t idx_, uint32_t &reg_ena_, uint32_t &fpm_basereg,
uint32_t &reg_intqctl, uint16_t ctx_size);
virtual void reset();
void enable();
void disable();
};
class lan_queue_tx : public lan_queue_base {
protected:
static const uint16_t MTU = 9024;
class tx_desc_ctx : public desc_ctx {
protected:
lan_queue_tx &tq;
public:
i40e_tx_desc *d;
tx_desc_ctx(lan_queue_tx &queue_);
virtual void prepare();
virtual void process();
virtual void processed();
};
class dma_hwb : public dma_base {
protected:
lan_queue_tx &queue;
public:
uint32_t pos;
uint32_t cnt;
uint32_t next_head;
dma_hwb(lan_queue_tx &queue_, uint32_t pos, uint32_t cnt,
uint32_t next_head);
virtual ~dma_hwb();
virtual void done();
};
uint8_t pktbuf[MTU];
uint32_t tso_off;
uint32_t tso_len;
std::deque<tx_desc_ctx *> ready_segments;
bool hwb;
uint64_t hwb_addr;
virtual void initialize();
virtual desc_ctx &desc_ctx_create();
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
bool trigger_tx_packet();
void trigger_tx();
public:
lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail, size_t idx,
uint32_t &reg_ena, uint32_t &fpm_basereg,
uint32_t &reg_intqctl);
virtual void reset();
protected:
static const uint16_t MTU = 9024;
class tx_desc_ctx : public desc_ctx {
protected:
lan_queue_tx &tq;
public:
i40e_tx_desc *d;
tx_desc_ctx(lan_queue_tx &queue_);
virtual void prepare();
virtual void process();
virtual void processed();
};
class dma_hwb : public dma_base {
protected:
lan_queue_tx &queue;
public:
uint32_t pos;
uint32_t cnt;
uint32_t next_head;
dma_hwb(lan_queue_tx &queue_, uint32_t pos, uint32_t cnt,
uint32_t next_head);
virtual ~dma_hwb();
virtual void done();
};
uint8_t pktbuf[MTU];
uint32_t tso_off;
uint32_t tso_len;
std::deque<tx_desc_ctx *> ready_segments;
bool hwb;
uint64_t hwb_addr;
virtual void initialize();
virtual desc_ctx &desc_ctx_create();
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
bool trigger_tx_packet();
void trigger_tx();
public:
lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail, size_t idx, uint32_t &reg_ena,
uint32_t &fpm_basereg, uint32_t &reg_intqctl);
virtual void reset();
};
class lan_queue_rx : public lan_queue_base {
protected:
class rx_desc_ctx : public desc_ctx {
protected:
lan_queue_rx &rq;
virtual void data_written(uint64_t addr, size_t len);
public:
rx_desc_ctx(lan_queue_rx &queue_);
virtual void process();
void packet_received(const void *data, size_t len, bool last);
};
uint16_t dbuff_size;
uint16_t hbuff_size;
uint16_t rxmax;
bool crc_strip;
std::deque<rx_desc_ctx *> dcache;
virtual void initialize();
virtual desc_ctx &desc_ctx_create();
public:
lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail, size_t idx,
uint32_t &reg_ena, uint32_t &fpm_basereg,
uint32_t &reg_intqctl);
virtual void reset();
void packet_received(const void *data, size_t len, uint32_t hash);
protected:
class rx_desc_ctx : public desc_ctx {
protected:
lan_queue_rx &rq;
virtual void data_written(uint64_t addr, size_t len);
public:
rx_desc_ctx(lan_queue_rx &queue_);
virtual void process();
void packet_received(const void *data, size_t len, bool last);
};
uint16_t dbuff_size;
uint16_t hbuff_size;
uint16_t rxmax;
bool crc_strip;
std::deque<rx_desc_ctx *> dcache;
virtual void initialize();
virtual desc_ctx &desc_ctx_create();
public:
lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail, size_t idx, uint32_t &reg_ena,
uint32_t &fpm_basereg, uint32_t &reg_intqctl);
virtual void reset();
void packet_received(const void *data, size_t len, uint32_t hash);
};
class rss_key_cache {
protected:
static const size_t key_len = 52;
// big enough for 2x ipv6 (2x128 + 2x16)
static const size_t cache_len = 288;
bool cache_dirty;
const uint32_t (&key)[key_len / 4];
uint32_t cache[cache_len];
void build();
public:
rss_key_cache(const uint32_t (&key_)[key_len / 4]);
void set_dirty();
uint32_t hash_ipv4(uint32_t sip, uint32_t dip, uint16_t sp,
uint16_t dp);
protected:
static const size_t key_len = 52;
// big enough for 2x ipv6 (2x128 + 2x16)
static const size_t cache_len = 288;
bool cache_dirty;
const uint32_t (&key)[key_len / 4];
uint32_t cache[cache_len];
void build();
public:
rss_key_cache(const uint32_t (&key_)[key_len / 4]);
void set_dirty();
uint32_t hash_ipv4(uint32_t sip, uint32_t dip, uint16_t sp, uint16_t dp);
};
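// Sketch of the intended use (assumption, inferred from lan::rss_steering
// below; the variable names here are made up): the cached key expansion lets
// hash_ipv4() compute a Toeplitz-style RSS hash over the connection 4-tuple,
// which the LAN manager then maps to a receive queue, e.g. via PFQF_HLUT:
//   uint32_t hash = rss_kc.hash_ipv4(sip, dip, sport, dport);
//   uint16_t queue = lut[hash % lut_entries];
// set_dirty() is expected to be called whenever the driver rewrites the
// PFQF_HKEY registers (see lan::rss_key_updated()).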
// rx tx management
class lan {
protected:
friend class lan_queue_base;
friend class lan_queue_tx;
friend class lan_queue_rx;
i40e_bm &dev;
logger log;
rss_key_cache rss_kc;
const size_t num_qs;
lan_queue_rx **rxqs;
lan_queue_tx **txqs;
bool rss_steering(const void *data, size_t len, uint16_t &queue,
uint32_t &hash);
public:
lan(i40e_bm &dev, size_t num_qs);
void reset();
void qena_updated(uint16_t idx, bool rx);
void tail_updated(uint16_t idx, bool rx);
void rss_key_updated();
void packet_received(const void *data, size_t len);
protected:
friend class lan_queue_base;
friend class lan_queue_tx;
friend class lan_queue_rx;
i40e_bm &dev;
logger log;
rss_key_cache rss_kc;
const size_t num_qs;
lan_queue_rx **rxqs;
lan_queue_tx **txqs;
bool rss_steering(const void *data, size_t len, uint16_t &queue,
uint32_t &hash);
public:
lan(i40e_bm &dev, size_t num_qs);
void reset();
void qena_updated(uint16_t idx, bool rx);
void tail_updated(uint16_t idx, bool rx);
void rss_key_updated();
void packet_received(const void *data, size_t len);
};
class shadow_ram {
protected:
i40e_bm &dev;
logger log;
public:
shadow_ram(i40e_bm &dev);
void reg_updated();
uint16_t read(uint16_t addr);
void write(uint16_t addr, uint16_t val);
protected:
i40e_bm &dev;
logger log;
public:
shadow_ram(i40e_bm &dev);
void reg_updated();
uint16_t read(uint16_t addr);
void write(uint16_t addr, uint16_t val);
};
class i40e_bm : public nicbm::Runner::Device {
protected:
friend class queue_admin_tx;
friend class host_mem_cache;
friend class lan;
friend class lan_queue_base;
friend class lan_queue_rx;
friend class lan_queue_tx;
friend class shadow_ram;
static const unsigned BAR_REGS = 0;
static const unsigned BAR_IO = 2;
static const unsigned BAR_MSIX = 3;
static const uint32_t NUM_QUEUES = 1536;
static const uint32_t NUM_PFINTS = 128;
static const uint32_t NUM_VSIS = 384;
static const uint16_t MAX_MTU = 2048;
static const uint8_t NUM_ITR = 3;
struct i40e_regs {
uint32_t glgen_rstctl;
uint32_t glgen_stat;
uint32_t gllan_rctl_0;
uint32_t pfint_lnklst0;
uint32_t pfint_icr0_ena;
uint32_t pfint_icr0;
uint32_t pfint_itr0[NUM_ITR];
uint32_t pfint_itrn[NUM_ITR][NUM_PFINTS];
uint32_t pfint_stat_ctl0;
uint32_t pfint_dyn_ctl0;
uint32_t pfint_dyn_ctln[NUM_PFINTS - 1];
uint32_t pfint_lnklstn[NUM_PFINTS - 1];
uint32_t pfint_raten[NUM_PFINTS - 1];
uint32_t gllan_txpre_qdis[12];
uint32_t glnvm_srctl;
uint32_t glnvm_srdata;
uint32_t qint_tqctl[NUM_QUEUES];
uint32_t qtx_ena[NUM_QUEUES];
uint32_t qtx_tail[NUM_QUEUES];
uint32_t qtx_ctl[NUM_QUEUES];
uint32_t qint_rqctl[NUM_QUEUES];
uint32_t qrx_ena[NUM_QUEUES];
uint32_t qrx_tail[NUM_QUEUES];
uint32_t glhmc_lantxbase[16];
uint32_t glhmc_lantxcnt[16];
uint32_t glhmc_lanrxbase[16];
uint32_t glhmc_lanrxcnt[16];
uint32_t pfhmc_sdcmd;
uint32_t pfhmc_sddatalow;
uint32_t pfhmc_sddatahigh;
uint32_t pfhmc_pdinv;
uint32_t pfhmc_errorinfo;
uint32_t pfhmc_errordata;
uint64_t pf_atqba;
uint32_t pf_atqlen;
uint32_t pf_atqh;
uint32_t pf_atqt;
uint64_t pf_arqba;
uint32_t pf_arqlen;
uint32_t pf_arqh;
uint32_t pf_arqt;
uint32_t pfqf_ctl_0;
uint32_t pfqf_hkey[13];
uint32_t pfqf_hlut[128];
uint32_t prtdcb_fccfg;
uint32_t prtdcb_mflcn;
uint32_t prt_l2tagsen;
uint32_t prtqf_ctl_0;
uint32_t glrpb_ghw;
uint32_t glrpb_glw;
uint32_t glrpb_phw;
uint32_t glrpb_plw;
};
public:
i40e_bm();
~i40e_bm();
virtual void setup_intro(struct cosim_pcie_proto_dev_intro &di);
virtual void reg_read(uint8_t bar, uint64_t addr, void *dest, size_t len);
virtual uint32_t reg_read32(uint8_t bar, uint64_t addr);
virtual void reg_write(uint8_t bar, uint64_t addr, const void *src,
size_t len);
virtual void reg_write32(uint8_t bar, uint64_t addr, uint32_t val);
virtual void dma_complete(nicbm::DMAOp &op);
virtual void eth_rx(uint8_t port, const void *data, size_t len);
virtual void timed_event(nicbm::TimedEvent &ev);
void signal_interrupt(uint16_t vector, uint8_t itr);
protected:
logger log;
i40e_regs regs;
queue_admin_tx pf_atq;
host_mem_cache hmc;
shadow_ram shram;
lan lanmgr;
int_ev intevs[NUM_PFINTS];
/** Read from the I/O bar */
virtual uint32_t reg_io_read(uint64_t addr);
/** Write to the I/O bar */
virtual void reg_io_write(uint64_t addr, uint32_t val);
/** 32-bit read from the memory bar (should be the default) */
virtual uint32_t reg_mem_read32(uint64_t addr);
/** 32-bit write to the memory bar (should be the default) */
virtual void reg_mem_write32(uint64_t addr, uint32_t val);
void reset(bool indicate_done);
protected:
friend class queue_admin_tx;
friend class host_mem_cache;
friend class lan;
friend class lan_queue_base;
friend class lan_queue_rx;
friend class lan_queue_tx;
friend class shadow_ram;
static const unsigned BAR_REGS = 0;
static const unsigned BAR_IO = 2;
static const unsigned BAR_MSIX = 3;
static const uint32_t NUM_QUEUES = 1536;
static const uint32_t NUM_PFINTS = 128;
static const uint32_t NUM_VSIS = 384;
static const uint16_t MAX_MTU = 2048;
static const uint8_t NUM_ITR = 3;
struct i40e_regs {
uint32_t glgen_rstctl;
uint32_t glgen_stat;
uint32_t gllan_rctl_0;
uint32_t pfint_lnklst0;
uint32_t pfint_icr0_ena;
uint32_t pfint_icr0;
uint32_t pfint_itr0[NUM_ITR];
uint32_t pfint_itrn[NUM_ITR][NUM_PFINTS];
uint32_t pfint_stat_ctl0;
uint32_t pfint_dyn_ctl0;
uint32_t pfint_dyn_ctln[NUM_PFINTS - 1];
uint32_t pfint_lnklstn[NUM_PFINTS - 1];
uint32_t pfint_raten[NUM_PFINTS - 1];
uint32_t gllan_txpre_qdis[12];
uint32_t glnvm_srctl;
uint32_t glnvm_srdata;
uint32_t qint_tqctl[NUM_QUEUES];
uint32_t qtx_ena[NUM_QUEUES];
uint32_t qtx_tail[NUM_QUEUES];
uint32_t qtx_ctl[NUM_QUEUES];
uint32_t qint_rqctl[NUM_QUEUES];
uint32_t qrx_ena[NUM_QUEUES];
uint32_t qrx_tail[NUM_QUEUES];
uint32_t glhmc_lantxbase[16];
uint32_t glhmc_lantxcnt[16];
uint32_t glhmc_lanrxbase[16];
uint32_t glhmc_lanrxcnt[16];
uint32_t pfhmc_sdcmd;
uint32_t pfhmc_sddatalow;
uint32_t pfhmc_sddatahigh;
uint32_t pfhmc_pdinv;
uint32_t pfhmc_errorinfo;
uint32_t pfhmc_errordata;
uint64_t pf_atqba;
uint32_t pf_atqlen;
uint32_t pf_atqh;
uint32_t pf_atqt;
uint64_t pf_arqba;
uint32_t pf_arqlen;
uint32_t pf_arqh;
uint32_t pf_arqt;
uint32_t pfqf_ctl_0;
uint32_t pfqf_hkey[13];
uint32_t pfqf_hlut[128];
uint32_t prtdcb_fccfg;
uint32_t prtdcb_mflcn;
uint32_t prt_l2tagsen;
uint32_t prtqf_ctl_0;
uint32_t glrpb_ghw;
uint32_t glrpb_glw;
uint32_t glrpb_phw;
uint32_t glrpb_plw;
};
public:
i40e_bm();
~i40e_bm();
virtual void setup_intro(struct cosim_pcie_proto_dev_intro &di);
virtual void reg_read(uint8_t bar, uint64_t addr, void *dest, size_t len);
virtual uint32_t reg_read32(uint8_t bar, uint64_t addr);
virtual void reg_write(uint8_t bar, uint64_t addr, const void *src,
size_t len);
virtual void reg_write32(uint8_t bar, uint64_t addr, uint32_t val);
virtual void dma_complete(nicbm::DMAOp &op);
virtual void eth_rx(uint8_t port, const void *data, size_t len);
virtual void timed_event(nicbm::TimedEvent &ev);
void signal_interrupt(uint16_t vector, uint8_t itr);
protected:
logger log;
i40e_regs regs;
queue_admin_tx pf_atq;
host_mem_cache hmc;
shadow_ram shram;
lan lanmgr;
int_ev intevs[NUM_PFINTS];
/** Read from the I/O bar */
virtual uint32_t reg_io_read(uint64_t addr);
/** Write to the I/O bar */
virtual void reg_io_write(uint64_t addr, uint32_t val);
/** 32-bit read from the memory bar (should be the default) */
virtual uint32_t reg_mem_read32(uint64_t addr);
/** 32-bit write to the memory bar (should be the default) */
virtual void reg_mem_write32(uint64_t addr, uint32_t val);
void reset(bool indicate_done);
};
// places the tcp checksum in the packet (assuming ipv4)
......@@ -622,10 +623,9 @@ void xsum_tcp(void *tcphdr, size_t l4len);
// calculates the full ipv4 & tcp checksum without assuming any pseudo header
// xsums
void xsum_tcpip_tso(void *iphdr, uint8_t iplen, uint8_t l4len,
uint16_t paylen);
void xsum_tcpip_tso(void *iphdr, uint8_t iplen, uint8_t l4len, uint16_t paylen);
void tso_postupdate_header(void *iphdr, uint8_t iplen, uint8_t l4len,
uint16_t paylen);
uint16_t paylen);
} // namespace i40e
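// Not part of this commit: a minimal sketch of how a TCP checksum over an
// IPv4 pseudo header can be computed, to illustrate what helpers like
// xsum_tcp() above are responsible for. The names csum_add/tcp_xsum_ipv4 and
// the calling convention are made up for this sketch and are not the i40e_bm
// implementation.
#include <arpa/inet.h>

#include <cstddef>
#include <cstdint>

static uint32_t csum_add(uint32_t sum, const void *buf, size_t len) {
  const uint8_t *p = static_cast<const uint8_t *>(buf);
  for (; len > 1; p += 2, len -= 2)
    sum += (static_cast<uint32_t>(p[0]) << 8) | p[1];  // 16-bit big-endian words
  if (len)
    sum += static_cast<uint32_t>(p[0]) << 8;  // odd trailing byte, zero padded
  return sum;
}

// Writes the checksum into bytes 16/17 of the TCP header (IPv4 only).
// src_ip/dst_ip are given in host byte order.
static void tcp_xsum_ipv4(uint32_t src_ip, uint32_t dst_ip, uint8_t *tcp,
                          size_t l4len) {
  tcp[16] = tcp[17] = 0;  // checksum field must be zero while summing
  uint32_t pseudo[3] = {htonl(src_ip), htonl(dst_ip),
                        htonl((6u << 16) | static_cast<uint32_t>(l4len))};
  uint32_t sum = csum_add(0, pseudo, sizeof(pseudo));  // pseudo header
  sum = csum_add(sum, tcp, l4len);                     // TCP header + payload
  while (sum >> 16)  // fold carries back into 16 bits
    sum = (sum & 0xffff) + (sum >> 16);
  uint16_t xsum = static_cast<uint16_t>(~sum);
  tcp[16] = xsum >> 8;
  tcp[17] = xsum & 0xff;
}

int main() {
  uint8_t tcp[20] = {};  // minimal TCP header, all zeros
  tcp[12] = 5 << 4;      // data offset = 5 words
  tcp_xsum_ipv4(0x0a000001, 0x0a000002, tcp, sizeof(tcp));
  return 0;
}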
......@@ -24,122 +24,118 @@
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include "sims/nic/i40e_bm/i40e_bm.h"
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
#include "sims/nic/i40e_bm/i40e_bm.h"
using namespace i40e;
extern nicbm::Runner *runner;
host_mem_cache::host_mem_cache(i40e_bm &dev_)
: dev(dev_)
{
reset();
host_mem_cache::host_mem_cache(i40e_bm &dev_) : dev(dev_) {
reset();
}
void host_mem_cache::reset()
{
for (size_t i = 0; i < MAX_SEGMENTS; i++) {
segs[i].addr = 0;
segs[i].pgcount = 0;
segs[i].valid = false;
segs[i].direct = false;
}
void host_mem_cache::reset() {
for (size_t i = 0; i < MAX_SEGMENTS; i++) {
segs[i].addr = 0;
segs[i].pgcount = 0;
segs[i].valid = false;
segs[i].direct = false;
}
}
void host_mem_cache::reg_updated(uint64_t addr)
{
if (addr == I40E_PFHMC_SDCMD) {
// read/write command for descriptor
uint32_t cmd = dev.regs.pfhmc_sdcmd;
uint16_t idx = (cmd & I40E_PFHMC_SDCMD_PMSDIDX_MASK) >>
I40E_PFHMC_SDCMD_PMSDIDX_SHIFT;
uint32_t lo = dev.regs.pfhmc_sddatalow;
uint32_t hi = dev.regs.pfhmc_sddatahigh;
if ((cmd & I40E_PFHMC_SDCMD_PMSDWR_MASK)) {
// write
void host_mem_cache::reg_updated(uint64_t addr) {
if (addr == I40E_PFHMC_SDCMD) {
// read/write command for descriptor
uint32_t cmd = dev.regs.pfhmc_sdcmd;
uint16_t idx =
(cmd & I40E_PFHMC_SDCMD_PMSDIDX_MASK) >> I40E_PFHMC_SDCMD_PMSDIDX_SHIFT;
uint32_t lo = dev.regs.pfhmc_sddatalow;
uint32_t hi = dev.regs.pfhmc_sddatahigh;
if ((cmd & I40E_PFHMC_SDCMD_PMSDWR_MASK)) {
// write
#ifdef DEBUG_HMC
std::cerr << "hmc: writing descriptor " << idx << std::endl;
std::cerr << "hmc: writing descriptor " << idx << std::endl;
#endif
segs[idx].addr = ((lo & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK) >>
I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) << 12;
segs[idx].addr |= ((uint64_t) hi) << 32;
segs[idx].pgcount = (lo & I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK) >>
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT;
segs[idx].valid = !!(lo & I40E_PFHMC_SDDATALOW_PMSDVALID_MASK);
segs[idx].direct = !!(lo & I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK);
segs[idx].addr = ((lo & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK) >>
I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
<< 12;
segs[idx].addr |= ((uint64_t)hi) << 32;
segs[idx].pgcount = (lo & I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK) >>
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT;
segs[idx].valid = !!(lo & I40E_PFHMC_SDDATALOW_PMSDVALID_MASK);
segs[idx].direct = !!(lo & I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK);
#ifdef DEBUG_HMC
std::cerr << " addr=" << segs[idx].addr << " pgcount=" <<
segs[idx].pgcount << " valid=" << segs[idx].valid <<
" direct=" << segs[idx].direct << std::endl;
std::cerr << " addr=" << segs[idx].addr
<< " pgcount=" << segs[idx].pgcount
<< " valid=" << segs[idx].valid
<< " direct=" << segs[idx].direct << std::endl;
#endif
} else {
// read
} else {
// read
#ifdef DEBUG_HMC
std::cerr << "hmc: reading descriptor " << idx << std::endl;
std::cerr << "hmc: reading descriptor " << idx << std::endl;
#endif
dev.regs.pfhmc_sddatalow = ((segs[idx].addr >> 12) <<
I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) &
I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK;
dev.regs.pfhmc_sddatalow |= (segs[idx].pgcount <<
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) &
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK;
if (segs[idx].valid)
dev.regs.pfhmc_sddatalow |= I40E_PFHMC_SDDATALOW_PMSDVALID_MASK;
if (segs[idx].direct)
dev.regs.pfhmc_sddatalow |= I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK;
dev.regs.pfhmc_sddatahigh = segs[idx].addr >> 32;
}
dev.regs.pfhmc_sddatalow =
((segs[idx].addr >> 12) << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) &
I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK;
dev.regs.pfhmc_sddatalow |=
(segs[idx].pgcount << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) &
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK;
if (segs[idx].valid)
dev.regs.pfhmc_sddatalow |= I40E_PFHMC_SDDATALOW_PMSDVALID_MASK;
if (segs[idx].direct)
dev.regs.pfhmc_sddatalow |= I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK;
dev.regs.pfhmc_sddatahigh = segs[idx].addr >> 32;
}
}
}
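// Not part of this commit: a tiny sketch of how the 64-bit backing address of
// a segment descriptor is reassembled from the two SDDATA registers decoded
// above. The field placement used here (address bits [31:12] in the low word)
// is an illustrative stand-in, not the real I40E_PFHMC_SDDATALOW_* layout.
#include <cstdint>

static uint64_t sd_addr_from_regs(uint32_t lo, uint32_t hi) {
  uint64_t addr = lo & 0xfffff000u;         // page-aligned low bits of the address
  addr |= static_cast<uint64_t>(hi) << 32;  // SDDATAHIGH carries bits [63:32]
  return addr;
}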
void host_mem_cache::issue_mem_op(mem_op &op)
{
uint64_t addr = op.dma_addr;
uint16_t seg_idx = addr >> 21;
uint16_t seg_idx_last = (addr + op.len - 1) >> 21;
uint32_t dir_off = addr & ((1 << 21) - 1);
struct segment *seg = &segs[seg_idx];
if (seg_idx >= MAX_SEGMENTS) {
std::cerr << "hmc issue_mem_op: seg index too high " << seg_idx <<
std::endl;
abort();
}
if (!seg->valid) {
// TODO(antoinek): errorinfo and data registers
std::cerr << "hmc issue_mem_op: segment invalid addr=" << addr <<
std::endl;
op.failed = true;
return;
}
if (seg_idx != seg_idx_last) {
std::cerr << "hmc issue_mem_op: operation crosses segs addr=" <<
addr << " len=" << op.len << std::endl;
abort();
}
if (!seg->direct) {
std::cerr << "hmc issue_mem_op: TODO paged ops addr=" << addr <<
std::endl;
abort();
}
op.failed = false;
op.dma_addr = seg->addr + dir_off;
void host_mem_cache::issue_mem_op(mem_op &op) {
uint64_t addr = op.dma_addr;
uint16_t seg_idx = addr >> 21;
uint16_t seg_idx_last = (addr + op.len - 1) >> 21;
uint32_t dir_off = addr & ((1 << 21) - 1);
struct segment *seg = &segs[seg_idx];
if (seg_idx >= MAX_SEGMENTS) {
std::cerr << "hmc issue_mem_op: seg index too high " << seg_idx
<< std::endl;
abort();
}
if (!seg->valid) {
// TODO(antoinek): errorinfo and data registers
std::cerr << "hmc issue_mem_op: segment invalid addr=" << addr << std::endl;
op.failed = true;
return;
}
if (seg_idx != seg_idx_last) {
std::cerr << "hmc issue_mem_op: operation crosses segs addr=" << addr
<< " len=" << op.len << std::endl;
abort();
}
if (!seg->direct) {
std::cerr << "hmc issue_mem_op: TODO paged ops addr=" << addr << std::endl;
abort();
}
op.failed = false;
op.dma_addr = seg->addr + dir_off;
#ifdef DEBUG_HMC
std::cerr << "hmc issue_mem_op: hmc_addr=" << addr << " dma_addr=" <<
op.dma_addr << " len=" << op.len << std::endl;
std::cerr << "hmc issue_mem_op: hmc_addr=" << addr
<< " dma_addr=" << op.dma_addr << " len=" << op.len << std::endl;
#endif
runner->issue_dma(op);
runner->issue_dma(op);
}
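// Not part of this commit: a standalone worked example of the direct-segment
// translation done by issue_mem_op() above. Every segment covers 2 MiB
// (1 << 21 bytes) of HMC address space; the low 21 bits are the offset into
// the segment's backing memory. The segment base address is made up.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t seg_base = 0x7f0000000000ull;     // hypothetical backing address
  uint64_t hmc_addr = (2ull << 21) + 0x1480;       // segment 2, offset 0x1480

  uint16_t seg_idx = hmc_addr >> 21;               // -> 2
  uint32_t dir_off = hmc_addr & ((1u << 21) - 1);  // -> 0x1480
  uint64_t dma_addr = seg_base + dir_off;          // -> 0x7f0000001480

  std::printf("seg=%u off=0x%x dma=0x%llx\n", static_cast<unsigned>(seg_idx),
              static_cast<unsigned>(dir_off),
              static_cast<unsigned long long>(dma_addr));
  return 0;
}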
......@@ -22,726 +22,690 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <arpa/inet.h>
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>
#include <arpa/inet.h>
#include "sims/nic/i40e_bm/i40e_bm.h"
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
#include "sims/nic/i40e_bm/headers.h"
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
#include "sims/nic/i40e_bm/i40e_bm.h"
using namespace i40e;
extern nicbm::Runner *runner;
lan::lan(i40e_bm &dev_, size_t num_qs_)
: dev(dev_), log("lan"), rss_kc(dev_.regs.pfqf_hkey), num_qs(num_qs_)
{
rxqs = new lan_queue_rx *[num_qs];
txqs = new lan_queue_tx *[num_qs];
for (size_t i = 0; i < num_qs; i++) {
rxqs[i] = new lan_queue_rx(*this, dev.regs.qrx_tail[i], i,
dev.regs.qrx_ena[i], dev.regs.glhmc_lanrxbase[0],
dev.regs.qint_rqctl[i]);
txqs[i] = new lan_queue_tx(*this, dev.regs.qtx_tail[i], i,
dev.regs.qtx_ena[i], dev.regs.glhmc_lantxbase[0],
dev.regs.qint_tqctl[i]);
}
}
void lan::reset()
{
rss_kc.set_dirty();
for (size_t i = 0; i < num_qs; i++) {
rxqs[i]->reset();
txqs[i]->reset();
}
}
void lan::qena_updated(uint16_t idx, bool rx)
{
uint32_t &reg = (rx ? dev.regs.qrx_ena[idx] : dev.regs.qtx_ena[idx]);
: dev(dev_), log("lan"), rss_kc(dev_.regs.pfqf_hkey), num_qs(num_qs_) {
rxqs = new lan_queue_rx *[num_qs];
txqs = new lan_queue_tx *[num_qs];
for (size_t i = 0; i < num_qs; i++) {
rxqs[i] =
new lan_queue_rx(*this, dev.regs.qrx_tail[i], i, dev.regs.qrx_ena[i],
dev.regs.glhmc_lanrxbase[0], dev.regs.qint_rqctl[i]);
txqs[i] =
new lan_queue_tx(*this, dev.regs.qtx_tail[i], i, dev.regs.qtx_ena[i],
dev.regs.glhmc_lantxbase[0], dev.regs.qint_tqctl[i]);
}
}
void lan::reset() {
rss_kc.set_dirty();
for (size_t i = 0; i < num_qs; i++) {
rxqs[i]->reset();
txqs[i]->reset();
}
}
void lan::qena_updated(uint16_t idx, bool rx) {
uint32_t &reg = (rx ? dev.regs.qrx_ena[idx] : dev.regs.qtx_ena[idx]);
#ifdef DEBUG_LAN
log << " qena updated idx=" << idx << " rx=" << rx << " reg=" << reg <<
logger::endl;
log << " qena updated idx=" << idx << " rx=" << rx << " reg=" << reg
<< logger::endl;
#endif
lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx]) :
static_cast<lan_queue_base &>(*txqs[idx]));
lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx])
: static_cast<lan_queue_base &>(*txqs[idx]));
if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) && !q.is_enabled()) {
q.enable();
} else if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) && q.is_enabled()) {
q.disable();
}
if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) && !q.is_enabled()) {
q.enable();
} else if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) && q.is_enabled()) {
q.disable();
}
}
void lan::tail_updated(uint16_t idx, bool rx)
{
void lan::tail_updated(uint16_t idx, bool rx) {
#ifdef DEBUG_LAN
log << " tail updated idx=" << idx << " rx=" << rx << logger::endl;
log << " tail updated idx=" << idx << " rx=" << rx << logger::endl;
#endif
lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx]) :
static_cast<lan_queue_base &>(*txqs[idx]));
lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx])
: static_cast<lan_queue_base &>(*txqs[idx]));
if (q.is_enabled())
q.reg_updated();
if (q.is_enabled())
q.reg_updated();
}
void lan::rss_key_updated()
{
rss_kc.set_dirty();
void lan::rss_key_updated() {
rss_kc.set_dirty();
}
bool lan::rss_steering(const void *data, size_t len, uint16_t &queue,
uint32_t &hash)
{
hash = 0;
const headers::pkt_tcp *tcp =
reinterpret_cast<const headers::pkt_tcp *> (data);
const headers::pkt_udp *udp =
reinterpret_cast<const headers::pkt_udp *> (data);
// should really determine the packet type and mask it against the enabled
// packet types
// TODO(antoinek): ipv6
if (tcp->eth.type == htons(ETH_TYPE_IP) &&
tcp->ip.proto == IP_PROTO_TCP) {
hash = rss_kc.hash_ipv4(ntohl(tcp->ip.src), ntohl(tcp->ip.dest),
ntohs(tcp->tcp.src), ntohs(tcp->tcp.dest));
} else if (udp->eth.type == htons(ETH_TYPE_IP) &&
udp->ip.proto == IP_PROTO_UDP) {
hash = rss_kc.hash_ipv4(ntohl(udp->ip.src), ntohl(udp->ip.dest),
ntohs(udp->udp.src), ntohs(udp->udp.dest));
} else if (udp->eth.type == htons(ETH_TYPE_IP)) {
hash = rss_kc.hash_ipv4(ntohl(udp->ip.src), ntohl(udp->ip.dest), 0, 0);
} else {
return false;
}
uint16_t luts = (!(dev.regs.pfqf_ctl_0 & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK) ?
128 : 512);
uint16_t idx = hash % luts;
queue = (dev.regs.pfqf_hlut[idx / 4] >> (8 * (idx % 4))) & 0x3f;
uint32_t &hash) {
hash = 0;
const headers::pkt_tcp *tcp =
reinterpret_cast<const headers::pkt_tcp *>(data);
const headers::pkt_udp *udp =
reinterpret_cast<const headers::pkt_udp *>(data);
// should really determine the packet type and mask it against the enabled
// packet types
// TODO(antoinek): ipv6
if (tcp->eth.type == htons(ETH_TYPE_IP) && tcp->ip.proto == IP_PROTO_TCP) {
hash = rss_kc.hash_ipv4(ntohl(tcp->ip.src), ntohl(tcp->ip.dest),
ntohs(tcp->tcp.src), ntohs(tcp->tcp.dest));
} else if (udp->eth.type == htons(ETH_TYPE_IP) &&
udp->ip.proto == IP_PROTO_UDP) {
hash = rss_kc.hash_ipv4(ntohl(udp->ip.src), ntohl(udp->ip.dest),
ntohs(udp->udp.src), ntohs(udp->udp.dest));
} else if (udp->eth.type == htons(ETH_TYPE_IP)) {
hash = rss_kc.hash_ipv4(ntohl(udp->ip.src), ntohl(udp->ip.dest), 0, 0);
} else {
return false;
}
uint16_t luts =
(!(dev.regs.pfqf_ctl_0 & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK) ? 128 : 512);
uint16_t idx = hash % luts;
queue = (dev.regs.pfqf_hlut[idx / 4] >> (8 * (idx % 4))) & 0x3f;
#ifdef DEBUG_LAN
log << " q=" << queue << " h=" << hash << " i=" << idx << logger::endl;
log << " q=" << queue << " h=" << hash << " i=" << idx << logger::endl;
#endif
return true;
return true;
}
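// Not part of this commit: the RSS LUT lookup from rss_steering() above as a
// standalone example. Four 6-bit queue numbers are packed into each 32-bit
// LUT word, and the hash modulo the LUT size (128, or 512 with HASHLUTSIZE
// set) picks the entry. The LUT contents and hash value are made up.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t hlut[128] = {};            // up to 512 entries, 4 per word
  for (uint16_t i = 0; i < 128; i++)  // hypothetical setup: round-robin 4 queues
    hlut[i / 4] |= static_cast<uint32_t>(i % 4) << (8 * (i % 4));

  uint32_t hash = 0x9e3779b9;  // stand-in for the computed RSS hash
  uint16_t luts = 128;
  uint16_t idx = hash % luts;                                  // -> 57
  uint16_t queue = (hlut[idx / 4] >> (8 * (idx % 4))) & 0x3f;  // -> 1

  std::printf("idx=%u queue=%u\n", static_cast<unsigned>(idx),
              static_cast<unsigned>(queue));
  return 0;
}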
void lan::packet_received(const void *data, size_t len)
{
void lan::packet_received(const void *data, size_t len) {
#ifdef DEBUG_LAN
log << " packet received len=" << len << logger::endl;
log << " packet received len=" << len << logger::endl;
#endif
uint32_t hash = 0;
uint16_t queue = 0;
rss_steering(data, len, queue, hash);
rxqs[queue]->packet_received(data, len, hash);
uint32_t hash = 0;
uint16_t queue = 0;
rss_steering(data, len, queue, hash);
rxqs[queue]->packet_received(data, len, hash);
}
lan_queue_base::lan_queue_base(lan &lanmgr_, const std::string &qtype,
uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &fpm_basereg_, uint32_t &reg_intqctl_,
uint16_t ctx_size_)
uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &fpm_basereg_,
uint32_t &reg_intqctl_, uint16_t ctx_size_)
: queue_base(qtype + std::to_string(idx_), reg_dummy_head, reg_tail_),
lanmgr(lanmgr_), enabling(false),
idx(idx_), reg_ena(reg_ena_), fpm_basereg(fpm_basereg_),
reg_intqctl(reg_intqctl_), ctx_size(ctx_size_)
{
ctx = new uint8_t[ctx_size_];
lanmgr(lanmgr_),
enabling(false),
idx(idx_),
reg_ena(reg_ena_),
fpm_basereg(fpm_basereg_),
reg_intqctl(reg_intqctl_),
ctx_size(ctx_size_) {
ctx = new uint8_t[ctx_size_];
}
void lan_queue_base::reset()
{
enabling = false;
queue_base::reset();
void lan_queue_base::reset() {
enabling = false;
queue_base::reset();
}
void lan_queue_base::enable()
{
if (enabling || enabled)
return;
void lan_queue_base::enable() {
if (enabling || enabled)
return;
#ifdef DEBUG_LAN
log << " lan enabling queue " << idx << logger::endl;
log << " lan enabling queue " << idx << logger::endl;
#endif
enabling = true;
enabling = true;
qctx_fetch *qf = new qctx_fetch(*this);
qf->write = false;
qf->dma_addr = ((fpm_basereg & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) >>
I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) * 512;
qf->dma_addr += ctx_size * idx;
qf->len = ctx_size;
qf->data = ctx;
qctx_fetch *qf = new qctx_fetch(*this);
qf->write = false;
qf->dma_addr = ((fpm_basereg & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) >>
I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) *
512;
qf->dma_addr += ctx_size * idx;
qf->len = ctx_size;
qf->data = ctx;
lanmgr.dev.hmc.issue_mem_op(*qf);
lanmgr.dev.hmc.issue_mem_op(*qf);
}
void lan_queue_base::ctx_fetched()
{
void lan_queue_base::ctx_fetched() {
#ifdef DEBUG_LAN
log << " lan ctx fetched " << idx << logger::endl;
log << " lan ctx fetched " << idx << logger::endl;
#endif
initialize();
initialize();
enabling = false;
enabled = true;
reg_ena |= I40E_QRX_ENA_QENA_STAT_MASK;
enabling = false;
enabled = true;
reg_ena |= I40E_QRX_ENA_QENA_STAT_MASK;
reg_updated();
reg_updated();
}
void lan_queue_base::disable()
{
void lan_queue_base::disable() {
#ifdef DEBUG_LAN
log << " lan disabling queue " << idx << logger::endl;
log << " lan disabling queue " << idx << logger::endl;
#endif
enabled = false;
// TODO(antoinek): write back
reg_ena &= ~I40E_QRX_ENA_QENA_STAT_MASK;
enabled = false;
// TODO(antoinek): write back
reg_ena &= ~I40E_QRX_ENA_QENA_STAT_MASK;
}
void lan_queue_base::interrupt()
{
uint32_t qctl = reg_intqctl;
uint32_t gctl = lanmgr.dev.regs.pfint_dyn_ctl0;
void lan_queue_base::interrupt() {
uint32_t qctl = reg_intqctl;
uint32_t gctl = lanmgr.dev.regs.pfint_dyn_ctl0;
#ifdef DEBUG_LAN
log << " interrupt qctl=" << qctl << " gctl=" << gctl << logger::endl;
log << " interrupt qctl=" << qctl << " gctl=" << gctl << logger::endl;
#endif
uint16_t msix_idx = (qctl & I40E_QINT_TQCTL_MSIX_INDX_MASK) >>
I40E_QINT_TQCTL_MSIX_INDX_SHIFT;
uint8_t msix0_idx = (qctl & I40E_QINT_TQCTL_MSIX0_INDX_MASK) >>
I40E_QINT_TQCTL_MSIX0_INDX_SHIFT;
uint16_t msix_idx = (qctl & I40E_QINT_TQCTL_MSIX_INDX_MASK) >>
I40E_QINT_TQCTL_MSIX_INDX_SHIFT;
uint8_t msix0_idx = (qctl & I40E_QINT_TQCTL_MSIX0_INDX_MASK) >>
I40E_QINT_TQCTL_MSIX0_INDX_SHIFT;
bool cause_ena = !!(qctl & I40E_QINT_TQCTL_CAUSE_ENA_MASK) &&
!!(gctl & I40E_PFINT_DYN_CTL0_INTENA_MASK);
if (!cause_ena) {
bool cause_ena = !!(qctl & I40E_QINT_TQCTL_CAUSE_ENA_MASK) &&
!!(gctl & I40E_PFINT_DYN_CTL0_INTENA_MASK);
if (!cause_ena) {
#ifdef DEBUG_LAN
log << " interrupt cause disabled" << logger::endl;
log << " interrupt cause disabled" << logger::endl;
#endif
return;
}
return;
}
if (msix_idx == 0) {
if (msix_idx == 0) {
#ifdef DEBUG_LAN
log << " setting int0.qidx=" << msix0_idx << logger::endl;
log << " setting int0.qidx=" << msix0_idx << logger::endl;
#endif
lanmgr.dev.regs.pfint_icr0 |= I40E_PFINT_ICR0_INTEVENT_MASK |
(1 << (I40E_PFINT_ICR0_QUEUE_0_SHIFT + msix0_idx));
}
lanmgr.dev.regs.pfint_icr0 |=
I40E_PFINT_ICR0_INTEVENT_MASK |
(1 << (I40E_PFINT_ICR0_QUEUE_0_SHIFT + msix0_idx));
}
uint8_t itr = (qctl & I40E_QINT_TQCTL_ITR_INDX_MASK) >>
I40E_QINT_TQCTL_ITR_INDX_SHIFT;
lanmgr.dev.signal_interrupt(msix_idx, itr);
uint8_t itr =
(qctl & I40E_QINT_TQCTL_ITR_INDX_MASK) >> I40E_QINT_TQCTL_ITR_INDX_SHIFT;
lanmgr.dev.signal_interrupt(msix_idx, itr);
}
lan_queue_base::qctx_fetch::qctx_fetch(lan_queue_base &lq_)
: lq(lq_)
{
lan_queue_base::qctx_fetch::qctx_fetch(lan_queue_base &lq_) : lq(lq_) {
}
void lan_queue_base::qctx_fetch::done()
{
lq.ctx_fetched();
delete this;
void lan_queue_base::qctx_fetch::done() {
lq.ctx_fetched();
delete this;
}
lan_queue_rx::lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl_)
uint32_t &reg_ena_, uint32_t &reg_fpmbase_,
uint32_t &reg_intqctl_)
: lan_queue_base(lanmgr_, "rxq", reg_tail_, idx_, reg_ena_, reg_fpmbase_,
reg_intqctl_, 32)
{
// use larger value for initialization
desc_len = 32;
ctxs_init();
reg_intqctl_, 32) {
// use larger value for initialization
desc_len = 32;
ctxs_init();
}
void lan_queue_rx::reset()
{
dcache.clear();
queue_base::reset();
void lan_queue_rx::reset() {
dcache.clear();
queue_base::reset();
}
void lan_queue_rx::initialize()
{
void lan_queue_rx::initialize() {
#ifdef DEBUG_LAN
log << " initialize()" << logger::endl;
log << " initialize()" << logger::endl;
#endif
uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);
uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
uint16_t *qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 11);
uint16_t *dbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 12);
uint16_t *hbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 13);
uint32_t *rxmax_p = reinterpret_cast<uint32_t *>(ctx_p + 21);
reg_dummy_head = (*head_p) & ((1 << 13) - 1);
base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
len = (*qlen_p >> 1) & ((1 << 13) - 1);
dbuff_size = (((*dbsz_p) >> 6) & ((1 << 7) - 1)) * 128;
hbuff_size = (((*hbsz_p) >> 5) & ((1 << 5) - 1)) * 64;
uint8_t dtype = ((*hbsz_p) >> 10) & ((1 << 2) - 1);
bool longdesc = !!(((*hbsz_p) >> 12) & 0x1);
desc_len = (longdesc ? 32 : 16);
crc_strip = !!(((*hbsz_p) >> 13) & 0x1);
rxmax = (((*rxmax_p) >> 6) & ((1 << 14) - 1)) * 128;
if (!longdesc) {
log << "lan_queue_rx::initialize: currently only 32B descs "
" supported" << logger::endl;
abort();
}
if (dtype != 0) {
log << "lan_queue_rx::initialize: no header split supported"
<< logger::endl;
abort();
}
uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);
uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
uint16_t *qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 11);
uint16_t *dbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 12);
uint16_t *hbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 13);
uint32_t *rxmax_p = reinterpret_cast<uint32_t *>(ctx_p + 21);
reg_dummy_head = (*head_p) & ((1 << 13) - 1);
base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
len = (*qlen_p >> 1) & ((1 << 13) - 1);
dbuff_size = (((*dbsz_p) >> 6) & ((1 << 7) - 1)) * 128;
hbuff_size = (((*hbsz_p) >> 5) & ((1 << 5) - 1)) * 64;
uint8_t dtype = ((*hbsz_p) >> 10) & ((1 << 2) - 1);
bool longdesc = !!(((*hbsz_p) >> 12) & 0x1);
desc_len = (longdesc ? 32 : 16);
crc_strip = !!(((*hbsz_p) >> 13) & 0x1);
rxmax = (((*rxmax_p) >> 6) & ((1 << 14) - 1)) * 128;
if (!longdesc) {
log << "lan_queue_rx::initialize: currently only 32B descs "
" supported"
<< logger::endl;
abort();
}
if (dtype != 0) {
log << "lan_queue_rx::initialize: no header split supported"
<< logger::endl;
abort();
}
#ifdef DEBUG_LAN
log << " head=" << reg_dummy_head << " base=" << base <<
" len=" << len << " dbsz=" << dbuff_size << " hbsz=" << hbuff_size <<
" dtype=" << (unsigned) dtype << " longdesc=" << longdesc <<
" crcstrip=" << crc_strip << " rxmax=" << rxmax << logger::endl;
log << " head=" << reg_dummy_head << " base=" << base << " len=" << len
<< " dbsz=" << dbuff_size << " hbsz=" << hbuff_size
<< " dtype=" << (unsigned)dtype << " longdesc=" << longdesc
<< " crcstrip=" << crc_strip << " rxmax=" << rxmax << logger::endl;
#endif
}
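// Not part of this commit: a short worked example of the unit scaling applied
// when decoding the RX queue context above. The ring base and data-buffer
// sizes are stored in units of 128 B, the header-buffer size in units of 64 B;
// the raw field values below are made up.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t base_field = 0x1234560;  // raw ring base (128 B units)
  uint16_t dbsz_field = 16;         // raw data buffer size field
  uint16_t hbsz_field = 2;          // raw header buffer size field

  uint64_t base = base_field * 128;         // byte address of the descriptor ring
  uint32_t dbuff_size = dbsz_field * 128u;  // -> 2048 B data buffers
  uint32_t hbuff_size = hbsz_field * 64u;   // -> 128 B header buffers

  std::printf("base=0x%llx dbuff=%u hbuff=%u\n",
              static_cast<unsigned long long>(base), dbuff_size, hbuff_size);
  return 0;
}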
queue_base::desc_ctx &lan_queue_rx::desc_ctx_create()
{
return *new rx_desc_ctx(*this);
queue_base::desc_ctx &lan_queue_rx::desc_ctx_create() {
return *new rx_desc_ctx(*this);
}
void lan_queue_rx::packet_received(const void *data, size_t pktlen, uint32_t h)
{
size_t num_descs = (pktlen + dbuff_size - 1) / dbuff_size;
void lan_queue_rx::packet_received(const void *data, size_t pktlen,
uint32_t h) {
size_t num_descs = (pktlen + dbuff_size - 1) / dbuff_size;
if (!enabled)
return;
if (!enabled)
return;
if (dcache.size() < num_descs) {
if (dcache.size() < num_descs) {
#ifdef DEBUG_LAN
log << " not enough rx descs (" << num_descs << ", dropping packet" <<
logger::endl;
log << " not enough rx descs (" << num_descs << ", dropping packet"
<< logger::endl;
#endif
return;
}
return;
}
for (size_t i = 0; i < num_descs; i++) {
rx_desc_ctx &ctx = *dcache.front();
for (size_t i = 0; i < num_descs; i++) {
rx_desc_ctx &ctx = *dcache.front();
#ifdef DEBUG_LAN
log << " packet part=" << i << " received didx=" << ctx.index <<
" cnt=" << dcache.size() << logger::endl;
log << " packet part=" << i << " received didx=" << ctx.index
<< " cnt=" << dcache.size() << logger::endl;
#endif
dcache.pop_front();
const uint8_t *buf = (const uint8_t *) data + (dbuff_size * i);
if (i == num_descs - 1) {
// last part of the packet
ctx.packet_received(buf, pktlen - dbuff_size * i, true);
} else {
ctx.packet_received(buf, dbuff_size, false);
}
dcache.pop_front();
const uint8_t *buf = (const uint8_t *)data + (dbuff_size * i);
if (i == num_descs - 1) {
// last part of the packet
ctx.packet_received(buf, pktlen - dbuff_size * i, true);
} else {
ctx.packet_received(buf, dbuff_size, false);
}
}
}
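// Not part of this commit: the ceiling division used by packet_received()
// above to spread one received frame over fixed-size RX buffers, shown as a
// standalone example with made-up sizes.
#include <cstddef>
#include <cstdio>

int main() {
  size_t pktlen = 3000, dbuff_size = 2048;
  size_t num_descs = (pktlen + dbuff_size - 1) / dbuff_size;  // -> 2 descriptors
  size_t last_len = pktlen - dbuff_size * (num_descs - 1);    // -> 952 B in the last
  std::printf("descs=%zu last=%zu\n", num_descs, last_len);
  return 0;
}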
lan_queue_rx::rx_desc_ctx::rx_desc_ctx(lan_queue_rx &queue_)
: desc_ctx(queue_), rq(queue_)
{
: desc_ctx(queue_), rq(queue_) {
}
void lan_queue_rx::rx_desc_ctx::data_written(uint64_t addr, size_t len)
{
processed();
void lan_queue_rx::rx_desc_ctx::data_written(uint64_t addr, size_t len) {
processed();
}
void lan_queue_rx::rx_desc_ctx::process()
{
rq.dcache.push_back(this);
void lan_queue_rx::rx_desc_ctx::process() {
rq.dcache.push_back(this);
}
void lan_queue_rx::rx_desc_ctx::packet_received(const void *data,
size_t pktlen, bool last)
{
union i40e_32byte_rx_desc *rxd = reinterpret_cast<
union i40e_32byte_rx_desc *> (desc);
void lan_queue_rx::rx_desc_ctx::packet_received(const void *data, size_t pktlen,
bool last) {
union i40e_32byte_rx_desc *rxd =
reinterpret_cast<union i40e_32byte_rx_desc *>(desc);
uint64_t addr = rxd->read.pkt_addr;
uint64_t addr = rxd->read.pkt_addr;
memset(rxd, 0, sizeof(*rxd));
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
rxd->wb.qword1.status_error_len |=
(pktlen << I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
memset(rxd, 0, sizeof(*rxd));
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
rxd->wb.qword1.status_error_len |= (pktlen << I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
if (last) {
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
// TODO(antoinek): only if checksums are correct
rxd->wb.qword1.status_error_len |=
(1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT);
}
data_write(addr, pktlen, data);
if (last) {
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
// TODO(antoinek): only if checksums are correct
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT);
}
data_write(addr, pktlen, data);
}
lan_queue_tx::lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl)
uint32_t &reg_ena_, uint32_t &reg_fpmbase_,
uint32_t &reg_intqctl)
: lan_queue_base(lanmgr_, "txq", reg_tail_, idx_, reg_ena_, reg_fpmbase_,
reg_intqctl, 128)
{
desc_len = 16;
ctxs_init();
reg_intqctl, 128) {
desc_len = 16;
ctxs_init();
}
void lan_queue_tx::reset()
{
tso_off = 0;
tso_len = 0;
ready_segments.clear();
queue_base::reset();
void lan_queue_tx::reset() {
tso_off = 0;
tso_len = 0;
ready_segments.clear();
queue_base::reset();
}
void lan_queue_tx::initialize()
{
void lan_queue_tx::initialize() {
#ifdef DEBUG_LAN
log << " initialize()" << logger::endl;
log << " initialize()" << logger::endl;
#endif
uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);
uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);
uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
uint16_t *hwb_qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 20);
uint64_t *hwb_addr_p = reinterpret_cast<uint64_t *>(ctx_p + 24);
uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
uint16_t *hwb_qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 20);
uint64_t *hwb_addr_p = reinterpret_cast<uint64_t *>(ctx_p + 24);
reg_dummy_head = (*head_p) & ((1 << 13) - 1);
reg_dummy_head = (*head_p) & ((1 << 13) - 1);
base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
len = ((*hwb_qlen_p) >> 1) & ((1 << 13) - 1);
base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
len = ((*hwb_qlen_p) >> 1) & ((1 << 13) - 1);
hwb = !!(*hwb_qlen_p & (1 << 0));
hwb_addr = *hwb_addr_p;
hwb = !!(*hwb_qlen_p & (1 << 0));
hwb_addr = *hwb_addr_p;
#ifdef DEBUG_LAN
log << " head=" << reg_dummy_head << " base=" << base <<
" len=" << len << " hwb=" << hwb << " hwb_addr=" << hwb_addr <<
logger::endl;
log << " head=" << reg_dummy_head << " base=" << base << " len=" << len
<< " hwb=" << hwb << " hwb_addr=" << hwb_addr << logger::endl;
#endif
}
queue_base::desc_ctx &lan_queue_tx::desc_ctx_create()
{
return *new tx_desc_ctx(*this);
queue_base::desc_ctx &lan_queue_tx::desc_ctx_create() {
return *new tx_desc_ctx(*this);
}
void lan_queue_tx::do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt)
{
if (!hwb) {
// if head index writeback is disabled we need to write the descriptor back
lan_queue_base::do_writeback(first_idx, first_pos, cnt);
} else {
// else we just need to write the index back
dma_hwb *dma = new dma_hwb(*this, first_pos, cnt,
(first_idx + cnt) % len);
dma->dma_addr = hwb_addr;
uint32_t cnt) {
if (!hwb) {
// if head index writeback is disabled we need to write the descriptor back
lan_queue_base::do_writeback(first_idx, first_pos, cnt);
} else {
// else we just need to write the index back
dma_hwb *dma = new dma_hwb(*this, first_pos, cnt, (first_idx + cnt) % len);
dma->dma_addr = hwb_addr;
#ifdef DEBUG_LAN
log << " hwb=" << *((uint32_t *) dma->data) << logger::endl;
log << " hwb=" << *((uint32_t *)dma->data) << logger::endl;
#endif
runner->issue_dma(*dma);
}
runner->issue_dma(*dma);
}
}
bool lan_queue_tx::trigger_tx_packet()
{
size_t n = ready_segments.size();
size_t d_skip = 0, dcnt;
bool eop = false;
uint64_t d1;
uint32_t iipt, l4t, pkt_len, total_len = 0, data_limit;
bool tso = false;
uint32_t tso_mss = 0, tso_paylen = 0;
uint16_t maclen = 0, iplen = 0, l4len = 0;
bool lan_queue_tx::trigger_tx_packet() {
size_t n = ready_segments.size();
size_t d_skip = 0, dcnt;
bool eop = false;
uint64_t d1;
uint32_t iipt, l4t, pkt_len, total_len = 0, data_limit;
bool tso = false;
uint32_t tso_mss = 0, tso_paylen = 0;
uint16_t maclen = 0, iplen = 0, l4len = 0;
// abort if no queued up descriptors
if (n == 0)
return false;
// abort if no queued up descriptors
if (n == 0)
return false;
#ifdef DEBUG_LAN
log << "trigger_tx_packet(n=" << n << ", firstidx=" <<
ready_segments.at(0)->index << ")" << logger::endl;
log << " tso_off=" << tso_off << " tso_len=" << tso_len << logger::endl;
log << "trigger_tx_packet(n=" << n
<< ", firstidx=" << ready_segments.at(0)->index << ")" << logger::endl;
log << " tso_off=" << tso_off << " tso_len=" << tso_len << logger::endl;
#endif
// check if we have a context descriptor first
tx_desc_ctx *rd = ready_segments.at(0);
uint8_t dtype = (rd->d->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) >>
I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
struct i40e_tx_context_desc *ctxd =
reinterpret_cast<struct i40e_tx_context_desc *>(rd->d);
d1 = ctxd->type_cmd_tso_mss;
// check if we have a context descriptor first
tx_desc_ctx *rd = ready_segments.at(0);
uint8_t dtype = (rd->d->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) >>
I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
struct i40e_tx_context_desc *ctxd =
reinterpret_cast<struct i40e_tx_context_desc *> (rd->d);
d1 = ctxd->type_cmd_tso_mss;
uint16_t cmd = ((d1 & I40E_TXD_CTX_QW1_CMD_MASK) >>
I40E_TXD_CTX_QW1_CMD_SHIFT);
tso = !!(cmd & I40E_TX_CTX_DESC_TSO);
tso_mss = (d1 & I40E_TXD_CTX_QW1_MSS_MASK) >>
I40E_TXD_CTX_QW1_MSS_SHIFT;
uint16_t cmd =
((d1 & I40E_TXD_CTX_QW1_CMD_MASK) >> I40E_TXD_CTX_QW1_CMD_SHIFT);
tso = !!(cmd & I40E_TX_CTX_DESC_TSO);
tso_mss = (d1 & I40E_TXD_CTX_QW1_MSS_MASK) >> I40E_TXD_CTX_QW1_MSS_SHIFT;
#ifdef DEBUG_LAN
log << " tso=" << tso << " mss=" << tso_mss << logger::endl;
log << " tso=" << tso << " mss=" << tso_mss << logger::endl;
#endif
d_skip = 1;
}
d_skip = 1;
}
// find EOP descriptor
for (dcnt = d_skip; dcnt < n && !eop; dcnt++) {
tx_desc_ctx *rd = ready_segments.at(dcnt);
d1 = rd->d->cmd_type_offset_bsz;
// find EOP descriptor
for (dcnt = d_skip; dcnt < n && !eop; dcnt++) {
tx_desc_ctx *rd = ready_segments.at(dcnt);
d1 = rd->d->cmd_type_offset_bsz;
#ifdef DEBUG_LAN
log << " data fetched didx=" << rd->index << " d1=" <<
d1 << logger::endl;
log << " data fetched didx=" << rd->index << " d1=" << d1 << logger::endl;
#endif
dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype != I40E_TX_DESC_DTYPE_DATA) {
log << "trigger tx desc is not a data descriptor idx=" << rd->index
<< " d1=" << d1 << logger::endl;
abort();
}
uint16_t cmd = (d1 & I40E_TXD_QW1_CMD_MASK) >> I40E_TXD_QW1_CMD_SHIFT;
eop = (cmd & I40E_TX_DESC_CMD_EOP);
iipt = cmd & (I40E_TX_DESC_CMD_IIPT_MASK);
l4t = (cmd & I40E_TX_DESC_CMD_L4T_EOFT_MASK);
if (eop) {
uint32_t off = (d1 & I40E_TXD_QW1_OFFSET_MASK) >>
I40E_TXD_QW1_OFFSET_SHIFT;
maclen = ((off & I40E_TXD_QW1_MACLEN_MASK) >>
I40E_TX_DESC_LENGTH_MACLEN_SHIFT) * 2;
iplen = ((off & I40E_TXD_QW1_IPLEN_MASK) >>
I40E_TX_DESC_LENGTH_IPLEN_SHIFT) * 4;
l4len = ((off & I40E_TXD_QW1_L4LEN_MASK) >>
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) * 4;
}
pkt_len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
total_len += pkt_len;
dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype != I40E_TX_DESC_DTYPE_DATA) {
log << "trigger tx desc is not a data descriptor idx=" << rd->index
<< " d1=" << d1 << logger::endl;
abort();
}
uint16_t cmd = (d1 & I40E_TXD_QW1_CMD_MASK) >> I40E_TXD_QW1_CMD_SHIFT;
eop = (cmd & I40E_TX_DESC_CMD_EOP);
iipt = cmd & (I40E_TX_DESC_CMD_IIPT_MASK);
l4t = (cmd & I40E_TX_DESC_CMD_L4T_EOFT_MASK);
if (eop) {
uint32_t off =
(d1 & I40E_TXD_QW1_OFFSET_MASK) >> I40E_TXD_QW1_OFFSET_SHIFT;
maclen = ((off & I40E_TXD_QW1_MACLEN_MASK) >>
I40E_TX_DESC_LENGTH_MACLEN_SHIFT) *
2;
iplen =
((off & I40E_TXD_QW1_IPLEN_MASK) >> I40E_TX_DESC_LENGTH_IPLEN_SHIFT) *
4;
l4len = ((off & I40E_TXD_QW1_L4LEN_MASK) >>
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) *
4;
}
pkt_len =
(d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
total_len += pkt_len;
#ifdef DEBUG_LAN
log << " eop=" << eop << " len=" << pkt_len << logger::endl;
log << " eop=" << eop << " len=" << pkt_len << logger::endl;
#endif
}
}
// Unit not completely fetched yet
if (!eop)
return false;
// Unit not completely fetched yet
if (!eop)
return false;
if (tso) {
if (tso_off == 0)
data_limit = maclen + iplen + l4len + tso_mss;
else
data_limit = tso_off + tso_mss;
if (tso) {
if (tso_off == 0)
data_limit = maclen + iplen + l4len + tso_mss;
else
data_limit = tso_off + tso_mss;
if (data_limit > total_len) {
data_limit = total_len;
}
} else {
if (total_len > MTU) {
log << " packet is longer (" << total_len << ") than MTU (" <<
MTU << ")" << logger::endl;
abort();
}
data_limit = total_len;
if (data_limit > total_len) {
data_limit = total_len;
}
} else {
if (total_len > MTU) {
log << " packet is longer (" << total_len << ") than MTU (" << MTU
<< ")" << logger::endl;
abort();
}
data_limit = total_len;
}
#ifdef DEBUG_LAN
log << " iipt=" << iipt << " l4t=" << l4t <<
" maclen=" << maclen << " iplen=" << iplen << " l4len=" << l4len <<
" total_len=" << total_len << " data_limit=" << data_limit <<
logger::endl;
log << " iipt=" << iipt << " l4t=" << l4t << " maclen=" << maclen
<< " iplen=" << iplen << " l4len=" << l4len << " total_len=" << total_len
<< " data_limit=" << data_limit << logger::endl;
#else
(void) iipt;
(void)iipt;
#endif
// copy data for this segment
uint32_t off = 0;
for (dcnt = d_skip; dcnt < n && off < data_limit; dcnt++) {
tx_desc_ctx *rd = ready_segments.at(dcnt);
d1 = rd->d->cmd_type_offset_bsz;
uint16_t pkt_len =
(d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
// copy data for this segment
uint32_t off = 0;
for (dcnt = d_skip; dcnt < n && off < data_limit; dcnt++) {
tx_desc_ctx *rd = ready_segments.at(dcnt);
d1 = rd->d->cmd_type_offset_bsz;
uint16_t pkt_len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
if (off <= tso_off && off + pkt_len > tso_off) {
uint32_t start = tso_off;
uint32_t end = off + pkt_len;
if (end > data_limit)
end = data_limit;
if (off <= tso_off && off + pkt_len > tso_off) {
uint32_t start = tso_off;
uint32_t end = off + pkt_len;
if (end > data_limit)
end = data_limit;
#ifdef DEBUG_LAN
log << " copying data from off=" << off << " idx=" << rd->index <<
" start=" << start << " end=" << end << " tso_len=" << tso_len <<
logger::endl;
log << " copying data from off=" << off << " idx=" << rd->index
<< " start=" << start << " end=" << end << " tso_len=" << tso_len
<< logger::endl;
#endif
memcpy(pktbuf + tso_len, (uint8_t *) rd->data + (start - off),
end - start);
tso_off = end;
tso_len += end - start;
}
off += pkt_len;
memcpy(pktbuf + tso_len, (uint8_t *)rd->data + (start - off),
end - start);
tso_off = end;
tso_len += end - start;
}
assert(tso_len <= MTU);
off += pkt_len;
}
assert(tso_len <= MTU);
if (!tso) {
if (!tso) {
#ifdef DEBUG_LAN
log << " normal non-tso packet" << logger::endl;
log << " normal non-tso packet" << logger::endl;
#endif
if (l4t == I40E_TX_DESC_CMD_L4T_EOFT_TCP) {
uint16_t tcp_off = maclen + iplen;
xsum_tcp(pktbuf + tcp_off, tso_len - tcp_off);
}
if (l4t == I40E_TX_DESC_CMD_L4T_EOFT_TCP) {
uint16_t tcp_off = maclen + iplen;
xsum_tcp(pktbuf + tcp_off, tso_len - tcp_off);
}
runner->eth_send(pktbuf, tso_len);
} else {
runner->eth_send(pktbuf, tso_len);
} else {
#ifdef DEBUG_LAN
log << " tso packet off=" << tso_off << " len=" << tso_len <<
logger::endl;
log << " tso packet off=" << tso_off << " len=" << tso_len
<< logger::endl;
#endif
// TSO gets hairier
uint16_t hdrlen = maclen + iplen + l4len;
// TSO gets hairier
uint16_t hdrlen = maclen + iplen + l4len;
// calculate payload size
tso_paylen = tso_len - hdrlen;
if (tso_paylen > tso_mss)
tso_paylen = tso_mss;
// calculate payload size
tso_paylen = tso_len - hdrlen;
if (tso_paylen > tso_mss)
tso_paylen = tso_mss;
xsum_tcpip_tso(pktbuf + maclen, iplen, l4len, tso_paylen);
xsum_tcpip_tso(pktbuf + maclen, iplen, l4len, tso_paylen);
runner->eth_send(pktbuf, tso_len);
runner->eth_send(pktbuf, tso_len);
tso_postupdate_header(pktbuf + maclen, iplen, l4len, tso_paylen);
tso_postupdate_header(pktbuf + maclen, iplen, l4len, tso_paylen);
// not done yet with this TSO unit
if (tso && tso_off < total_len) {
tso_len = hdrlen;
return true;
}
// not done yet with this TSO unit
if (tso && tso_off < total_len) {
tso_len = hdrlen;
return true;
}
}
#ifdef DEBUG_LAN
log << " unit done" << logger::endl;
log << " unit done" << logger::endl;
#endif
while (dcnt-- > 0) {
ready_segments.front()->processed();
ready_segments.pop_front();
}
while (dcnt-- > 0) {
ready_segments.front()->processed();
ready_segments.pop_front();
}
tso_len = 0;
tso_off = 0;
tso_len = 0;
tso_off = 0;
return true;
return true;
}
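// Not part of this commit: a simplified sketch of the TSO segmentation idea
// implemented by trigger_tx_packet() above. The MAC+IP+TCP header is replayed
// for every segment and at most mss payload bytes go out per frame; the header
// fixups (IP length/ID, TCP sequence number, checksums) that the real code
// performs between segments are only hinted at. send_frame() is a hypothetical
// output hook.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

static void send_frame(const uint8_t *buf, size_t len) {
  std::printf("frame len=%zu\n", len);
}

static void tso_segment(const uint8_t *pkt, size_t hdrlen, size_t paylen,
                        size_t mss) {
  uint8_t frame[2048];
  std::memcpy(frame, pkt, hdrlen);  // header is reused for every segment
  for (size_t off = 0; off < paylen; off += mss) {
    size_t seg = std::min(paylen - off, mss);
    std::memcpy(frame + hdrlen, pkt + hdrlen + off, seg);
    // a real implementation would now patch the IP total length/ID, the TCP
    // sequence number and both checksums before sending
    send_frame(frame, hdrlen + seg);
  }
}

int main() {
  uint8_t pkt[1500] = {};
  tso_segment(pkt, /*hdrlen=*/54, /*paylen=*/1400, /*mss=*/536);
  return 0;
}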
void lan_queue_tx::trigger_tx()
{
while (trigger_tx_packet()) {}
void lan_queue_tx::trigger_tx() {
while (trigger_tx_packet()) {
}
}
lan_queue_tx::tx_desc_ctx::tx_desc_ctx(lan_queue_tx &queue_)
: desc_ctx(queue_), tq(queue_)
{
d = reinterpret_cast<struct i40e_tx_desc *>(desc);
: desc_ctx(queue_), tq(queue_) {
d = reinterpret_cast<struct i40e_tx_desc *>(desc);
}
void lan_queue_tx::tx_desc_ctx::prepare()
{
uint64_t d1 = d->cmd_type_offset_bsz;
void lan_queue_tx::tx_desc_ctx::prepare() {
uint64_t d1 = d->cmd_type_offset_bsz;
#ifdef DEBUG_LAN
queue.log << " desc fetched didx=" << index << " d1=" <<
d1 << logger::endl;
queue.log << " desc fetched didx=" << index << " d1=" << d1 << logger::endl;
#endif
uint8_t dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype == I40E_TX_DESC_DTYPE_DATA) {
uint16_t len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
uint8_t dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype == I40E_TX_DESC_DTYPE_DATA) {
uint16_t len =
(d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
#ifdef DEBUG_LAN
queue.log << " bufaddr=" << d->buffer_addr <<
" len=" << len << logger::endl;
queue.log << " bufaddr=" << d->buffer_addr << " len=" << len
<< logger::endl;
#endif
data_fetch(d->buffer_addr, len);
} else if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
data_fetch(d->buffer_addr, len);
} else if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
#ifdef DEBUG_LAN
struct i40e_tx_context_desc *ctxd =
reinterpret_cast<struct i40e_tx_context_desc *> (d);
queue.log << " context descriptor: tp=" << ctxd->tunneling_params <<
" l2t=" << ctxd->l2tag2 << " tctm=" << ctxd->type_cmd_tso_mss <<
logger::endl;
struct i40e_tx_context_desc *ctxd =
reinterpret_cast<struct i40e_tx_context_desc *>(d);
queue.log << " context descriptor: tp=" << ctxd->tunneling_params
<< " l2t=" << ctxd->l2tag2 << " tctm=" << ctxd->type_cmd_tso_mss
<< logger::endl;
#endif
prepared();
} else {
queue.log << "txq: only support context & data descriptors" <<
logger::endl;
abort();
}
prepared();
} else {
queue.log << "txq: only support context & data descriptors" << logger::endl;
abort();
}
}
void lan_queue_tx::tx_desc_ctx::process()
{
tq.ready_segments.push_back(this);
tq.trigger_tx();
void lan_queue_tx::tx_desc_ctx::process() {
tq.ready_segments.push_back(this);
tq.trigger_tx();
}
void lan_queue_tx::tx_desc_ctx::processed()
{
d->cmd_type_offset_bsz = I40E_TX_DESC_DTYPE_DESC_DONE <<
I40E_TXD_QW1_DTYPE_SHIFT;
desc_ctx::processed();
void lan_queue_tx::tx_desc_ctx::processed() {
d->cmd_type_offset_bsz = I40E_TX_DESC_DTYPE_DESC_DONE
<< I40E_TXD_QW1_DTYPE_SHIFT;
desc_ctx::processed();
}
lan_queue_tx::dma_hwb::dma_hwb(lan_queue_tx &queue_, uint32_t pos_,
uint32_t cnt_, uint32_t nh_)
: queue(queue_), pos(pos_), cnt(cnt_), next_head(nh_)
{
data = &next_head;
len = 4;
write = true;
uint32_t cnt_, uint32_t nh_)
: queue(queue_), pos(pos_), cnt(cnt_), next_head(nh_) {
data = &next_head;
len = 4;
write = true;
}
lan_queue_tx::dma_hwb::~dma_hwb()
{
lan_queue_tx::dma_hwb::~dma_hwb() {
}
void lan_queue_tx::dma_hwb::done()
{
void lan_queue_tx::dma_hwb::done() {
#ifdef DEBUG_LAN
queue.log << " tx head written back" << logger::endl;
queue.log << " tx head written back" << logger::endl;
#endif
queue.writeback_done(pos, cnt);
queue.trigger();
delete this;
queue.writeback_done(pos, cnt);
queue.trigger();
delete this;
}
......@@ -24,454 +24,426 @@
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <algorithm>
#include "sims/nic/i40e_bm/i40e_bm.h"
#include "sims/nic/i40e_bm/i40e_base_wrapper.h"
#include "sims/nic/i40e_bm/i40e_bm.h"
using namespace i40e;
extern nicbm::Runner *runner;
queue_base::queue_base(const std::string &qname_, uint32_t &reg_head_,
uint32_t &reg_tail_)
: qname(qname_), log(qname_), active_first_pos(0), active_first_idx(0),
active_cnt(0), base(0), len(0), reg_head(reg_head_), reg_tail(reg_tail_),
enabled(false), desc_len(0)
{
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = nullptr;
}
}
void queue_base::ctxs_init()
{
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = &desc_ctx_create();
}
}
void queue_base::trigger_fetch()
{
if (!enabled)
return;
// calculate how many we can fetch
uint32_t next_idx = (active_first_idx + active_cnt) % len;
uint32_t desc_avail = (reg_tail - next_idx) % len;
uint32_t fetch_cnt = desc_avail;
fetch_cnt = std::min(fetch_cnt, MAX_ACTIVE_DESCS - active_cnt);
if (max_active_capacity() <= active_cnt)
fetch_cnt = std::min(fetch_cnt, max_active_capacity() - active_cnt);
fetch_cnt = std::min(fetch_cnt, max_fetch_capacity());
if (next_idx + fetch_cnt > len)
fetch_cnt = len - next_idx;
uint32_t &reg_tail_)
: qname(qname_),
log(qname_),
active_first_pos(0),
active_first_idx(0),
active_cnt(0),
base(0),
len(0),
reg_head(reg_head_),
reg_tail(reg_tail_),
enabled(false),
desc_len(0) {
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = nullptr;
}
}
void queue_base::ctxs_init() {
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = &desc_ctx_create();
}
}
void queue_base::trigger_fetch() {
if (!enabled)
return;
// calculate how many we can fetch
uint32_t next_idx = (active_first_idx + active_cnt) % len;
uint32_t desc_avail = (reg_tail - next_idx) % len;
uint32_t fetch_cnt = desc_avail;
fetch_cnt = std::min(fetch_cnt, MAX_ACTIVE_DESCS - active_cnt);
if (max_active_capacity() <= active_cnt)
fetch_cnt = std::min(fetch_cnt, max_active_capacity() - active_cnt);
fetch_cnt = std::min(fetch_cnt, max_fetch_capacity());
if (next_idx + fetch_cnt > len)
fetch_cnt = len - next_idx;
#ifdef DEBUG_QUEUES
log << "fetching avail=" << desc_avail << " cnt=" << fetch_cnt << " idx=" <<
next_idx << logger::endl;
log << "fetching avail=" << desc_avail << " cnt=" << fetch_cnt
<< " idx=" << next_idx << logger::endl;
#endif
// abort if nothing to fetch
if (fetch_cnt == 0)
return;
// mark descriptor contexts as fetching
uint32_t first_pos = (active_first_pos + active_cnt) % MAX_ACTIVE_DESCS;
for (uint32_t i = 0; i < fetch_cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_EMPTY);
ctx.state = desc_ctx::DESC_FETCHING;
ctx.index = (next_idx + i) % len;
}
active_cnt += fetch_cnt;
// prepare & issue dma
dma_fetch *dma = new dma_fetch(*this, desc_len * fetch_cnt);
dma->write = false;
dma->dma_addr = base + next_idx * desc_len;
dma->pos = first_pos;
// abort if nothing to fetch
if (fetch_cnt == 0)
return;
// mark descriptor contexts as fetching
uint32_t first_pos = (active_first_pos + active_cnt) % MAX_ACTIVE_DESCS;
for (uint32_t i = 0; i < fetch_cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_EMPTY);
ctx.state = desc_ctx::DESC_FETCHING;
ctx.index = (next_idx + i) % len;
}
active_cnt += fetch_cnt;
// prepare & issue dma
dma_fetch *dma = new dma_fetch(*this, desc_len * fetch_cnt);
dma->write = false;
dma->dma_addr = base + next_idx * desc_len;
dma->pos = first_pos;
#ifdef DEBUG_QUEUES
log << " dma = " << dma << logger::endl;
log << " dma = " << dma << logger::endl;
#endif
runner->issue_dma(*dma);
}
void queue_base::trigger_process()
{
if (!enabled)
return;
// first skip over descriptors that are already done processing
uint32_t i;
for (i = 0; i < active_cnt; i++)
if (desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS]->state
<= desc_ctx::DESC_PREPARED)
break;
// then run all prepared contexts
uint32_t j;
for (j = 0; i + j < active_cnt; j++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i + j)
% MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_PREPARED)
break;
ctx.state = desc_ctx::DESC_PROCESSING;
runner->issue_dma(*dma);
}
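// Not part of this commit: the descriptor-ring arithmetic behind
// trigger_fetch() above, as a standalone illustration. It computes how many
// descriptors sit between the next index to fetch and the tail pointer, caps
// that at a fetch capacity, and clips at the end of the ring so one DMA stays
// contiguous. All numbers are made up.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t len = 1024;       // ring size in descriptors
  uint32_t next_idx = 1000;  // first descriptor not yet fetched
  uint32_t tail = 40;        // software has posted descriptors up to index 40

  uint32_t avail = (tail + len - next_idx) % len;  // -> 64 descriptors available
  uint32_t fetch = std::min<uint32_t>(avail, 128); // capacity limit (e.g. 128)
  if (next_idx + fetch > len)                      // don't wrap within one DMA
    fetch = len - next_idx;                        // -> 24 now, the rest next round

  std::printf("avail=%u fetch=%u\n", avail, fetch);
  return 0;
}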
void queue_base::trigger_process() {
if (!enabled)
return;
// first skip over descriptors that are already done processing
uint32_t i;
for (i = 0; i < active_cnt; i++)
if (desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS]->state <=
desc_ctx::DESC_PREPARED)
break;
// then run all prepared contexts
uint32_t j;
for (j = 0; i + j < active_cnt; j++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i + j) % MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_PREPARED)
break;
ctx.state = desc_ctx::DESC_PROCESSING;
#ifdef DEBUG_QUEUES
log << "processing desc " << ctx.index << logger::endl;
log << "processing desc " << ctx.index << logger::endl;
#endif
ctx.process();
}
ctx.process();
}
}
void queue_base::trigger_writeback()
{
if (!enabled)
return;
void queue_base::trigger_writeback() {
if (!enabled)
return;
// from first pos count number of processed descriptors
uint32_t avail;
for (avail = 0; avail < active_cnt; avail++)
if (desc_ctxs[(active_first_pos + avail) % MAX_ACTIVE_DESCS]->state
!= desc_ctx::DESC_PROCESSED)
break;
// from first pos count number of processed descriptors
uint32_t avail;
for (avail = 0; avail < active_cnt; avail++)
if (desc_ctxs[(active_first_pos + avail) % MAX_ACTIVE_DESCS]->state !=
desc_ctx::DESC_PROCESSED)
break;
uint32_t cnt = std::min(avail, max_writeback_capacity());
if (active_first_idx + cnt > len)
cnt = len - active_first_idx;
uint32_t cnt = std::min(avail, max_writeback_capacity());
if (active_first_idx + cnt > len)
cnt = len - active_first_idx;
#ifdef DEBUG_QUEUES
log << "writing back avail=" << avail << " cnt=" << cnt <<
" idx=" << active_first_idx << logger::endl;
log << "writing back avail=" << avail << " cnt=" << cnt
<< " idx=" << active_first_idx << logger::endl;
#endif
if (cnt == 0)
return;
if (cnt == 0)
return;
// mark these descriptors as writing back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS];
ctx.state = desc_ctx::DESC_WRITING_BACK;
}
// mark these descriptors as writing back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS];
ctx.state = desc_ctx::DESC_WRITING_BACK;
}
do_writeback(active_first_idx, active_first_pos, cnt);
do_writeback(active_first_idx, active_first_pos, cnt);
}
void queue_base::trigger()
{
void queue_base::trigger() {
trigger_fetch();
trigger_process();
trigger_writeback();
}
void queue_base::reset()
{
void queue_base::reset() {
#ifdef DEBUG_QUEUES
log << "reset" << logger::endl;
log << "reset" << logger::endl;
#endif
enabled = false;
active_first_pos = 0;
active_first_idx = 0;
active_cnt = 0;
enabled = false;
active_first_pos = 0;
active_first_idx = 0;
active_cnt = 0;
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i]->state = desc_ctx::DESC_EMPTY;
}
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i]->state = desc_ctx::DESC_EMPTY;
}
}
void queue_base::reg_updated()
{
void queue_base::reg_updated() {
#ifdef DEBUG_QUEUES
log << "reg_updated: tail=" << reg_tail << " enabled=" << (int) enabled <<
logger::endl;
log << "reg_updated: tail=" << reg_tail << " enabled=" << (int)enabled
<< logger::endl;
#endif
if (!enabled)
return;
if (!enabled)
return;
trigger();
trigger();
}
bool queue_base::is_enabled()
{
return enabled;
bool queue_base::is_enabled() {
return enabled;
}
uint32_t queue_base::max_fetch_capacity()
{
return UINT32_MAX;
uint32_t queue_base::max_fetch_capacity() {
return UINT32_MAX;
}
uint32_t queue_base::max_active_capacity()
{
return UINT32_MAX;
uint32_t queue_base::max_active_capacity() {
return UINT32_MAX;
}
uint32_t queue_base::max_writeback_capacity()
{
return UINT32_MAX;
uint32_t queue_base::max_writeback_capacity() {
return UINT32_MAX;
}
void queue_base::interrupt()
{
void queue_base::interrupt() {
}
void queue_base::do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt)
{
dma_wb *dma = new dma_wb(*this, desc_len * cnt);
dma->write = true;
dma->dma_addr = base + first_idx * desc_len;
dma->pos = first_pos;
uint8_t *buf = reinterpret_cast<uint8_t *> (dma->data);
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
memcpy(buf + i * desc_len, ctx.desc, desc_len);
}
runner->issue_dma(*dma);
}
void queue_base::writeback_done(uint32_t first_pos, uint32_t cnt)
{
if (!enabled)
return;
// first mark descriptors as written back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
ctx.state = desc_ctx::DESC_WRITTEN_BACK;
}
uint32_t cnt) {
dma_wb *dma = new dma_wb(*this, desc_len * cnt);
dma->write = true;
dma->dma_addr = base + first_idx * desc_len;
dma->pos = first_pos;
uint8_t *buf = reinterpret_cast<uint8_t *>(dma->data);
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
memcpy(buf + i * desc_len, ctx.desc, desc_len);
}
runner->issue_dma(*dma);
}
void queue_base::writeback_done(uint32_t first_pos, uint32_t cnt) {
if (!enabled)
return;
// first mark descriptors as written back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
ctx.state = desc_ctx::DESC_WRITTEN_BACK;
}
#ifdef DEBUG_QUEUES
log << "written back afi=" << active_first_idx << " afp=" <<
active_first_pos << " acnt=" << active_cnt << " pos=" <<
first_pos << " cnt=" << cnt << logger::endl;
log << "written back afi=" << active_first_idx << " afp=" << active_first_pos
<< " acnt=" << active_cnt << " pos=" << first_pos << " cnt=" << cnt
<< logger::endl;
#endif
// then start at the beginning and check how many are written back and then
// free those
uint32_t bump_cnt = 0;
for (bump_cnt = 0; bump_cnt < active_cnt; bump_cnt++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + bump_cnt) %
MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_WRITTEN_BACK)
break;
ctx.state = desc_ctx::DESC_EMPTY;
}
// then start at the beginning and check how many are written back and then
// free those
uint32_t bump_cnt = 0;
for (bump_cnt = 0; bump_cnt < active_cnt; bump_cnt++) {
desc_ctx &ctx =
*desc_ctxs[(active_first_pos + bump_cnt) % MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_WRITTEN_BACK)
break;
ctx.state = desc_ctx::DESC_EMPTY;
}
#ifdef DEBUG_QUEUES
log << " bump_cnt=" << bump_cnt << logger::endl;
log << " bump_cnt=" << bump_cnt << logger::endl;
#endif
active_first_pos = (active_first_pos + bump_cnt) % MAX_ACTIVE_DESCS;
active_first_idx = (active_first_idx + bump_cnt) % len;
active_cnt -= bump_cnt;
active_first_pos = (active_first_pos + bump_cnt) % MAX_ACTIVE_DESCS;
active_first_idx = (active_first_idx + bump_cnt) % len;
active_cnt -= bump_cnt;
reg_head = active_first_idx;
interrupt();
reg_head = active_first_idx;
interrupt();
}
queue_base::desc_ctx::desc_ctx(queue_base &queue_)
: queue(queue_), state(DESC_EMPTY), index(0), data(nullptr), data_len(0),
data_capacity(0)
{
desc = new uint8_t[queue_.desc_len];
: queue(queue_),
state(DESC_EMPTY),
index(0),
data(nullptr),
data_len(0),
data_capacity(0) {
desc = new uint8_t[queue_.desc_len];
}
queue_base::desc_ctx::~desc_ctx()
{
delete[] ((uint8_t *) desc);
if (data_capacity > 0)
delete[] ((uint8_t *) data);
queue_base::desc_ctx::~desc_ctx() {
delete[]((uint8_t *)desc);
if (data_capacity > 0)
delete[]((uint8_t *)data);
}
void queue_base::desc_ctx::prepare()
{
prepared();
void queue_base::desc_ctx::prepare() {
prepared();
}
void queue_base::desc_ctx::prepared()
{
void queue_base::desc_ctx::prepared() {
#ifdef DEBUG_QUEUES
queue.log << "prepared desc " << index << logger::endl;
queue.log << "prepared desc " << index << logger::endl;
#endif
assert(state == DESC_PREPARING);
state = DESC_PREPARED;
assert(state == DESC_PREPARING);
state = DESC_PREPARED;
}
void queue_base::desc_ctx::processed()
{
void queue_base::desc_ctx::processed() {
#ifdef DEBUG_QUEUES
queue.log << "processed desc " << index << logger::endl;
queue.log << "processed desc " << index << logger::endl;
#endif
assert(state == DESC_PROCESSING);
state = DESC_PROCESSED;
assert(state == DESC_PROCESSING);
state = DESC_PROCESSED;
}
#define MAX_DMA_SIZE ((size_t) 9024)
#define MAX_DMA_SIZE ((size_t)9024)
void queue_base::desc_ctx::data_fetch(uint64_t addr, size_t data_len)
{
if (data_capacity < data_len) {
void queue_base::desc_ctx::data_fetch(uint64_t addr, size_t data_len) {
if (data_capacity < data_len) {
#ifdef DEBUG_QUEUES
queue.log << "data_fetch allocating" << logger::endl;
queue.log << "data_fetch allocating" << logger::endl;
#endif
if (data_capacity != 0)
delete[] ((uint8_t *) data);
if (data_capacity != 0)
delete[]((uint8_t *)data);
data = new uint8_t[data_len];
data_capacity = data_len;
}
data = new uint8_t[data_len];
data_capacity = data_len;
}
dma_data_fetch *dma = new dma_data_fetch(*this, std::min(data_len,
MAX_DMA_SIZE), data);
dma->part_offset = 0;
dma->total_len = data_len;
dma->write = false;
dma->dma_addr = addr;
dma_data_fetch *dma =
new dma_data_fetch(*this, std::min(data_len, MAX_DMA_SIZE), data);
dma->part_offset = 0;
dma->total_len = data_len;
dma->write = false;
dma->dma_addr = addr;
#ifdef DEBUG_QUEUES
queue.log << "fetching data idx=" << index << " addr=" << addr << " len=" <<
data_len << logger::endl;
queue.log << " dma = " << dma << " data=" << data << logger::endl;
queue.log << "fetching data idx=" << index << " addr=" << addr
<< " len=" << data_len << logger::endl;
queue.log << " dma = " << dma << " data=" << data << logger::endl;
#endif
runner->issue_dma(*dma);
runner->issue_dma(*dma);
}
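// Not part of this commit: the chunking performed by data_fetch() and
// dma_data_fetch::done() above, written as a plain loop. A transfer larger
// than MAX_DMA_SIZE is issued as several consecutive DMA parts; issue_part()
// is a hypothetical stand-in for issuing one DMA operation.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kMaxDmaSize = 9024;

static void issue_part(uint64_t addr, size_t len) {
  std::printf("dma addr=0x%llx len=%zu\n",
              static_cast<unsigned long long>(addr), len);
}

static void fetch_chunked(uint64_t addr, size_t total_len) {
  for (size_t off = 0; off < total_len;) {
    size_t part = std::min(total_len - off, kMaxDmaSize);
    issue_part(addr + off, part);
    off += part;
  }
}

int main() {
  fetch_chunked(0x10000, 20000);  // -> parts of 9024, 9024 and 1952 bytes
  return 0;
}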
void queue_base::desc_ctx::data_fetched(uint64_t addr, size_t len)
{
prepared();
void queue_base::desc_ctx::data_fetched(uint64_t addr, size_t len) {
prepared();
}
void queue_base::desc_ctx::data_write(uint64_t addr, size_t data_len,
const void *buf)
{
const void *buf) {
#ifdef DEBUG_QUEUES
queue.log << "data_write(addr=" << addr << " datalen=" << data_len << ")" <<
logger::endl;
queue.log << "data_write(addr=" << addr << " datalen=" << data_len << ")"
<< logger::endl;
#endif
dma_data_wb *data_dma = new dma_data_wb(*this, data_len);
data_dma->write = true;
data_dma->dma_addr = addr;
memcpy(data_dma->data, buf, data_len);
dma_data_wb *data_dma = new dma_data_wb(*this, data_len);
data_dma->write = true;
data_dma->dma_addr = addr;
memcpy(data_dma->data, buf, data_len);
runner->issue_dma(*data_dma);
runner->issue_dma(*data_dma);
}
void queue_base::desc_ctx::data_written(uint64_t addr, size_t len)
{
void queue_base::desc_ctx::data_written(uint64_t addr, size_t len) {
#ifdef DEBUG_QUEUES
queue.log << "data_written(addr=" << addr << " datalen=" << len << ")" <<
logger::endl;
queue.log << "data_written(addr=" << addr << " datalen=" << len << ")"
<< logger::endl;
#endif
processed();
processed();
}
queue_base::dma_fetch::dma_fetch(queue_base &queue_, size_t len_)
: queue(queue_)
{
data = new char[len_];
len = len_;
: queue(queue_) {
data = new char[len_];
len = len_;
}
queue_base::dma_fetch::~dma_fetch()
{
delete[] ((char *) data);
queue_base::dma_fetch::~dma_fetch() {
delete[]((char *)data);
}
void queue_base::dma_fetch::done()
{
uint8_t *buf = reinterpret_cast <uint8_t *> (data);
for (uint32_t i = 0; i < len / queue.desc_len; i++) {
desc_ctx &ctx = *queue.desc_ctxs[(pos + i) % queue.MAX_ACTIVE_DESCS];
memcpy(ctx.desc, buf + queue.desc_len * i, queue.desc_len);
void queue_base::dma_fetch::done() {
uint8_t *buf = reinterpret_cast<uint8_t *>(data);
for (uint32_t i = 0; i < len / queue.desc_len; i++) {
desc_ctx &ctx = *queue.desc_ctxs[(pos + i) % queue.MAX_ACTIVE_DESCS];
memcpy(ctx.desc, buf + queue.desc_len * i, queue.desc_len);
#ifdef DEBUG_QUEUES
queue.log << "preparing desc " << ctx.index << logger::endl;
queue.log << "preparing desc " << ctx.index << logger::endl;
#endif
ctx.state = desc_ctx::DESC_PREPARING;
ctx.prepare();
}
queue.trigger();
delete this;
ctx.state = desc_ctx::DESC_PREPARING;
ctx.prepare();
}
queue.trigger();
delete this;
}
queue_base::dma_data_fetch::dma_data_fetch(desc_ctx &ctx_, size_t len_,
void *buffer)
: ctx(ctx_)
{
data = buffer;
len = len_;
void *buffer)
: ctx(ctx_) {
data = buffer;
len = len_;
}
queue_base::dma_data_fetch::~dma_data_fetch()
{
queue_base::dma_data_fetch::~dma_data_fetch() {
}
void queue_base::dma_data_fetch::done()
{
part_offset += len;
dma_addr += len;
data = (uint8_t *) data + len;
void queue_base::dma_data_fetch::done() {
part_offset += len;
dma_addr += len;
data = (uint8_t *)data + len;
if (part_offset < total_len) {
if (part_offset < total_len) {
#ifdef DEBUG_QUEUES
ctx.queue.log << " dma_fetch: next part of multi part dma" <<
logger::endl;
ctx.queue.log << " dma_fetch: next part of multi part dma" << logger::endl;
#endif
len = std::min(total_len - part_offset, MAX_DMA_SIZE);
runner->issue_dma(*this);
return;
}
ctx.data_fetched(dma_addr - part_offset, total_len);
ctx.queue.trigger();
delete this;
len = std::min(total_len - part_offset, MAX_DMA_SIZE);
runner->issue_dma(*this);
return;
}
ctx.data_fetched(dma_addr - part_offset, total_len);
ctx.queue.trigger();
delete this;
}
queue_base::dma_wb::dma_wb(queue_base &queue_, size_t len_)
: queue(queue_)
{
data = new char[len_];
len = len_;
queue_base::dma_wb::dma_wb(queue_base &queue_, size_t len_) : queue(queue_) {
data = new char[len_];
len = len_;
}
queue_base::dma_wb::~dma_wb()
{
delete[] ((char *) data);
queue_base::dma_wb::~dma_wb() {
delete[]((char *)data);
}
void queue_base::dma_wb::done()
{
queue.writeback_done(pos, len / queue.desc_len);
queue.trigger();
delete this;
void queue_base::dma_wb::done() {
queue.writeback_done(pos, len / queue.desc_len);
queue.trigger();
delete this;
}
queue_base::dma_data_wb::dma_data_wb(desc_ctx &ctx_, size_t len_)
: ctx(ctx_)
{
data = new char[len_];
len = len_;
queue_base::dma_data_wb::dma_data_wb(desc_ctx &ctx_, size_t len_) : ctx(ctx_) {
data = new char[len_];
len = len_;
}
queue_base::dma_data_wb::~dma_data_wb()
{
delete[] ((char *) data);
queue_base::dma_data_wb::~dma_data_wb() {
delete[]((char *)data);
}
void queue_base::dma_data_wb::done()
{
ctx.data_written(dma_addr, len);
ctx.queue.trigger();
delete this;
void queue_base::dma_data_wb::done() {
ctx.data_written(dma_addr, len);
ctx.queue.trigger();
delete this;
}
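The fetch path above caps each DMA at MAX_DMA_SIZE: data_fetch() issues the first chunk, and dma_data_fetch::done() advances part_offset/dma_addr and re-issues itself until total_len is covered, only then calling ctx.data_fetched(). The model drives this asynchronously from the completion callback; the sketch below only illustrates the chunking arithmetic, with an assumed MAX_DMA_SIZE value and a hypothetical issue_one() standing in for runner->issue_dma().
#include <algorithm>
#include <cstddef>
#include <cstdint>
static const size_t kMaxDmaSize = 2048;  // assumed value of MAX_DMA_SIZE
// Illustration only: split one logical transfer into bounded chunks.
static void issue_chunked(uint64_t addr, size_t total_len,
                          void (*issue_one)(uint64_t, size_t)) {
  size_t off = 0;
  while (off < total_len) {
    size_t len = std::min(total_len - off, kMaxDmaSize);
    issue_one(addr + off, len);  // one DMA per chunk, never above the cap
    off += len;
  }
}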
......@@ -30,69 +30,58 @@ using namespace i40e;
extern nicbm::Runner *runner;
logger::logger(const std::string &label_)
: label(label_)
{
ss << std::hex;
logger::logger(const std::string &label_) : label(label_) {
ss << std::hex;
}
logger &logger::operator<<(char c)
{
if (c == endl) {
std::cerr << runner->time_ps() << " " << label << ": " << ss.str() <<
std::endl;
ss.str(std::string());
ss << std::hex;
} else {
ss << c;
}
return *this;
logger &logger::operator<<(char c) {
if (c == endl) {
std::cerr << runner->time_ps() << " " << label << ": " << ss.str()
<< std::endl;
ss.str(std::string());
ss << std::hex;
} else {
ss << c;
}
return *this;
}
logger &logger::operator<<(int32_t i)
{
ss << i;
return *this;
logger &logger::operator<<(int32_t i) {
ss << i;
return *this;
}
logger &logger::operator<<(uint8_t i)
{
ss << (unsigned) i;
return *this;
logger &logger::operator<<(uint8_t i) {
ss << (unsigned)i;
return *this;
}
logger &logger::operator<<(uint16_t i)
{
ss << i;
return *this;
logger &logger::operator<<(uint16_t i) {
ss << i;
return *this;
}
logger &logger::operator<<(uint32_t i)
{
ss << i;
return *this;
logger &logger::operator<<(uint32_t i) {
ss << i;
return *this;
}
logger &logger::operator<<(uint64_t i)
{
ss << i;
return *this;
logger &logger::operator<<(uint64_t i) {
ss << i;
return *this;
}
logger &logger::operator<<(bool b)
{
ss << b;
return *this;
logger &logger::operator<<(bool b) {
ss << b;
return *this;
}
logger &logger::operator<<(const char *str)
{
ss << str;
return *this;
logger &logger::operator<<(const char *str) {
ss << str;
return *this;
}
logger &logger::operator<<(void *ptr)
{
ss << ptr;
return *this;
logger &logger::operator<<(void *ptr) {
ss << ptr;
return *this;
}
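A quick usage sketch of the logger above (the label and values are made up): everything is formatted into the internal stringstream in hex, and streaming logger::endl, the class's own sentinel character rather than std::endl, flushes one line prefixed with runner->time_ps() and the label.
static void logger_example() {
  logger log("i40e");  // label is a made-up example
  uint64_t addr = 0xc0108000;
  log << "mmio write addr=" << addr << " len=" << (uint32_t)4 << logger::endl;
  // emits roughly: "<time_ps> i40e: mmio write addr=c0108000 len=4"
}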
......@@ -26,74 +26,66 @@
using namespace i40e;
rss_key_cache::rss_key_cache(const uint32_t (&key_)[key_len / 4])
: key(key_)
{
cache_dirty = true;
rss_key_cache::rss_key_cache(const uint32_t (&key_)[key_len / 4]) : key(key_) {
cache_dirty = true;
}
void rss_key_cache::build() {
const uint8_t *k = reinterpret_cast<const uint8_t *>(&key);
uint32_t result = (((uint32_t)k[0]) << 24) | (((uint32_t)k[1]) << 16) |
(((uint32_t)k[2]) << 8) | ((uint32_t)k[3]);
void rss_key_cache::build()
{
const uint8_t *k = reinterpret_cast<const uint8_t *> (&key);
uint32_t result = (((uint32_t)k[0]) << 24) |
(((uint32_t)k[1]) << 16) |
(((uint32_t)k[2]) << 8) |
((uint32_t)k[3]);
uint32_t idx = 32;
size_t i;
uint32_t idx = 32;
size_t i;
for (i = 0; i < cache_len; i++, idx++) {
uint8_t shift = (idx % 8);
uint32_t bit;
for (i = 0; i < cache_len; i++, idx++) {
uint8_t shift = (idx % 8);
uint32_t bit;
cache[i] = result;
bit = ((k[idx / 8] << shift) & 0x80) ? 1 : 0;
result = ((result << 1) | bit);
}
cache[i] = result;
bit = ((k[idx / 8] << shift) & 0x80) ? 1 : 0;
result = ((result << 1) | bit);
}
cache_dirty = false;
cache_dirty = false;
}
void rss_key_cache::set_dirty()
{
cache_dirty = true;
void rss_key_cache::set_dirty() {
cache_dirty = true;
}
uint32_t rss_key_cache::hash_ipv4(uint32_t sip, uint32_t dip, uint16_t sp,
uint16_t dp)
{
static const uint32_t MSB32 = 0x80000000;
static const uint32_t MSB16 = 0x8000;
uint32_t res = 0;
int i;
if (cache_dirty)
build();
for (i = 0; i < 32; i++) {
if (sip & MSB32)
res ^= cache[i];
sip <<= 1;
}
for (i = 0; i < 32; i++) {
if (dip & MSB32)
res ^= cache[32+i];
dip <<= 1;
}
for (i = 0; i < 16; i++) {
if (sp & MSB16)
res ^= cache[64+i];
sp <<= 1;
}
for (i = 0; i < 16; i++) {
if (dp & MSB16)
res ^= cache[80+i];
dp <<= 1;
}
return res;
uint16_t dp) {
static const uint32_t MSB32 = 0x80000000;
static const uint32_t MSB16 = 0x8000;
uint32_t res = 0;
int i;
if (cache_dirty)
build();
for (i = 0; i < 32; i++) {
if (sip & MSB32)
res ^= cache[i];
sip <<= 1;
}
for (i = 0; i < 32; i++) {
if (dip & MSB32)
res ^= cache[32 + i];
dip <<= 1;
}
for (i = 0; i < 16; i++) {
if (sp & MSB16)
res ^= cache[64 + i];
sp <<= 1;
}
for (i = 0; i < 16; i++) {
if (dp & MSB16)
res ^= cache[80 + i];
dp <<= 1;
}
return res;
}
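hash_ipv4() above is the standard Toeplitz hash over the source/destination address and port 4-tuple, using the per-bit key words that build() precomputes into cache. A minimal usage sketch, assuming rss_key_cache::key_len is a constant accessible to the caller; the key words, addresses, and mask below are arbitrary examples, not values from the model.
static void rss_example() {
  // Example key words only; the remaining entries are zero-initialized.
  uint32_t key[rss_key_cache::key_len / 4] = {0x6d5a56da, 0x255b0ec2};
  rss_key_cache rss(key);
  uint32_t h = rss.hash_ipv4(0x0a000001 /* 10.0.0.1 */,
                             0x0a000002 /* 10.0.0.2 */, 49152, 80);
  uint32_t q = h & 0x1ff;  // low bits would typically index an indirection table
  (void)q;
}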
#if 0
......
......@@ -6,9 +6,10 @@
* All rights reserved.
*/
#include <arpa/inet.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>
#include <cassert>
#include <iostream>
......@@ -18,142 +19,130 @@ namespace i40e {
/* from dpdk/lib/librte_net/rte_tcp.h */
struct rte_tcp_hdr {
uint16_t src_port; /**< TCP source port. */
uint16_t dst_port; /**< TCP destination port. */
uint32_t sent_seq; /**< TX data sequence number. */
uint32_t recv_ack; /**< RX data acknowledgment sequence number. */
uint8_t data_off; /**< Data offset. */
uint8_t tcp_flags; /**< TCP flags */
uint16_t rx_win; /**< RX flow control window. */
uint16_t cksum; /**< TCP checksum. */
uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
uint16_t src_port; /**< TCP source port. */
uint16_t dst_port; /**< TCP destination port. */
uint32_t sent_seq; /**< TX data sequence number. */
uint32_t recv_ack; /**< RX data acknowledgment sequence number. */
uint8_t data_off; /**< Data offset. */
uint8_t tcp_flags; /**< TCP flags */
uint16_t rx_win; /**< RX flow control window. */
uint16_t cksum; /**< TCP checksum. */
uint16_t tcp_urp; /**< TCP urgent pointer, if any. */
} __attribute__((packed));
/* from dpdk/lib/librte_net/rte_ip.h */
struct ipv4_hdr {
uint8_t version_ihl; /**< version and header length */
uint8_t type_of_service; /**< type of service */
uint16_t total_length; /**< length of packet */
uint16_t packet_id; /**< packet ID */
uint16_t fragment_offset; /**< fragmentation offset */
uint8_t time_to_live; /**< time to live */
uint8_t next_proto_id; /**< protocol ID */
uint16_t hdr_checksum; /**< header checksum */
uint32_t src_addr; /**< source address */
uint32_t dst_addr; /**< destination address */
uint8_t version_ihl; /**< version and header length */
uint8_t type_of_service; /**< type of service */
uint16_t total_length; /**< length of packet */
uint16_t packet_id; /**< packet ID */
uint16_t fragment_offset; /**< fragmentation offset */
uint8_t time_to_live; /**< time to live */
uint8_t next_proto_id; /**< protocol ID */
uint16_t hdr_checksum; /**< header checksum */
uint32_t src_addr; /**< source address */
uint32_t dst_addr; /**< destination address */
} __attribute__((packed));
static inline uint32_t __rte_raw_cksum(const void *buf, size_t len,
uint32_t sum)
{
/* workaround gcc strict-aliasing warning */
uintptr_t ptr = (uintptr_t)buf;
typedef uint16_t __attribute__((__may_alias__)) u16_p;
const u16_p *u16_buf = (const u16_p *)ptr;
while (len >= (sizeof(*u16_buf) * 4)) {
sum += u16_buf[0];
sum += u16_buf[1];
sum += u16_buf[2];
sum += u16_buf[3];
len -= sizeof(*u16_buf) * 4;
u16_buf += 4;
}
while (len >= sizeof(*u16_buf)) {
sum += *u16_buf;
len -= sizeof(*u16_buf);
u16_buf += 1;
}
/* if length is in odd bytes */
if (len == 1) {
uint16_t left = 0;
*(uint8_t *)&left = *(const uint8_t *)u16_buf;
sum += left;
}
return sum;
uint32_t sum) {
/* workaround gcc strict-aliasing warning */
uintptr_t ptr = (uintptr_t)buf;
typedef uint16_t __attribute__((__may_alias__)) u16_p;
const u16_p *u16_buf = (const u16_p *)ptr;
while (len >= (sizeof(*u16_buf) * 4)) {
sum += u16_buf[0];
sum += u16_buf[1];
sum += u16_buf[2];
sum += u16_buf[3];
len -= sizeof(*u16_buf) * 4;
u16_buf += 4;
}
while (len >= sizeof(*u16_buf)) {
sum += *u16_buf;
len -= sizeof(*u16_buf);
u16_buf += 1;
}
/* if length is in odd bytes */
if (len == 1) {
uint16_t left = 0;
*(uint8_t *)&left = *(const uint8_t *)u16_buf;
sum += left;
}
return sum;
}
static inline uint16_t __rte_raw_cksum_reduce(uint32_t sum)
{
sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
return (uint16_t)sum;
static inline uint16_t __rte_raw_cksum_reduce(uint32_t sum) {
sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
return (uint16_t)sum;
}
static inline uint16_t rte_raw_cksum(const void *buf, size_t len)
{
uint32_t sum;
static inline uint16_t rte_raw_cksum(const void *buf, size_t len) {
uint32_t sum;
sum = __rte_raw_cksum(buf, len, 0);
return __rte_raw_cksum_reduce(sum);
sum = __rte_raw_cksum(buf, len, 0);
return __rte_raw_cksum_reduce(sum);
}
static inline uint16_t rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr)
{
struct ipv4_psd_header {
uint32_t src_addr; /* IP address of source host. */
uint32_t dst_addr; /* IP address of destination host. */
uint8_t zero; /* zero. */
uint8_t proto; /* L4 protocol type. */
uint16_t len; /* L4 length. */
} psd_hdr;
psd_hdr.src_addr = ipv4_hdr->src_addr;
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
psd_hdr.len = htons(
(uint16_t)(ntohs(ipv4_hdr->total_length)
- sizeof(struct ipv4_hdr)));
return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
static inline uint16_t rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr) {
struct ipv4_psd_header {
uint32_t src_addr; /* IP address of source host. */
uint32_t dst_addr; /* IP address of destination host. */
uint8_t zero; /* zero. */
uint8_t proto; /* L4 protocol type. */
uint16_t len; /* L4 length. */
} psd_hdr;
psd_hdr.src_addr = ipv4_hdr->src_addr;
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
psd_hdr.len = htons(
(uint16_t)(ntohs(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr)));
return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
}
void xsum_tcp(void *tcphdr, size_t l4_len)
{
struct rte_tcp_hdr *tcph = reinterpret_cast<struct rte_tcp_hdr *> (tcphdr);
uint32_t cksum = rte_raw_cksum(tcphdr, l4_len);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
tcph->cksum = cksum;
void xsum_tcp(void *tcphdr, size_t l4_len) {
struct rte_tcp_hdr *tcph = reinterpret_cast<struct rte_tcp_hdr *>(tcphdr);
uint32_t cksum = rte_raw_cksum(tcphdr, l4_len);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
tcph->cksum = cksum;
}
void xsum_tcpip_tso(void *iphdr, uint8_t iplen, uint8_t l4len,
uint16_t paylen)
{
struct ipv4_hdr *ih = (struct ipv4_hdr *) iphdr;
struct rte_tcp_hdr *tcph = (struct rte_tcp_hdr *)
((uint8_t *) iphdr + iplen);
uint32_t cksum;
// calculate ip xsum
ih->total_length = htons(iplen + l4len + paylen);
ih->hdr_checksum = 0;
cksum = rte_raw_cksum(iphdr, iplen);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
ih->hdr_checksum = cksum;
// calculate tcp xsum
tcph->cksum = 0;
cksum = rte_raw_cksum(tcph, l4len + paylen);
cksum += rte_ipv4_phdr_cksum(ih);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
tcph->cksum = cksum;
uint16_t paylen) {
struct ipv4_hdr *ih = (struct ipv4_hdr *)iphdr;
struct rte_tcp_hdr *tcph = (struct rte_tcp_hdr *)((uint8_t *)iphdr + iplen);
uint32_t cksum;
// calculate ip xsum
ih->total_length = htons(iplen + l4len + paylen);
ih->hdr_checksum = 0;
cksum = rte_raw_cksum(iphdr, iplen);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
ih->hdr_checksum = cksum;
// calculate tcp xsum
tcph->cksum = 0;
cksum = rte_raw_cksum(tcph, l4len + paylen);
cksum += rte_ipv4_phdr_cksum(ih);
cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
cksum = (~cksum) & 0xffff;
tcph->cksum = cksum;
}
void tso_postupdate_header(void *iphdr, uint8_t iplen, uint8_t l4len,
uint16_t paylen)
{
struct ipv4_hdr *ih = (struct ipv4_hdr *) iphdr;
struct rte_tcp_hdr *tcph = (struct rte_tcp_hdr *)
((uint8_t *) iphdr + iplen);
tcph->sent_seq = htonl(ntohl(tcph->sent_seq) + paylen);
ih->packet_id = htons(ntohs(ih->packet_id) + 1);
uint16_t paylen) {
struct ipv4_hdr *ih = (struct ipv4_hdr *)iphdr;
struct rte_tcp_hdr *tcph = (struct rte_tcp_hdr *)((uint8_t *)iphdr + iplen);
tcph->sent_seq = htonl(ntohl(tcph->sent_seq) + paylen);
ih->packet_id = htons(ntohs(ih->packet_id) + 1);
}
} // namespace i40e
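A small usage sketch of the checksum helpers above: given a contiguous buffer with an IPv4 header followed directly by the TCP header and payload, xsum_tcpip_tso() rewrites total_length, the IP header checksum, and the TCP checksum (including the pseudo-header), and tso_postupdate_header() then advances the sequence number and IP ID between segments. The 20-byte header lengths assume no IP or TCP options; the buffer contents are placeholders.
static void xsum_example() {
  uint8_t pkt[20 + 20 + 1000] = {};  // IPv4 hdr + TCP hdr + 1000-byte payload
  // ... fill in addresses, ports, flags, payload ...
  i40e::xsum_tcpip_tso(pkt, 20, 20, 1000);
  i40e::tso_postupdate_header(pkt, 20, 20, 1000);  // per-segment update for TSO
}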
......@@ -27,290 +27,267 @@
#include <string>
class event {
public:
uint64_t ts;
public:
uint64_t ts;
event(uint64_t ts_)
: ts(ts_)
{
}
event(uint64_t ts_) : ts(ts_) {
}
virtual ~event() { }
virtual ~event() {
}
virtual void dump(std::ostream &out) = 0;
virtual void dump(std::ostream &out) = 0;
};
class EHostCall : public event {
public:
const std::string &fun;
public:
const std::string &fun;
EHostCall(uint64_t ts_, const std::string &fun_)
: event(ts_), fun(fun_)
{
}
EHostCall(uint64_t ts_, const std::string &fun_) : event(ts_), fun(fun_) {
}
virtual ~EHostCall() { }
virtual ~EHostCall() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": H.CALL " << fun << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": H.CALL " << fun << std::endl;
}
};
class EHostMsiX : public event {
public:
uint16_t vec;
public:
uint16_t vec;
EHostMsiX(uint64_t ts_, uint16_t vec_)
: event(ts_), vec(vec_)
{
}
EHostMsiX(uint64_t ts_, uint16_t vec_) : event(ts_), vec(vec_) {
}
virtual ~EHostMsiX() { }
virtual ~EHostMsiX() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": H.MSIX " << vec << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": H.MSIX " << vec << std::endl;
}
};
class EHostDmaR : public event {
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostDmaR(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_)
{
}
virtual ~EHostDmaR() { }
virtual void dump(std::ostream &out)
{
out << ts << ": H.DMAR id=" << id << " addr=" << addr << " size=" <<
size << std::endl;
}
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostDmaR(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_) {
}
virtual ~EHostDmaR() {
}
virtual void dump(std::ostream &out) {
out << ts << ": H.DMAR id=" << id << " addr=" << addr << " size=" << size
<< std::endl;
}
};
class EHostDmaW : public event {
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostDmaW(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_)
{
}
virtual ~EHostDmaW() { }
virtual void dump(std::ostream &out)
{
out << ts << ": H.DMAW id=" << id << " addr=" << addr << " size=" <<
size << std::endl;
}
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostDmaW(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_) {
}
virtual ~EHostDmaW() {
}
virtual void dump(std::ostream &out) {
out << ts << ": H.DMAW id=" << id << " addr=" << addr << " size=" << size
<< std::endl;
}
};
class EHostDmaC : public event {
public:
uint64_t id;
public:
uint64_t id;
EHostDmaC(uint64_t ts_, uint64_t id_)
: event(ts_), id(id_)
{
}
EHostDmaC(uint64_t ts_, uint64_t id_) : event(ts_), id(id_) {
}
virtual ~EHostDmaC() { }
virtual ~EHostDmaC() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": H.DMAC id=" << id << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": H.DMAC id=" << id << std::endl;
}
};
class EHostMmioR : public event {
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostMmioR(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_)
{
}
virtual ~EHostMmioR() { }
virtual void dump(std::ostream &out)
{
out << ts << ": H.MMIOR id=" << id << " addr=" << addr << " size=" <<
size << std::endl;
}
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostMmioR(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_) {
}
virtual ~EHostMmioR() {
}
virtual void dump(std::ostream &out) {
out << ts << ": H.MMIOR id=" << id << " addr=" << addr << " size=" << size
<< std::endl;
}
};
class EHostMmioW : public event {
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostMmioW(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_)
{
}
virtual ~EHostMmioW() { }
virtual void dump(std::ostream &out)
{
out << ts << ": H.MMIOW id=" << id << " addr=" << addr << " size=" <<
size << std::endl;
}
public:
uint64_t id;
uint64_t addr;
uint64_t size;
EHostMmioW(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_) {
}
virtual ~EHostMmioW() {
}
virtual void dump(std::ostream &out) {
out << ts << ": H.MMIOW id=" << id << " addr=" << addr << " size=" << size
<< std::endl;
}
};
class EHostMmioC : public event {
public:
uint64_t id;
public:
uint64_t id;
EHostMmioC(uint64_t ts_, uint64_t id_)
: event(ts_), id(id_)
{
}
EHostMmioC(uint64_t ts_, uint64_t id_) : event(ts_), id(id_) {
}
virtual ~EHostMmioC() { }
virtual ~EHostMmioC() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": H.MMIOC id=" << id << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": H.MMIOC id=" << id << std::endl;
}
};
class e_nic_msix : public event {
public:
uint16_t vec;
public:
uint16_t vec;
e_nic_msix(uint64_t ts_, uint16_t vec_)
: event(ts_), vec(vec_)
{
}
e_nic_msix(uint64_t ts_, uint16_t vec_) : event(ts_), vec(vec_) {
}
virtual ~e_nic_msix() { }
virtual ~e_nic_msix() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": N.MSIX " << vec << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": N.MSIX " << vec << std::endl;
}
};
class e_nic_dma_i : public event {
public:
uint64_t id;
uint64_t addr;
uint64_t size;
e_nic_dma_i(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_)
{
}
virtual ~e_nic_dma_i() { }
virtual void dump(std::ostream &out)
{
out << ts << ": N.DMAI id=" << id << " addr=" << addr << " size=" <<
size << std::endl;
}
public:
uint64_t id;
uint64_t addr;
uint64_t size;
e_nic_dma_i(uint64_t ts_, uint64_t id_, uint64_t addr_, uint64_t size_)
: event(ts_), id(id_), addr(addr_), size(size_) {
}
virtual ~e_nic_dma_i() {
}
virtual void dump(std::ostream &out) {
out << ts << ": N.DMAI id=" << id << " addr=" << addr << " size=" << size
<< std::endl;
}
};
class e_nic_dma_c : public event {
public:
uint64_t id;
public:
uint64_t id;
e_nic_dma_c(uint64_t ts_, uint64_t id_)
: event(ts_), id(id_) {
}
e_nic_dma_c(uint64_t ts_, uint64_t id_) : event(ts_), id(id_) {
}
virtual ~e_nic_dma_c() { }
virtual ~e_nic_dma_c() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": N.DMAC id=" << id << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": N.DMAC id=" << id << std::endl;
}
};
class e_nic_mmio_r : public event {
public:
uint64_t addr;
uint64_t size;
uint64_t val;
e_nic_mmio_r(uint64_t ts_, uint64_t addr_, uint64_t size_, uint64_t val_)
: event(ts_), addr(addr_), size(size_), val(val_)
{
}
virtual ~e_nic_mmio_r() { }
virtual void dump(std::ostream &out)
{
out << ts << ": N.MMIOR addr=" << addr << " size=" << size << " val=" <<
val << std::endl;
}
public:
uint64_t addr;
uint64_t size;
uint64_t val;
e_nic_mmio_r(uint64_t ts_, uint64_t addr_, uint64_t size_, uint64_t val_)
: event(ts_), addr(addr_), size(size_), val(val_) {
}
virtual ~e_nic_mmio_r() {
}
virtual void dump(std::ostream &out) {
out << ts << ": N.MMIOR addr=" << addr << " size=" << size << " val=" << val
<< std::endl;
}
};
class e_nic_mmio_w : public event {
public:
uint64_t addr;
uint64_t size;
uint64_t val;
e_nic_mmio_w(uint64_t ts_, uint64_t addr_, uint64_t size_, uint64_t val_)
: event(ts_), addr(addr_), size(size_), val(val_)
{
}
virtual ~e_nic_mmio_w() { }
virtual void dump(std::ostream &out)
{
out << ts << ": N.MMIOW addr=" << addr << " size=" << size << " val=" <<
val << std::endl;
}
public:
uint64_t addr;
uint64_t size;
uint64_t val;
e_nic_mmio_w(uint64_t ts_, uint64_t addr_, uint64_t size_, uint64_t val_)
: event(ts_), addr(addr_), size(size_), val(val_) {
}
virtual ~e_nic_mmio_w() {
}
virtual void dump(std::ostream &out) {
out << ts << ": N.MMIOW addr=" << addr << " size=" << size << " val=" << val
<< std::endl;
}
};
class e_nic_tx : public event {
public:
uint16_t len;
public:
uint16_t len;
e_nic_tx(uint64_t ts_, uint16_t len_)
: event(ts_), len(len_)
{
}
e_nic_tx(uint64_t ts_, uint16_t len_) : event(ts_), len(len_) {
}
virtual ~e_nic_tx() { }
virtual ~e_nic_tx() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": N.TX " << len << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": N.TX " << len << std::endl;
}
};
class e_nic_rx : public event {
public:
uint16_t len;
public:
uint16_t len;
e_nic_rx(uint64_t ts_, uint16_t len_)
: event(ts_), len(len_)
{
}
e_nic_rx(uint64_t ts_, uint16_t len_) : event(ts_), len(len_) {
}
virtual ~e_nic_rx() { }
virtual ~e_nic_rx() {
}
virtual void dump(std::ostream &out)
{
out << ts << ": N.RX " << len << std::endl;
}
virtual void dump(std::ostream &out) {
out << ts << ": N.RX " << len << std::endl;
}
};
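All event subclasses above carry a timestamp and a virtual dump(), so a merged trace can be handled through base-class pointers and specific kinds picked out with dynamic_cast, the same way main() below recognizes EHostCall. A small illustrative helper, not part of the tool:
#include <cstddef>
#include <vector>
// Count NIC MSI-X interrupts in a batch of parsed events (sketch only).
static size_t count_nic_msix(const std::vector<event *> &evs) {
  size_t n = 0;
  for (event *e : evs)
    if (dynamic_cast<e_nic_msix *>(e))
      n++;
  return n;
}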
......@@ -30,160 +30,154 @@
namespace bio = boost::iostreams;
gem5_parser::gem5_parser(sym_map &syms_)
: syms(syms_)
{
gem5_parser::gem5_parser(sym_map &syms_) : syms(syms_) {
}
gem5_parser::~gem5_parser()
{
gem5_parser::~gem5_parser() {
}
void gem5_parser::process_msg(uint64_t ts, char *comp_name,
size_t comp_name_len, char *msg, size_t msg_len)
{
parser p(msg, msg_len, 0);
/*if (ts < ts_first)
return;*/
if (comp_name_len == 18 && !memcmp(comp_name, "system.switch_cpus", 18)) {
// cpu_lines++;
if (!p.consume_str("T0 : 0x"))
return;
uint64_t addr;
if (!p.consume_hex(addr) || p.consume_char('.'))
return;
if (const std::string *s = syms.lookup(addr)) {
cur_event = new EHostCall(ts, *s);
}
} else if (comp_name_len == 18 &&
!memcmp(comp_name, "system.pc.ethernet", 18)) {
// eth_lines++;
/*std::cout.write(msg, msg_len);
std::cout << std::endl;*/
if (!p.consume_str("cosim: "))
return;
uint64_t id = 0;
uint64_t addr = 0;
uint64_t size = 0;
if (p.consume_str("received ")) {
if (p.consume_str("MSI-X intr vec ") && p.consume_dec(id)) {
cur_event = new EHostMsiX(ts, id);
} else if (p.consume_str("DMA read id ") && p.consume_dec(id) &&
p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size)) {
// cosim: received DMA read id 94113551511792 addr 23697ad60
// size 20
cur_event = new EHostDmaR(ts, id, addr, size);
} else if (p.consume_str("DMA write id ") && p.consume_dec(id) &&
p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size)) {
// cosim: received DMA write id 94113551528032 addr 236972000
// size 4
cur_event = new EHostDmaW(ts, id, addr, size);
} else if (p.consume_str("read completion id ") &&
p.consume_dec(id)) {
// cosim: received read completion id 94583743418112
cur_event = new EHostMmioC(ts, id);
} else if (p.consume_str("write completion id ") &&
p.consume_dec(id)) {
// cosim: received write completion id 94583743418736
cur_event = new EHostMmioC(ts, id);
}
} else if (p.consume_str("sending ")) {
if (p.consume_str("read addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size) &&
p.consume_str(" id ") && p.consume_dec(id)) {
// cosim: sending read addr c012a500 size 4 id 94583743418112
cur_event = new EHostMmioR(ts, id, addr, size);
} else if (p.consume_str("write addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size) &&
p.consume_str(" id ") && p.consume_dec(id)) {
// cosim: sending write addr c0108000 size 4 id 94584005188256
cur_event = new EHostMmioW(ts, id, addr, size);
}
} else if (p.consume_str("completed DMA id ") && p.consume_dec(id)) {
cur_event = new EHostDmaC(ts, id);
}
}
/*if (!cur_event) {
std::cout.write(msg, msg_len);
std::cout << std::endl;
}*/
}
size_t comp_name_len, char *msg, size_t msg_len) {
parser p(msg, msg_len, 0);
/*if (ts < ts_first)
return;*/
void gem5_parser::process_line(char *line, size_t line_len)
{
size_t pos = 0;
if (comp_name_len == 18 && !memcmp(comp_name, "system.switch_cpus", 18)) {
// cpu_lines++;
if (!p.consume_str("T0 : 0x"))
return;
size_t line_start = pos;
size_t comp_name_start = 0;
size_t comp_name_len = 0;
bool valid = true;
uint64_t addr;
if (!p.consume_hex(addr) || p.consume_char('.'))
return;
// eat spaces
for (; pos < line_len && line[pos] == ' '; pos++) {}
// parse ts
uint64_t ts = 0;
size_t ts_len = 0;
for (; pos < line_len && line[pos] >= '0' && line[pos] <= '9'; pos++) {
ts = ts * 10 + line[pos] - '0';
ts_len++;
if (const std::string *s = syms.lookup(addr)) {
cur_event = new EHostCall(ts, *s);
}
if (ts_len == 0) {
valid = false;
goto out;
} else if (comp_name_len == 18 &&
!memcmp(comp_name, "system.pc.ethernet", 18)) {
// eth_lines++;
/*std::cout.write(msg, msg_len);
std::cout << std::endl;*/
if (!p.consume_str("cosim: "))
return;
uint64_t id = 0;
uint64_t addr = 0;
uint64_t size = 0;
if (p.consume_str("received ")) {
if (p.consume_str("MSI-X intr vec ") && p.consume_dec(id)) {
cur_event = new EHostMsiX(ts, id);
} else if (p.consume_str("DMA read id ") && p.consume_dec(id) &&
p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size)) {
// cosim: received DMA read id 94113551511792 addr 23697ad60
// size 20
cur_event = new EHostDmaR(ts, id, addr, size);
} else if (p.consume_str("DMA write id ") && p.consume_dec(id) &&
p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size)) {
// cosim: received DMA write id 94113551528032 addr 236972000
// size 4
cur_event = new EHostDmaW(ts, id, addr, size);
} else if (p.consume_str("read completion id ") && p.consume_dec(id)) {
// cosim: received read completion id 94583743418112
cur_event = new EHostMmioC(ts, id);
} else if (p.consume_str("write completion id ") && p.consume_dec(id)) {
// cosim: received write completion id 94583743418736
cur_event = new EHostMmioC(ts, id);
}
} else if (p.consume_str("sending ")) {
if (p.consume_str("read addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size) &&
p.consume_str(" id ") && p.consume_dec(id)) {
// cosim: sending read addr c012a500 size 4 id 94583743418112
cur_event = new EHostMmioR(ts, id, addr, size);
} else if (p.consume_str("write addr ") && p.consume_hex(addr) &&
p.consume_str(" size ") && p.consume_dec(size) &&
p.consume_str(" id ") && p.consume_dec(id)) {
// cosim: sending write addr c0108000 size 4 id 94584005188256
cur_event = new EHostMmioW(ts, id, addr, size);
}
} else if (p.consume_str("completed DMA id ") && p.consume_dec(id)) {
cur_event = new EHostDmaC(ts, id);
}
}
// skip colon
if (line[pos] != ':') {
valid = false;
goto out;
}
pos++;
/*if (!cur_event) {
std::cout.write(msg, msg_len);
std::cout << std::endl;
}*/
}
// skip space
if (line[pos] != ' ') {
valid = false;
goto out;
}
pos++;
comp_name_start = pos;
for (; pos < line_len && line[pos] != ' ' && line[pos] != '\n'; pos++,
comp_name_len++) {}
// skip space
if (line[pos] != ' ') {
valid = false;
goto out;
}
if (line[pos - 1] != ':') {
valid = false;
goto out;
}
comp_name_len--;
pos++;
void gem5_parser::process_line(char *line, size_t line_len) {
size_t pos = 0;
size_t line_start = pos;
size_t comp_name_start = 0;
size_t comp_name_len = 0;
bool valid = true;
// eat spaces
for (; pos < line_len && line[pos] == ' '; pos++) {
}
// parse ts
uint64_t ts = 0;
size_t ts_len = 0;
for (; pos < line_len && line[pos] >= '0' && line[pos] <= '9'; pos++) {
ts = ts * 10 + line[pos] - '0';
ts_len++;
}
if (ts_len == 0) {
valid = false;
goto out;
}
// skip colon
if (line[pos] != ':') {
valid = false;
goto out;
}
pos++;
// skip space
if (line[pos] != ' ') {
valid = false;
goto out;
}
pos++;
comp_name_start = pos;
for (; pos < line_len && line[pos] != ' ' && line[pos] != '\n';
pos++, comp_name_len++) {
}
// skip space
if (line[pos] != ' ') {
valid = false;
goto out;
}
if (line[pos - 1] != ':') {
valid = false;
goto out;
}
comp_name_len--;
pos++;
out:
size_t msg_start = pos;
size_t msg_len = line_len - msg_start;
line[line_len - 1] = 0;
if (valid) {
process_msg(ts, line + comp_name_start, comp_name_len, line + msg_start,
size_t msg_start = pos;
size_t msg_len = line_len - msg_start;
line[line_len - 1] = 0;
if (valid) {
process_msg(ts, line + comp_name_start, comp_name_len, line + msg_start,
msg_len);
} else {
std::cout << line + line_start << std::endl;
std::cout << pos << std::endl;
}
} else {
std::cout << line + line_start << std::endl;
std::cout << pos << std::endl;
}
return;
return;
}
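For reference, the comment strings in process_msg() above show the gem5 log lines being matched. Since process_line() is protected and normally reached through next_event(), the sketch below uses a small test shim purely for illustration; the log line is taken from the comments and the numbers in it are examples.
// Illustration only: expose the protected entry point for a single line.
struct gem5_test : gem5_parser {
  using gem5_parser::gem5_parser;
  using gem5_parser::process_line;
};
static void gem5_parse_example() {
  sym_map syms;
  gem5_test gp(syms);
  char line[] =
      "1000: system.pc.ethernet: cosim: sending read addr c012a500 size 4 "
      "id 94583743418112\n";
  gp.process_line(line, sizeof(line) - 1);
  // gp.cur_event now points to an EHostMmioR with ts=1000, addr=0xc012a500,
  // size=4, id=94583743418112; lines from system.switch_cpus become EHostCall
  // events when the address resolves through sym_map.
}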
......@@ -22,10 +22,10 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <fstream>
#include <iostream>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include "trace/events.h"
#include "trace/parser.h"
......@@ -33,100 +33,91 @@
namespace bio = boost::iostreams;
log_parser::log_parser()
: inf(nullptr), gz_file(nullptr), gz_in(nullptr)
{
buf = new char[block_size];
log_parser::log_parser() : inf(nullptr), gz_file(nullptr), gz_in(nullptr) {
buf = new char[block_size];
}
log_parser::~log_parser()
{
if (inf)
delete inf;
if (gz_file) {
delete gz_in;
delete gz_file;
}
delete[] buf;
log_parser::~log_parser() {
if (inf)
delete inf;
if (gz_file) {
delete gz_in;
delete gz_file;
}
delete[] buf;
}
bool log_parser::next_block()
{
if (buf_pos == buf_len) {
buf_pos = 0;
} else {
memmove(buf, buf + buf_pos, buf_len - buf_pos);
buf_pos = buf_len - buf_pos;
}
bool log_parser::next_block() {
if (buf_pos == buf_len) {
buf_pos = 0;
} else {
memmove(buf, buf + buf_pos, buf_len - buf_pos);
buf_pos = buf_len - buf_pos;
}
inf->read(buf + buf_pos, block_size - buf_pos);
size_t newlen = inf->gcount();
inf->read(buf + buf_pos, block_size - buf_pos);
size_t newlen = inf->gcount();
buf_len = buf_pos + newlen;
buf_pos = 0;
buf_len = buf_pos + newlen;
buf_pos = 0;
return newlen != 0;
return newlen != 0;
}
void log_parser::open(const char *path)
{
inf = new std::ifstream(path, std::ios_base::in);
void log_parser::open(const char *path) {
inf = new std::ifstream(path, std::ios_base::in);
}
void log_parser::open_gz(const char *path)
{
gz_file = new std::ifstream(path, std::ios_base::in |
std::ios_base::binary);
gz_in = new bio::filtering_streambuf<bio::input>();
void log_parser::open_gz(const char *path) {
gz_file = new std::ifstream(path, std::ios_base::in | std::ios_base::binary);
gz_in = new bio::filtering_streambuf<bio::input>();
gz_in->push(bio::gzip_decompressor());
gz_in->push(*gz_file);
gz_in->push(bio::gzip_decompressor());
gz_in->push(*gz_file);
inf = new std::istream(gz_in);
inf = new std::istream(gz_in);
}
size_t log_parser::try_line()
{
size_t pos = buf_pos;
size_t line_len = 0;
size_t log_parser::try_line() {
size_t pos = buf_pos;
size_t line_len = 0;
for (; pos < buf_len && buf[pos] != '\n'; pos++, line_len++) {}
if (pos >= buf_len) {
// line is incomplete
return 0;
}
for (; pos < buf_len && buf[pos] != '\n'; pos++, line_len++) {
}
if (pos >= buf_len) {
// line is incomplete
return 0;
}
process_line(buf + buf_pos, line_len);
process_line(buf + buf_pos, line_len);
return pos + 1;
return pos + 1;
}
bool log_parser::next_event() {
cur_event = nullptr;
bool log_parser::next_event()
{
cur_event = nullptr;
if (buf_len == 0 && !next_block()) {
std::cerr << "escape 0" << std::endl;
return false;
}
do {
size_t newpos = try_line();
if (!newpos) {
if (!next_block()) {
std::cerr << "escape 1" << std::endl;
return false;
}
if (buf_len == 0 && !next_block()) {
std::cerr << "escape 0" << std::endl;
newpos = try_line();
if (!newpos) {
std::cerr << "escape 2" << std::endl;
return false;
}
}
buf_pos = newpos;
} while (!cur_event);
do {
size_t newpos = try_line();
if (!newpos) {
if (!next_block()) {
std::cerr << "escape 1" << std::endl;
return false;
}
newpos = try_line();
if (!newpos) {
std::cerr << "escape 2" << std::endl;
return false;
}
}
buf_pos = newpos;
} while (!cur_event);
return true;
return true;
}
......@@ -30,77 +30,59 @@
namespace bio = boost::iostreams;
nicbm_parser::~nicbm_parser()
{
nicbm_parser::~nicbm_parser() {
}
void nicbm_parser::process_line(char *line, size_t line_len)
{
parser p(line, line_len, 0);
void nicbm_parser::process_line(char *line, size_t line_len) {
parser p(line, line_len, 0);
uint64_t ts;
if (!p.consume_dec(ts))
return;
uint64_t ts;
if (!p.consume_dec(ts))
return;
if (!p.consume_str(" nicbm: "))
return;
if (!p.consume_str(" nicbm: "))
return;
uint64_t id, addr, len, val;
if (p.consume_str("read(off=0x")) {
if (p.consume_hex(addr) &&
p.consume_str(", len=") &&
p.consume_dec(len) &&
p.consume_str(", val=0x") &&
p.consume_hex(val))
{
cur_event = new e_nic_mmio_r(ts, addr, len, val);
}
} else if (p.consume_str("write(off=0x")) {
if (p.consume_hex(addr) &&
p.consume_str(", len=") &&
p.consume_dec(len) &&
p.consume_str(", val=0x") &&
p.consume_hex(val))
{
cur_event = new e_nic_mmio_w(ts, addr, len, val);
}
} else if (p.consume_str("issuing dma op 0x")) {
if (p.consume_hex(id) &&
p.consume_str(" addr ") &&
p.consume_hex(addr) &&
p.consume_str(" len ") &&
p.consume_hex(len))
{
cur_event = new e_nic_dma_i(ts, id, addr, len);
}
} else if (p.consume_str("completed dma read op 0x") ||
p.consume_str("completed dma write op 0x")) {
if (p.consume_hex(id) &&
p.consume_str(" addr ") &&
p.consume_hex(addr) &&
p.consume_str(" len ") &&
p.consume_hex(len))
{
cur_event = new e_nic_dma_c(ts, id);
}
} else if (p.consume_str("issue MSI-X interrupt vec ")) {
if (p.consume_dec(id)) {
cur_event = new e_nic_msix(ts, id);
}
} else if (p.consume_str("eth tx: len ")) {
if (p.consume_dec(len)) {
cur_event = new e_nic_tx(ts, len);
}
} else if (p.consume_str("eth rx: port 0 len ")) {
if (p.consume_dec(len)) {
cur_event = new e_nic_rx(ts, len);
}
#if 1
uint64_t id, addr, len, val;
if (p.consume_str("read(off=0x")) {
if (p.consume_hex(addr) && p.consume_str(", len=") && p.consume_dec(len) &&
p.consume_str(", val=0x") && p.consume_hex(val)) {
cur_event = new e_nic_mmio_r(ts, addr, len, val);
}
#else
} else {
std::cerr.write(line, line_len);
std::cerr << std::endl;
} else if (p.consume_str("write(off=0x")) {
if (p.consume_hex(addr) && p.consume_str(", len=") && p.consume_dec(len) &&
p.consume_str(", val=0x") && p.consume_hex(val)) {
cur_event = new e_nic_mmio_w(ts, addr, len, val);
}
} else if (p.consume_str("issuing dma op 0x")) {
if (p.consume_hex(id) && p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" len ") && p.consume_hex(len)) {
cur_event = new e_nic_dma_i(ts, id, addr, len);
}
} else if (p.consume_str("completed dma read op 0x") ||
p.consume_str("completed dma write op 0x")) {
if (p.consume_hex(id) && p.consume_str(" addr ") && p.consume_hex(addr) &&
p.consume_str(" len ") && p.consume_hex(len)) {
cur_event = new e_nic_dma_c(ts, id);
}
} else if (p.consume_str("issue MSI-X interrupt vec ")) {
if (p.consume_dec(id)) {
cur_event = new e_nic_msix(ts, id);
}
} else if (p.consume_str("eth tx: len ")) {
if (p.consume_dec(len)) {
cur_event = new e_nic_tx(ts, len);
}
} else if (p.consume_str("eth rx: port 0 len ")) {
if (p.consume_dec(len)) {
cur_event = new e_nic_rx(ts, len);
}
#if 1
}
#else
} else {
std::cerr.write(line, line_len);
std::cerr << std::endl;
}
#endif
}
......@@ -29,94 +29,89 @@
#include <string>
class parser {
protected:
const char *buf;
size_t buf_len;
size_t pos;
public:
parser(const char *buf_, size_t buf_len_, size_t start_pos = 0)
: buf(buf_), buf_len(buf_len_), pos(start_pos)
{
protected:
const char *buf;
size_t buf_len;
size_t pos;
public:
parser(const char *buf_, size_t buf_len_, size_t start_pos = 0)
: buf(buf_), buf_len(buf_len_), pos(start_pos) {
}
inline size_t trim_spaces() {
size_t cnt = 0;
for (; pos < buf_len && buf[pos] == ' '; pos++, cnt++) {
}
return cnt;
}
inline size_t trim_spaces()
{
size_t cnt = 0;
for (; pos < buf_len && buf[pos] == ' '; pos++, cnt++) {}
return cnt;
inline bool consume_char(char c) {
if (pos == buf_len || buf[pos] != c) {
return false;
}
inline bool consume_char(char c)
{
if (pos == buf_len || buf[pos] != c) {
return false;
}
pos++;
return true;
}
inline bool consume_hex(uint64_t &val)
{
size_t val_len = 0;
val = 0;
for (; pos < buf_len; pos++) {
char d = buf[pos];
bool is_d = d >= '0' && d <= '9';
bool is_x = d >= 'a' && d <= 'f';
if (!is_d && !is_x)
break;
val <<= 4;
if (is_d)
val |= d - '0';
else
val |= d - 'a' + 10;
val_len++;
}
return val_len > 0;
pos++;
return true;
}
inline bool consume_hex(uint64_t &val) {
size_t val_len = 0;
val = 0;
for (; pos < buf_len; pos++) {
char d = buf[pos];
bool is_d = d >= '0' && d <= '9';
bool is_x = d >= 'a' && d <= 'f';
if (!is_d && !is_x)
break;
val <<= 4;
if (is_d)
val |= d - '0';
else
val |= d - 'a' + 10;
val_len++;
}
inline bool consume_dec(uint64_t &val)
{
size_t val_len = 0;
val = 0;
for (; pos < buf_len; pos++) {
char d = buf[pos];
if (d < '0' || d > '9')
break;
return val_len > 0;
}
val = val * 10 + (d - '0');
val_len++;
}
inline bool consume_dec(uint64_t &val) {
size_t val_len = 0;
val = 0;
for (; pos < buf_len; pos++) {
char d = buf[pos];
if (d < '0' || d > '9')
break;
return val_len > 0;
val = val * 10 + (d - '0');
val_len++;
}
inline bool consume_str(const char *str)
{
size_t str_len = strlen(str);
if (pos + str_len > buf_len || memcmp(buf + pos, str, str_len)) {
return false;
}
return val_len > 0;
}
pos += str_len;
return true;
inline bool consume_str(const char *str) {
size_t str_len = strlen(str);
if (pos + str_len > buf_len || memcmp(buf + pos, str, str_len)) {
return false;
}
inline bool extract_until(char end_c, std::string &str)
{
size_t end = pos;
for (; end < buf_len && buf[end] != end_c; end++) {}
pos += str_len;
return true;
}
if (end >= buf_len)
return false;
str.assign(buf + pos, end - pos);
pos = end + 1;
return true;
inline bool extract_until(char end_c, std::string &str) {
size_t end = pos;
for (; end < buf_len && buf[end] != end_c; end++) {
}
if (end >= buf_len)
return false;
str.assign(buf + pos, end - pos);
pos = end + 1;
return true;
}
};
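A minimal usage sketch of the parser helpers above, chaining consume_* calls the same way nicbm_parser::process_line() does; the input line and values are made up.
static bool parser_example() {
  const char line[] = "186000 nicbm: eth tx: len 1514";
  parser p(line, sizeof(line) - 1);
  uint64_t ts = 0, len = 0;
  return p.consume_dec(ts) &&
         p.consume_str(" nicbm: eth tx: len ") &&
         p.consume_dec(len);  // ts == 186000, len == 1514 on success
}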
......@@ -22,89 +22,88 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "trace/process.h"
#include <iostream>
#include "trace/events.h"
#include "trace/parser.h"
#include "trace/process.h"
struct log_parser_cmp {
bool operator() (const log_parser *l, const log_parser *r) const {
return l->cur_event->ts < r->cur_event->ts;
}
bool operator()(const log_parser *l, const log_parser *r) const {
return l->cur_event->ts < r->cur_event->ts;
}
};
int main(int argc, char *argv[])
{
sym_map syms;
syms.add_filter("entry_SYSCALL_64");
syms.add_filter("__do_sys_gettimeofday");
syms.add_filter("__sys_sendto");
syms.add_filter("i40e_lan_xmit_frame");
syms.add_filter("syscall_return_via_sysret");
syms.add_filter("__sys_recvfrom");
syms.add_filter("deactivate_task");
syms.add_filter("interrupt_entry");
syms.add_filter("i40e_msix_clean_rings");
syms.add_filter("napi_schedule_prep");
syms.add_filter("__do_softirq");
syms.add_filter("trace_napi_poll");
syms.add_filter("net_rx_action");
syms.add_filter("i40e_napi_poll");
syms.add_filter("activate_task");
syms.add_filter("copyout");
int main(int argc, char *argv[]) {
sym_map syms;
syms.add_filter("entry_SYSCALL_64");
syms.add_filter("__do_sys_gettimeofday");
syms.add_filter("__sys_sendto");
syms.add_filter("i40e_lan_xmit_frame");
syms.add_filter("syscall_return_via_sysret");
syms.add_filter("__sys_recvfrom");
syms.add_filter("deactivate_task");
syms.add_filter("interrupt_entry");
syms.add_filter("i40e_msix_clean_rings");
syms.add_filter("napi_schedule_prep");
syms.add_filter("__do_softirq");
syms.add_filter("trace_napi_poll");
syms.add_filter("net_rx_action");
syms.add_filter("i40e_napi_poll");
syms.add_filter("activate_task");
syms.add_filter("copyout");
syms.load_file("linux.dump", 0);
syms.load_file("i40e.dump", 0xffffffffa0000000ULL);
std::cerr << "map loaded" << std::endl;
syms.load_file("linux.dump", 0);
syms.load_file("i40e.dump", 0xffffffffa0000000ULL);
std::cerr << "map loaded" << std::endl;
std::set<log_parser *> all_parsers;
gem5_parser ch(syms);
gem5_parser sh(syms);
nicbm_parser cn;
nicbm_parser sn;
ch.open(argv[1]);
cn.open(argv[2]);
sh.open(argv[3]);
sn.open(argv[4]);
ch.label = cn.label = "C";
sh.label = sn.label = "S";
all_parsers.insert(&ch);
all_parsers.insert(&cn);
all_parsers.insert(&sh);
all_parsers.insert(&sn);
std::set<log_parser *> all_parsers;
gem5_parser ch(syms);
gem5_parser sh(syms);
nicbm_parser cn;
nicbm_parser sn;
ch.open(argv[1]);
cn.open(argv[2]);
sh.open(argv[3]);
sn.open(argv[4]);
ch.label = cn.label = "C";
sh.label = sn.label = "S";
all_parsers.insert(&ch);
all_parsers.insert(&cn);
all_parsers.insert(&sh);
all_parsers.insert(&sn);
std::set<log_parser *, log_parser_cmp> active_parsers;
std::set<log_parser *, log_parser_cmp> active_parsers;
for (auto p : all_parsers) {
if (p->next_event() && p->cur_event)
active_parsers.insert(p);
}
for (auto p : all_parsers) {
if (p->next_event() && p->cur_event)
active_parsers.insert(p);
}
uint64_t ts_off = 0;
while (!active_parsers.empty()) {
auto i = active_parsers.begin();
log_parser *p = *i;
active_parsers.erase(i);
uint64_t ts_off = 0;
while (!active_parsers.empty()) {
auto i = active_parsers.begin();
log_parser *p = *i;
active_parsers.erase(i);
EHostCall *hc;
event *ev = p->cur_event;
if (p == &ch && (hc = dynamic_cast<EHostCall *>(ev)) &&
hc->fun == "__sys_sendto")
{
std::cout << "---------- REQ START:" << ev->ts << std::endl;
ts_off = ev->ts;
}
EHostCall *hc;
event *ev = p->cur_event;
if (p == &ch && (hc = dynamic_cast<EHostCall *>(ev)) &&
hc->fun == "__sys_sendto") {
std::cout << "---------- REQ START:" << ev->ts << std::endl;
ts_off = ev->ts;
}
std::cout << p->label << " ";
std::cout << p->label << " ";
ev->ts -= ts_off;
ev->ts /= 1000;
ev->dump(std::cout);
ev->ts -= ts_off;
ev->ts /= 1000;
ev->dump(std::cout);
delete ev;
delete ev;
if (p->next_event() && p->cur_event)
active_parsers.insert(p);
}
if (p->next_event() && p->cur_event)
active_parsers.insert(p);
}
}
......@@ -24,83 +24,82 @@
#pragma once
#include <boost/iostreams/filtering_streambuf.hpp>
#include <map>
#include <set>
#include <string>
#include <boost/iostreams/filtering_streambuf.hpp>
#include "trace/events.h"
class sym_map {
protected:
bool filter_en;
bool insmap_en;
std::set<std::string> filter;
protected:
bool filter_en;
bool insmap_en;
std::set<std::string> filter;
public:
std::map<uint64_t, std::string> map;
std::map<uint64_t, std::string> map_ins;
public:
std::map<uint64_t, std::string> map;
std::map<uint64_t, std::string> map_ins;
sym_map();
sym_map();
void add_filter(const std::string &sym);
void load_file(const char *path, uint64_t offset = 0);
void add_filter(const std::string &sym);
void load_file(const char *path, uint64_t offset = 0);
inline const std::string *lookup(uint64_t addr)
{
auto it = map.find(addr);
if (it == map.end())
return nullptr;
inline const std::string *lookup(uint64_t addr) {
auto it = map.find(addr);
if (it == map.end())
return nullptr;
return &it->second;
}
return &it->second;
}
};
class log_parser {
protected:
std::istream *inf;
protected:
std::istream *inf;
std::ifstream *gz_file;
boost::iostreams::filtering_streambuf<boost::iostreams::input> *gz_in;
std::ifstream *gz_file;
boost::iostreams::filtering_streambuf<boost::iostreams::input> *gz_in;
static const size_t block_size = 16 * 1024 * 1024;
char *buf;
size_t buf_len;
size_t buf_pos;
static const size_t block_size = 16 * 1024 * 1024;
char *buf;
size_t buf_len;
size_t buf_pos;
bool next_block();
size_t try_line();
virtual void process_line(char *line, size_t len) = 0;
bool next_block();
size_t try_line();
virtual void process_line(char *line, size_t len) = 0;
public:
const char *label;
event *cur_event;
public:
const char *label;
event *cur_event;
log_parser();
virtual ~log_parser();
void open(const char *path);
void open_gz(const char *path);
log_parser();
virtual ~log_parser();
void open(const char *path);
void open_gz(const char *path);
bool next_event();
bool next_event();
};
class gem5_parser : public log_parser {
protected:
sym_map &syms;
protected:
sym_map &syms;
virtual void process_line(char *line, size_t len);
void process_msg(uint64_t ts, char *comp_name, size_t comp_name_len,
char *msg, size_t msg_len);
virtual void process_line(char *line, size_t len);
void process_msg(uint64_t ts, char *comp_name, size_t comp_name_len,
char *msg, size_t msg_len);
public:
gem5_parser(sym_map &syms_);
virtual ~gem5_parser();
public:
gem5_parser(sym_map &syms_);
virtual ~gem5_parser();
};
class nicbm_parser : public log_parser {
protected:
virtual void process_line(char *line, size_t len);
protected:
virtual void process_line(char *line, size_t len);
public:
virtual ~nicbm_parser();
public:
virtual ~nicbm_parser();
};