Commit 7e4f44db authored by Antoine Kaufmann

i40e: checkpoint descriptor queue rework

parent e3a2f334
@@ -17,9 +17,43 @@ queue_admin_tx::queue_admin_tx(i40e_bm &dev_, uint64_t &reg_base_,
reg_len(reg_len_)
{
desc_len = 32;
ctxs_init();
}
void queue_admin_tx::desc_compl_prepare(struct i40e_aq_desc *d, uint16_t retval,
queue_base::desc_ctx &queue_admin_tx::desc_ctx_create()
{
return *new admin_desc_ctx(*this, dev);
}
void queue_admin_tx::reg_updated()
{
base = reg_base;
len = (reg_len & I40E_GL_ATQLEN_ATQLEN_MASK) >> I40E_GL_ATQLEN_ATQLEN_SHIFT;
if (!enabled && (reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
std::cerr << "enable atq base=" << base << " len=" << len << std::endl;
enabled = true;
} else if (enabled && !(reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
std::cerr << "disable atq" << std::endl;
enabled = false;
}
queue_base::reg_updated();
}
queue_admin_tx::admin_desc_ctx::admin_desc_ctx(queue_admin_tx &queue_,
i40e_bm &dev_)
: i40e::queue_base::desc_ctx(queue_), aq(queue_), dev(dev_)
{
d = reinterpret_cast <struct i40e_aq_desc *> (desc);
}
void queue_admin_tx::admin_desc_ctx::data_written(uint64_t addr, size_t len)
{
processed();
}
void queue_admin_tx::admin_desc_ctx::desc_compl_prepare(uint16_t retval,
uint16_t extra_flags)
{
d->flags &= ~0x1ff;
@@ -29,16 +63,15 @@ void queue_admin_tx::desc_compl_prepare(struct i40e_aq_desc *d, uint16_t retval,
d->retval = retval;
}
void queue_admin_tx::desc_complete(struct i40e_aq_desc *d, uint32_t idx,
uint16_t retval, uint16_t extra_flags)
void queue_admin_tx::admin_desc_ctx::desc_complete(uint16_t retval,
uint16_t extra_flags)
{
desc_compl_prepare(d, extra_flags, retval);
desc_writeback(d, idx);
desc_compl_prepare(retval, extra_flags);
processed();
}
void queue_admin_tx::desc_complete_indir(struct i40e_aq_desc *d, uint32_t idx,
uint16_t retval, const void *data, size_t len, uint16_t extra_flags,
bool ignore_datalen)
void queue_admin_tx::admin_desc_ctx::desc_complete_indir(uint16_t retval,
const void *data, size_t len, uint16_t extra_flags, bool ignore_datalen)
{
if (!ignore_datalen && len > d->datalen) {
std::cerr << "queue_admin_tx::desc_complete_indir: data too long ("
@@ -47,19 +80,29 @@ void queue_admin_tx::desc_complete_indir(struct i40e_aq_desc *d, uint32_t idx,
}
d->datalen = len;
desc_compl_prepare(d, extra_flags, retval);
desc_compl_prepare(retval, extra_flags);
uint64_t addr = d->params.external.addr_low |
(((uint64_t) d->params.external.addr_high) << 32);
desc_writeback_indirect(d, idx, addr, data, len);
data_write(addr, len, data);
}
void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
void queue_admin_tx::admin_desc_ctx::prepare()
{
struct i40e_aq_desc *d = reinterpret_cast<struct i40e_aq_desc *>(desc);
if ((d->flags & I40E_AQ_FLAG_RD)) {
uint64_t addr = d->params.external.addr_low |
(((uint64_t) d->params.external.addr_high) << 32);
std::cerr << " desc with buffer opc=" << d->opcode << " addr=" << addr
<< std::endl;
data_fetch(addr, d->datalen);
} else {
prepared();
}
}
std::cerr << "descriptor " << idx << " fetched" << std::endl;
void queue_admin_tx::admin_desc_ctx::process()
{
std::cerr << "descriptor " << index << " fetched" << std::endl;
if (d->opcode == i40e_aqc_opc_get_version) {
std::cerr << " get version" << std::endl;
@@ -72,7 +115,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
gv->api_major = I40E_FW_API_VERSION_MAJOR;
gv->api_minor = I40E_FW_API_VERSION_MINOR_X710;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_request_resource) {
std::cerr << " request resource" << std::endl;
struct i40e_aqc_request_resource *rr =
@@ -81,7 +124,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
rr->timeout = 180000;
std::cerr << " res_id=" << rr->resource_id << std::endl;
std::cerr << " res_nu=" << rr->resource_number << std::endl;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_release_resource) {
std::cerr << " release resource" << std::endl;
struct i40e_aqc_request_resource *rr =
@@ -89,11 +132,11 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
d->params.raw);
std::cerr << " res_id=" << rr->resource_id << std::endl;
std::cerr << " res_nu=" << rr->resource_number << std::endl;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_clear_pxe_mode) {
std::cerr << " clear PXE mode" << std::endl;
dev.regs.gllan_rctl_0 &= ~I40E_GLLAN_RCTL_0_PXE_MODE_MASK;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_list_func_capabilities ||
d->opcode == i40e_aqc_opc_list_dev_capabilities)
{
@@ -116,16 +159,16 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
std::cerr << " data fits" << std::endl;
// data fits within the buffer
lc->count = num_caps;
desc_complete_indir(d, idx, 0, caps, sizeof(caps));
desc_complete_indir(0, caps, sizeof(caps));
} else {
std::cerr << " data doesn't fit" << std::endl;
// data does not fit
d->datalen = sizeof(caps);
desc_complete(d, idx, I40E_AQ_RC_ENOMEM);
desc_complete(I40E_AQ_RC_ENOMEM);
}
} else if (d->opcode == i40e_aqc_opc_lldp_stop) {
std::cerr << " lldp stop" << std::endl;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_mac_address_read) {
std::cerr << " read mac" << std::endl;
struct i40e_aqc_mac_address_read *ar =
@@ -139,7 +182,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
memcpy(ard.port_mac, &mac, 6);
ar->command_flags = I40E_AQC_LAN_ADDR_VALID | I40E_AQC_PORT_ADDR_VALID;
desc_complete_indir(d, idx, 0, &ard, sizeof(ard));
desc_complete_indir(0, &ard, sizeof(ard));
} else if (d->opcode == i40e_aqc_opc_get_phy_abilities) {
std::cerr << " get phy abilities" << std::endl;
struct i40e_aq_get_phy_abilities_resp par;
@@ -154,7 +197,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
d->params.external.param0 = 0;
d->params.external.param1 = 0;
desc_complete_indir(d, idx, 0, &par, sizeof(par), 0, true);
desc_complete_indir(0, &par, sizeof(par), 0, true);
} else if (d->opcode == i40e_aqc_opc_get_link_status) {
std::cerr << " link status" << std::endl;
struct i40e_aqc_get_link_status *gls =
@@ -174,7 +217,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
gls->max_frame_size = dev.MAX_MTU;
gls->config = I40E_AQ_CONFIG_CRC_ENA;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_get_switch_config) {
std::cerr << " get switch config" << std::endl;
struct i40e_aqc_switch_seid *sw = reinterpret_cast<
@@ -232,7 +275,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
memcpy(buf, &hr, sizeof(hr));
memcpy(buf + sizeof(hr), els + first, sizeof(els[0]) * report);
desc_complete_indir(d, idx, 0, buf, buflen);
desc_complete_indir(0, buf, buflen);
} else if (d->opcode == i40e_aqc_opc_set_switch_config) {
std::cerr << " set switch config" << std::endl;
/* TODO: lots of interesting things here like l2 filtering etc. that are
@@ -241,7 +284,7 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
reinterpret_cast<struct i40e_aqc_set_switch_config *>(
d->params.raw);
*/
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_get_vsi_parameters) {
std::cerr << " get vsi parameters" << std::endl;
/*struct i40e_aqc_add_get_update_vsi *v =
@@ -254,32 +297,32 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
I40E_AQ_VSI_PROP_QUEUE_OPT_VALID |
I40E_AQ_VSI_PROP_SCHED_VALID;
desc_complete_indir(d, idx, 0, &pd, sizeof(pd));
desc_complete_indir(0, &pd, sizeof(pd));
} else if (d->opcode == i40e_aqc_opc_update_vsi_parameters) {
std::cerr << " update vsi parameters" << std::endl;
/* TODO */
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_set_dcb_parameters) {
std::cerr << " set dcb parameters" << std::endl;
/* TODO */
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_configure_vsi_bw_limit) {
std::cerr << " configure vsi bw limit" << std::endl;
desc_complete(d, idx, 0);
desc_complete(0);
} else if (d->opcode == i40e_aqc_opc_query_vsi_bw_config) {
std::cerr << " query vsi bw config" << std::endl;
struct i40e_aqc_query_vsi_bw_config_resp bwc;
memset(&bwc, 0, sizeof(bwc));
for (size_t i = 0; i < 8; i++)
bwc.qs_handles[i] = 0xffff;
desc_complete_indir(d, idx, 0, &bwc, sizeof(bwc));
desc_complete_indir(0, &bwc, sizeof(bwc));
} else if (d->opcode == i40e_aqc_opc_query_vsi_ets_sla_config) {
std::cerr << " query vsi ets sla config" << std::endl;
struct i40e_aqc_query_vsi_ets_sla_config_resp sla;
memset(&sla, 0, sizeof(sla));
for (size_t i = 0; i < 8; i++)
sla.share_credits[i] = 127;
desc_complete_indir(d, idx, 0, &sla, sizeof(sla));
desc_complete_indir(0, &sla, sizeof(sla));
} else if (d->opcode == i40e_aqc_opc_remove_macvlan) {
std::cerr << " remove macvlan" << std::endl;
struct i40e_aqc_macvlan *m = reinterpret_cast<
@@ -290,50 +333,10 @@ void queue_admin_tx::cmd_run(void *desc, uint32_t idx, void *data)
for (uint16_t i = 0; i < m->num_addresses; i++)
rve[i].error_code = I40E_AQC_REMOVE_MACVLAN_SUCCESS;
desc_complete_indir(d, idx, 0, data, d->datalen);
desc_complete_indir(0, data, d->datalen);
} else {
std::cerr << " uknown opcode=" << d->opcode << std::endl;
desc_complete(d, idx, I40E_AQ_RC_ESRCH);
//desc_complete(I40E_AQ_RC_ESRCH);
desc_complete(0);
}
}
void queue_admin_tx::desc_fetched(void *desc, uint32_t idx)
{
if (!enabled)
return;
struct i40e_aq_desc *d = reinterpret_cast<struct i40e_aq_desc *>(desc);
if ((d->flags & I40E_AQ_FLAG_RD)) {
uint64_t addr = d->params.external.addr_low |
(((uint64_t) d->params.external.addr_high) << 32);
std::cerr << " desc with buffer opc=" << d->opcode << " addr=" << addr
<< std::endl;
data_fetch(desc, idx, addr, d->datalen);
} else {
cmd_run(desc, idx, nullptr);
}
}
void queue_admin_tx::data_fetched(void *desc, uint32_t idx, void *data)
{
if (!enabled)
return;
cmd_run(desc, idx, data);
}
void queue_admin_tx::reg_updated()
{
base = reg_base;
len = (reg_len & I40E_GL_ATQLEN_ATQLEN_MASK) >> I40E_GL_ATQLEN_ATQLEN_SHIFT;
if (!enabled && (reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
std::cerr << "enable atq base=" << base << " len=" << len << std::endl;
enabled = true;
} else if (enabled && !(reg_len & I40E_GL_ATQLEN_ATQENABLE_MASK)) {
std::cerr << "disable atq" << std::endl;
enabled = false;
}
queue_base::reg_updated();
}
#pragma once
#include <list>
#include <vector>
#include <deque>
#include <stdint.h>
extern "C" {
#include <cosim_pcie_proto.h>
@@ -9,6 +8,7 @@ extern "C" {
#include <nicbm.h>
struct i40e_aq_desc;
struct i40e_tx_desc;
namespace i40e {
@@ -21,27 +21,80 @@ class dma_base : public nicbm::DMAOp {
virtual void done() = 0;
};
/**
* Base-class for descriptor queues (RX/TX, Admin RX/TX).
*
* Descriptor processing is split up into multiple phases:
*
* - fetch: descriptor is read from host memory. This can be done in
* batches, where the batch size is limited by the minimum of
* MAX_ACTIVE_DESCS, max_active_capacity(), and max_fetch_capacity().
* Fetch is implemented by this base class.
*
* - prepare: to be implemented in the subclass, but typically involves
* fetching buffer contents. Not guaranteed to happen in order. If
* overridden, the subclass must call prepared() when done.
*
* - process: to be implemented in the subclass. Guaranteed to be called
* in order. In the TX case this actually sends the packet; in the RX case
* processing finishes when a packet for the descriptor has been received.
* The subclass must call processed() when done.
*
* - write back: descriptor is written back to host memory, potentially in
* batches limited by max_writeback_capacity(). Write-back is implemented
* by this base class.
*/
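/* Illustrative sketch (not part of this commit): a minimal desc_ctx
 * subclass following the phases above, as it would appear inside a
 * queue_base subclass. my_desc_ctx is a made-up name; the real
 * subclasses (admin_desc_ctx, tx_desc_ctx, rx_desc_ctx) follow the same
 * pattern.
 *
 *   class my_desc_ctx : public desc_ctx {
 *    public:
 *     my_desc_ctx(queue_base &queue_) : desc_ctx(queue_) { }
 *     virtual void prepare() {
 *       // typically data_fetch(addr, len); the default data_fetched()
 *       // callback then calls prepared() for us
 *       prepared();
 *     }
 *     virtual void process() {
 *       // guaranteed to run in descriptor order; must end in processed()
 *       processed();
 *     }
 *   };
 */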
class queue_base {
protected:
class dma_fetch : public dma_base {
static const uint32_t MAX_ACTIVE_DESCS = 128;
class desc_ctx {
friend class queue_base;
public:
enum state {
DESC_EMPTY,
DESC_FETCHING,
DESC_PREPARING,
DESC_PREPARED,
DESC_PROCESSING,
DESC_PROCESSED,
DESC_WRITING_BACK,
DESC_WRITTEN_BACK,
};
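// Lifecycle of a context: DESC_EMPTY -> DESC_FETCHING -> DESC_PREPARING
// -> DESC_PREPARED -> DESC_PROCESSING -> DESC_PROCESSED ->
// DESC_WRITING_BACK -> DESC_WRITTEN_BACK, after which the slot is
// recycled to DESC_EMPTY (see the assertions in the .cc file).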
protected:
queue_base &queue;
public:
enum state state;
uint32_t index;
dma_fetch(queue_base &queue_, size_t len);
virtual ~dma_fetch();
virtual void done();
void *desc;
void *data;
size_t data_len;
size_t data_capacity;
void prepared();
void processed();
protected:
void data_fetch(uint64_t addr, size_t len);
virtual void data_fetched(uint64_t addr, size_t len);
void data_write(uint64_t addr, size_t len, const void *buf);
virtual void data_written(uint64_t addr, size_t len);
public:
desc_ctx(queue_base &queue_);
virtual ~desc_ctx();
virtual void prepare();
virtual void process() = 0;
};
class dma_data_fetch : public dma_base {
class dma_fetch : public dma_base {
protected:
queue_base &queue;
public:
uint32_t index;
void *desc;
dma_data_fetch(queue_base &queue_, size_t len, const void *desc,
size_t desc_len);
virtual ~dma_data_fetch();
uint32_t pos;
dma_fetch(queue_base &queue_, size_t len);
virtual ~dma_fetch();
virtual void done();
};
@@ -49,51 +102,67 @@ class queue_base {
protected:
queue_base &queue;
public:
uint32_t index;
uint32_t pos;
dma_wb(queue_base &queue_, size_t len);
virtual ~dma_wb();
virtual void done();
};
class dma_data_fetch : public dma_base {
protected:
desc_ctx &ctx;
public:
dma_data_fetch(desc_ctx &ctx_, size_t len, void *buffer);
virtual ~dma_data_fetch();
virtual void done();
};
class dma_data_wb : public dma_base {
protected:
queue_base &queue;
dma_wb &desc_dma;
desc_ctx &ctx;
public:
uint32_t index;
dma_data_wb(queue_base &queue_, size_t len, dma_wb &desc_dma_);
dma_data_wb(desc_ctx &ctx_, size_t len);
virtual ~dma_data_wb();
virtual void done();
};
desc_ctx *desc_ctxs[MAX_ACTIVE_DESCS];
uint32_t active_first_pos;
uint32_t active_first_idx;
uint32_t active_cnt;
uint64_t base;
uint32_t len;
uint32_t fetch_head;
uint32_t &reg_head;
uint32_t &reg_tail;
bool enabled;
size_t desc_len;
uint32_t pending_fetches;
void ctxs_init();
void trigger_fetch();
void data_fetch(const void *desc, uint32_t idx, uint64_t addr, size_t len);
void desc_writeback(const void *desc, uint32_t idx);
void desc_writeback_indirect(const void *desc, uint32_t idx,
uint64_t data_addr, const void *data, size_t data_len);
void trigger_process();
void trigger_writeback();
// returns how many descriptors the queue can fetch max during the next
// fetch: default UINT32_MAX, but can be overridden by child classes
virtual uint32_t max_fetch_capacity();
// called when a descriptor is fetched
virtual void desc_fetched(void *desc, uint32_t idx) = 0;
// called when data is fetched
virtual void data_fetched(void *desc, uint32_t idx, void *data) = 0;
virtual void desc_written_back(uint32_t idx);
void desc_done(uint32_t idx);
virtual uint32_t max_writeback_capacity();
virtual uint32_t max_active_capacity();
virtual desc_ctx &desc_ctx_create() = 0;
// dummy function, needs to be overridden if interrupts are required
virtual void interrupt();
// this does the actual write-back. Can be overridden
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
// called by dma op when writeback has completed
void writeback_done(uint32_t first_pos, uint32_t cnt);
public:
queue_base(uint32_t &reg_head_, uint32_t &reg_tail_);
virtual void reset();
@@ -103,30 +172,35 @@ class queue_base {
class queue_admin_tx : public queue_base {
protected:
class admin_desc_ctx : public desc_ctx {
protected:
queue_admin_tx &aq;
i40e_bm &dev;
struct i40e_aq_desc *d;
// prepare completion descriptor (fills flags, and return value)
void desc_compl_prepare(struct i40e_aq_desc *d, uint16_t retval,
uint16_t extra_flags);
virtual void data_written(uint64_t addr, size_t len);
// prepare completion descriptor (fills flags, and return value)
void desc_compl_prepare(uint16_t retval, uint16_t extra_flags);
// complete direct response
void desc_complete(struct i40e_aq_desc *d, uint32_t idx,
uint16_t retval, uint16_t extra_flags = 0);
void desc_complete(uint16_t retval, uint16_t extra_flags = 0);
// complete indirect response
void desc_complete_indir(struct i40e_aq_desc *d, uint32_t idx,
uint16_t retval, const void *data, size_t len,
uint16_t extra_flags = 0, bool ignore_datalen=false);
void desc_complete_indir(uint16_t retval, const void *data,
size_t len, uint16_t extra_flags = 0,
bool ignore_datalen=false);
// run command
virtual void cmd_run(void *desc, uint32_t idx, void *data);
public:
admin_desc_ctx(queue_admin_tx &queue_, i40e_bm &dev);
// called by base class when a descriptor has been fetched
virtual void desc_fetched(void *desc, uint32_t idx);
// called by base class when data for a descriptor has been fetched
virtual void data_fetched(void *desc, uint32_t idx, void *data);
virtual void prepare();
virtual void process();
};
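/* Illustrative usage inside process() (SOME_OPC is a made-up opcode;
 * see the real dispatch earlier in this diff):
 *
 *   if (d->opcode == SOME_OPC)
 *     desc_complete(0);                            // response fits in desc
 *   else
 *     desc_complete_indir(0, &resp, sizeof(resp)); // needs a data buffer
 */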
i40e_bm &dev;
uint64_t &reg_base;
uint32_t &reg_len;
virtual desc_ctx &desc_ctx_create();
public:
queue_admin_tx(i40e_bm &dev_, uint64_t &reg_base_,
uint32_t &reg_len_, uint32_t &reg_head_, uint32_t &reg_tail_);
@@ -202,29 +276,48 @@ class lan_queue_base : public queue_base {
class lan_queue_tx : public lan_queue_base {
protected:
static const uint16_t MTU = 2048;
class tx_desc_ctx : public desc_ctx {
protected:
lan_queue_tx &tq;
public:
i40e_tx_desc *d;
tx_desc_ctx(lan_queue_tx &queue_);
virtual void prepare();
virtual void process();
};
class dma_hwb : public dma_base {
protected:
lan_queue_tx &queue;
public:
uint32_t head;
uint32_t pos;
uint32_t cnt;
uint32_t next_head;
dma_hwb(lan_queue_tx &queue_, uint32_t head_, uint32_t qlen);
dma_hwb(lan_queue_tx &queue_, uint32_t pos, uint32_t cnt,
uint32_t next_head);
virtual ~dma_hwb();
virtual void done();
};
static const uint16_t MTU = 2048;
uint8_t pktbuf[MTU];
uint16_t pktbuf_len;
std::deque<tx_desc_ctx *> ready_segments;
bool hwb;
uint64_t hwb_addr;
virtual void initialize();
virtual desc_ctx &desc_ctx_create();
virtual void desc_fetched(void *desc, uint32_t idx);
virtual void data_fetched(void *desc, uint32_t idx, void *data);
void desc_writeback(const void *desc, uint32_t idx);
virtual void do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt);
bool trigger_tx_packet();
void trigger_tx();
public:
lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail, size_t idx,
@@ -235,9 +328,15 @@ class lan_queue_tx : public lan_queue_base {
class lan_queue_rx : public lan_queue_base {
protected:
struct desc_cache {
uint64_t buf;
uint64_t hbuf;
class rx_desc_ctx : public desc_ctx {
protected:
lan_queue_rx &rq;
virtual void data_written(uint64_t addr, size_t len);
public:
rx_desc_ctx(lan_queue_rx &queue_);
virtual void process();
void packet_received(const void *data, size_t len);
};
uint16_t dbuff_size;
@@ -245,17 +344,10 @@ class lan_queue_rx : public lan_queue_base {
uint16_t rxmax;
bool crc_strip;
static const uint16_t DCACHE_SIZE = 128;
struct desc_cache dcache[DCACHE_SIZE];
uint32_t dcache_first_idx;
uint16_t dcache_first_pos;
uint16_t dcache_first_cnt;
std::deque<rx_desc_ctx *> dcache;
virtual void initialize();
virtual uint32_t max_fetch_capacity();
virtual void desc_fetched(void *desc, uint32_t idx);
virtual void data_fetched(void *desc, uint32_t idx, void *data);
virtual desc_ctx &desc_ctx_create();
public:
lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail, size_t idx,
@@ -166,16 +166,16 @@ void lan_queue_base::qctx_fetch::done()
lan_queue_rx::lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl_)
: lan_queue_base(lanmgr_, reg_tail_, idx_, reg_ena_, reg_fpmbase_,
reg_intqctl_, 32), dcache_first_idx(0), dcache_first_pos(0),
dcache_first_cnt(0)
reg_intqctl_, 32)
{
// use larger value for initialization
desc_len = 32;
ctxs_init();
}
void lan_queue_rx::reset()
{
dcache_first_idx = 0;
dcache_first_pos = 0;
dcache_first_cnt = 0;
dcache.clear();
queue_base::reset();
}
@@ -221,69 +221,71 @@ void lan_queue_rx::initialize()
" crcstrip=" << crc_strip << " rxmax=" << rxmax << std::endl;
}
uint32_t lan_queue_rx::max_fetch_capacity()
queue_base::desc_ctx &lan_queue_rx::desc_ctx_create()
{
return DCACHE_SIZE - dcache_first_cnt;
return *new rx_desc_ctx(*this);
}
void lan_queue_rx::desc_fetched(void *desc_ptr, uint32_t didx)
void lan_queue_rx::packet_received(const void *data, size_t pktlen)
{
std::cerr << "rxq: desc fetched" << std::endl;
union i40e_32byte_rx_desc *desc =
reinterpret_cast<union i40e_32byte_rx_desc *> (desc_ptr);
if (dcache.empty()) {
std::cerr << "rqx: empty, dropping packet" << std::endl;
return;
}
assert(dcache_first_cnt < DCACHE_SIZE);
std::cerr << " idx=" << dcache_first_idx << " cnt=" << dcache_first_cnt <<
" didx=" << didx << std::endl;
assert((dcache_first_idx + dcache_first_cnt) % len == didx);
rx_desc_ctx &ctx = *dcache.front();
uint16_t dci = (dcache_first_pos + dcache_first_cnt) % DCACHE_SIZE;
dcache[dci].buf = desc->read.pkt_addr;
dcache[dci].hbuf = desc->read.hdr_addr;
std::cerr << "rxq: packet received didx=" << ctx.index << " cnt=" <<
dcache.size() << std::endl;
dcache_first_cnt++;
dcache.pop_front();
ctx.packet_received(data, pktlen);
}
void lan_queue_rx::data_fetched(void *desc, uint32_t didx, void *data)
lan_queue_rx::rx_desc_ctx::rx_desc_ctx(lan_queue_rx &queue_)
: desc_ctx(queue_), rq(queue_)
{
std::cerr << "rxq: data fetched" << std::endl;
}
void lan_queue_rx::packet_received(const void *data, size_t pktlen)
void lan_queue_rx::rx_desc_ctx::data_written(uint64_t addr, size_t len)
{
if (dcache_first_cnt == 0) {
std::cerr << "rqx: empty, dropping packet" << std::endl;
return;
}
processed();
}
std::cerr << "rxq: packet received didx=" << dcache_first_idx << " cnt=" << dcache_first_cnt << std::endl;
union i40e_32byte_rx_desc rxd;
memset(&rxd, 0, sizeof(rxd));
rxd.wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
rxd.wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
// TODO: only if checksums are correct
rxd.wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT);
rxd.wb.qword1.status_error_len |= (pktlen << I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
void lan_queue_rx::rx_desc_ctx::process()
{
rq.dcache.push_back(this);
}
desc_writeback_indirect(&rxd, dcache_first_idx,
dcache[dcache_first_pos].buf, data, pktlen);
void lan_queue_rx::rx_desc_ctx::packet_received(const void *data, size_t pktlen)
{
union i40e_32byte_rx_desc *rxd = reinterpret_cast<
union i40e_32byte_rx_desc *> (desc);
uint64_t addr = rxd->read.pkt_addr;
memset(rxd, 0, sizeof(*rxd));
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
// TODO: only if checksums are correct
rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT);
rxd->wb.qword1.status_error_len |= (pktlen << I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
dcache_first_pos = (dcache_first_pos + 1) % DCACHE_SIZE;
dcache_first_idx = (dcache_first_idx + 1) % len;
dcache_first_cnt--;
data_write(addr, pktlen, data);
}
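/* Illustrative note: packet_received() reuses the fetched descriptor
 * memory as the write-back descriptor (DD and EOF set, packet length in
 * qword1) and DMAs the payload into the host buffer; data_written()
 * above then calls processed(), after which the base class writes the
 * descriptor back and advances the head. */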
lan_queue_tx::lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl)
: lan_queue_base(lanmgr_, reg_tail_, idx_, reg_ena_, reg_fpmbase_,
reg_intqctl, 128), pktbuf_len(0)
reg_intqctl, 128)
{
desc_len = 16;
ctxs_init();
}
void lan_queue_tx::reset()
{
pktbuf_len = 0;
ready_segments.clear();
queue_base::reset();
}
@@ -310,38 +312,147 @@ void lan_queue_tx::initialize()
std::endl;
}
void lan_queue_tx::desc_fetched(void *desc_buf, uint32_t didx)
queue_base::desc_ctx &lan_queue_tx::desc_ctx_create()
{
return *new tx_desc_ctx(*this);
}
struct i40e_tx_desc *desc = reinterpret_cast<struct i40e_tx_desc *>(desc_buf);
uint64_t d1 = desc->cmd_type_offset_bsz;
void lan_queue_tx::do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt)
{
if (!hwb) {
// if head index writeback is disabled we need to write descriptor back
lan_queue_base::do_writeback(first_idx, first_pos, cnt);
} else {
// else we just need to write the index back
dma_hwb *dma = new dma_hwb(*this, first_pos, cnt,
(first_idx + cnt) % len);
dma->dma_addr = hwb_addr;
std::cerr << "hwb=" << *((uint32_t *) dma->data) << std::endl;
runner->issue_dma(*dma);
}
}
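/* Illustrative note: with head write-back the driver polls a single head
 * index in host memory instead of DD bits in individual descriptors, so
 * one 4-byte DMA (dma_hwb carries next_head as its payload) replaces cnt
 * descriptor write-backs. */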
bool lan_queue_tx::trigger_tx_packet()
{
size_t n = ready_segments.size();
if (n == 0)
return false;
size_t dcnt;
bool eop = false;
uint64_t d1;
uint16_t iipt, l4t, total_len = 0;
for (dcnt = 0; dcnt < n && !eop; dcnt++) {
tx_desc_ctx *rd = ready_segments.at(dcnt);
d1 = rd->d->cmd_type_offset_bsz;
std::cerr << "txq: data fetched didx=" << rd->index << " d1=" << d1 <<
std::endl;
uint16_t pkt_len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
if (total_len + pkt_len > MTU) {
std::cerr << "txq: trigger_tx_packet too large" << std::endl;
abort();
}
memcpy(pktbuf + total_len, rd->data, pkt_len);
uint16_t cmd = (d1 & I40E_TXD_QW1_CMD_MASK) >> I40E_TXD_QW1_CMD_SHIFT;
eop = (cmd & I40E_TX_DESC_CMD_EOP);
iipt = cmd & (I40E_TX_DESC_CMD_IIPT_MASK);
l4t = (cmd & I40E_TX_DESC_CMD_L4T_EOFT_MASK);
std::cerr << " eop=" << eop << " len=" << pkt_len << std::endl;
total_len += pkt_len;
}
std::cerr << "txq: desc fetched didx=" << didx << " d1=" << d1 << std::endl;
if (!eop)
return false;
uint32_t off = (d1 & I40E_TXD_QW1_OFFSET_MASK) >> I40E_TXD_QW1_OFFSET_SHIFT;
uint16_t maclen = ((off & I40E_TXD_QW1_MACLEN_MASK) >>
I40E_TX_DESC_LENGTH_MACLEN_SHIFT) * 2;
uint16_t iplen = ((off & I40E_TXD_QW1_IPLEN_MASK) >>
I40E_TX_DESC_LENGTH_IPLEN_SHIFT) * 4;
/*uint16_t l4len = (off & I40E_TXD_QW1_L4LEN_MASK) >>
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;*/
if (l4t == I40E_TX_DESC_CMD_L4T_EOFT_TCP) {
uint16_t tcp_off = maclen + iplen;
xsum_tcp(pktbuf + tcp_off, total_len - tcp_off);
}
std::cerr << " iipt=" << iipt << " l4t=" << l4t << " maclen=" << maclen << " iplen=" << iplen<< std::endl;
runner->eth_send(pktbuf, total_len);
while (dcnt-- > 0) {
ready_segments.front()->processed();
ready_segments.pop_front();
}
return true;
}
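/* Illustrative example with made-up sizes: three data descriptors of
 * 800, 800, and 400 bytes where only the last carries EOP accumulate in
 * pktbuf and leave as one 2000-byte frame via runner->eth_send(); all
 * three segment contexts are then marked processed(). */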
void lan_queue_tx::trigger_tx()
{
while (trigger_tx_packet());
}
lan_queue_tx::tx_desc_ctx::tx_desc_ctx(lan_queue_tx &queue_)
: desc_ctx(queue_), tq(queue_)
{
d = reinterpret_cast<struct i40e_tx_desc *>(desc);
}
void lan_queue_tx::tx_desc_ctx::prepare()
{
uint64_t d1 = d->cmd_type_offset_bsz;
std::cerr << "txq: desc fetched didx=" << index << " d1=" << d1 << std::endl;
uint8_t dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
if (dtype == I40E_TX_DESC_DTYPE_DATA) {
uint16_t len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
std::cerr << " bufaddr=" << desc->buffer_addr << " len=" << len << std::endl;
std::cerr << " bufaddr=" << d->buffer_addr << " len=" << len << std::endl;
data_fetch(desc_buf, didx, desc->buffer_addr, len);
data_fetch(d->buffer_addr, len);
} else if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
struct i40e_tx_context_desc *ctxd =
reinterpret_cast<struct i40e_tx_context_desc *> (desc_buf);
reinterpret_cast<struct i40e_tx_context_desc *> (d);
std::cerr << " context descriptor: tp=" << ctxd->tunneling_params <<
" l2t=" << ctxd->l2tag2 << " tctm=" << ctxd->type_cmd_tso_mss << std::endl;
abort();
desc->buffer_addr = 0;
/*desc->buffer_addr = 0;
desc->cmd_type_offset_bsz = I40E_TX_DESC_DTYPE_DESC_DONE <<
I40E_TXD_QW1_DTYPE_SHIFT;
desc_writeback(desc_buf, didx);
desc_writeback(desc_buf, didx);*/
} else {
std::cerr << "txq: only support context & data descriptors" << std::endl;
abort();
}
}
void lan_queue_tx::tx_desc_ctx::process()
{
tq.ready_segments.push_back(this);
tq.trigger_tx();
}
#if 0
void lan_queue_tx::desc_fetched(void *desc_buf, uint32_t didx)
{
struct i40e_tx_desc *desc = reinterpret_cast<struct i40e_tx_desc *>(desc_buf);
}
void lan_queue_tx::data_fetched(void *desc_buf, uint32_t didx, void *data)
@@ -404,21 +515,12 @@ writeback:
void lan_queue_tx::desc_writeback(const void *desc, uint32_t didx)
{
if (!hwb) {
// if head index writeback is disabled we need to write descriptor back
lan_queue_base::desc_writeback(desc, idx);
} else {
// else we just need to write the index back
dma_hwb *dma = new dma_hwb(*this, didx, (didx + 1) % len);
dma->dma_addr = hwb_addr;
std::cerr << "hwb=" << *((uint32_t *) dma->data) << std::endl;
runner->issue_dma(*dma);
}
}
#endif
lan_queue_tx::dma_hwb::dma_hwb(lan_queue_tx &queue_, uint32_t index_, uint32_t next)
: queue(queue_), head(index_), next_head(next)
lan_queue_tx::dma_hwb::dma_hwb(lan_queue_tx &queue_, uint32_t pos_,
uint32_t cnt_, uint32_t nh_)
: queue(queue_), pos(pos_), cnt(cnt_), next_head(nh_)
{
data = &next_head;
len = 4;
@@ -432,6 +534,6 @@ lan_queue_tx::dma_hwb::~dma_hwb()
void lan_queue_tx::dma_hwb::done()
{
std::cerr << "txq: tx head written back" << std::endl;
queue.desc_written_back(head);
queue.writeback_done(pos, cnt);
delete this;
}
@@ -12,52 +12,132 @@ using namespace i40e;
extern nicbm::Runner *runner;
queue_base::queue_base(uint32_t &reg_head_, uint32_t &reg_tail_)
: base(0), len(0), fetch_head(0), reg_head(reg_head_), reg_tail(reg_tail_),
enabled(false), desc_len(0), pending_fetches(0)
: active_first_pos(0), active_first_idx(0), active_cnt(0),
base(0), len(0), reg_head(reg_head_), reg_tail(reg_tail_),
enabled(false), desc_len(0)
{
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = nullptr;
}
}
void queue_base::ctxs_init()
{
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i] = &desc_ctx_create();
}
}
void queue_base::trigger_fetch()
{
if (!enabled || fetch_head == reg_tail)
if (!enabled)
return;
if (max_fetch_capacity() < pending_fetches + 1)
return;
// calculate how many we can fetch
uint32_t next_idx = (active_first_idx + active_cnt) % len;
uint32_t desc_avail = (reg_tail - next_idx) % len;
uint32_t fetch_cnt = desc_avail;
fetch_cnt = std::min(fetch_cnt, MAX_ACTIVE_DESCS - active_cnt);
// avoid underflow when the subclass capacity is already exhausted
if (max_active_capacity() > active_cnt)
fetch_cnt = std::min(fetch_cnt, max_active_capacity() - active_cnt);
else
fetch_cnt = 0;
fetch_cnt = std::min(fetch_cnt, max_fetch_capacity());
dma_fetch *dma = new dma_fetch(*this, desc_len);
dma->write = false;
dma->dma_addr = base + fetch_head * desc_len;
dma->index = fetch_head;
if (next_idx + fetch_cnt > len)
fetch_cnt = len - next_idx;
pending_fetches++;
std::cerr << "fetching: avail=" << desc_avail <<
" cnt=" << fetch_cnt << " idx=" << next_idx << std::endl;
std::cerr << "fetching: avail=" << (reg_tail - fetch_head) % len <<
" fhead=" << fetch_head << " from " << dma->dma_addr << std::endl;
// abort if nothing to fetch
if (fetch_cnt == 0)
return;
// mark descriptor contexts as fetching
uint32_t first_pos = (active_first_pos + active_cnt) % MAX_ACTIVE_DESCS;
for (uint32_t i = 0; i < fetch_cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_EMPTY);
ctx.state = desc_ctx::DESC_FETCHING;
ctx.index = (next_idx + i) % len;
}
active_cnt += fetch_cnt;
std::cerr << "dma = " << dma << std::endl;
// prepare & issue dma
dma_fetch *dma = new dma_fetch(*this, desc_len * fetch_cnt);
dma->write = false;
dma->dma_addr = base + next_idx * desc_len;
dma->pos = first_pos;
std::cerr << " dma = " << dma << std::endl;
runner->issue_dma(*dma);
fetch_head = (fetch_head + 1) % len;
}
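/* Worked example with made-up numbers: len=8, active_first_idx=6,
 * active_cnt=0, reg_tail=2. Then next_idx=6 and desc_avail=(2-6)%8=4
 * (unsigned arithmetic), but since 6+4 > 8 the batch is clipped to
 * fetch_cnt=2 so the single descriptor DMA stays contiguous in host
 * memory; the next trigger_fetch() picks up the remaining two. */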
void queue_base::data_fetch(const void *desc, uint32_t idx, uint64_t addr,
size_t len)
void queue_base::trigger_process()
{
dma_data_fetch *dma = new dma_data_fetch(*this, len, desc, desc_len);
dma->write = false;
dma->dma_addr = addr;
dma->index = idx;
if (!enabled)
return;
std::cerr << "fetching data idx=" << idx << " addr=" << addr << " len=" <<
len << std::endl;
std::cerr << "dma = " << dma << std::endl;
runner->issue_dma(*dma);
// first skip over descriptors that are already done processing
uint32_t i;
for (i = 0; i < active_cnt; i++)
if (desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS]->state
<= desc_ctx::DESC_PROCESSING)
break;
// then run all prepared contexts
uint32_t j;
for (j = 0; i + j < active_cnt; j++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i + j)
% MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_PREPARED)
break;
ctx.state = desc_ctx::DESC_PROCESSING;
ctx.process();
}
}
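/* Illustrative note: the first loop skips contexts already past
 * processing, the second one starts consecutive DESC_PREPARED contexts
 * and stops at the first one that is not prepared yet; this is what
 * keeps process() strictly in descriptor order even though prepare()
 * may complete out of order. */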
void queue_base::trigger_writeback()
{
if (!enabled)
return;
// from first pos count number of processed descriptors
uint32_t avail;
for (avail = 0; avail < active_cnt; avail++)
if (desc_ctxs[(active_first_pos + avail) % MAX_ACTIVE_DESCS]->state
!= desc_ctx::DESC_PROCESSED)
break;
uint32_t cnt = std::min(avail, max_writeback_capacity());
// clip so the descriptor write-back DMA is contiguous in host memory
if (active_first_idx + cnt > len)
cnt = len - active_first_idx;
std::cerr << "writing back: avail=" << avail << " cnt=" << cnt << " idx=" <<
active_first_idx << std::endl;
if (cnt == 0)
return;
// mark these descriptors as writing back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + i) % MAX_ACTIVE_DESCS];
ctx.state = desc_ctx::DESC_WRITING_BACK;
}
do_writeback(active_first_idx, active_first_pos, cnt);
}
void queue_base::reset()
{
enabled = false;
fetch_head = 0;
active_first_pos = 0;
active_first_idx = 0;
active_cnt = 0;
for (size_t i = 0; i < MAX_ACTIVE_DESCS; i++) {
desc_ctxs[i]->state = desc_ctx::DESC_EMPTY;
}
}
void queue_base::reg_updated()
@@ -73,62 +153,158 @@ bool queue_base::is_enabled()
return enabled;
}
void queue_base::desc_writeback(const void *desc, uint32_t idx)
uint32_t queue_base::max_fetch_capacity()
{
return UINT32_MAX;
}
uint32_t queue_base::max_active_capacity()
{
dma_wb *dma = new dma_wb(*this, desc_len);
return UINT32_MAX;
}
uint32_t queue_base::max_writeback_capacity()
{
return UINT32_MAX;
}
void queue_base::interrupt()
{
}
void queue_base::do_writeback(uint32_t first_idx, uint32_t first_pos,
uint32_t cnt)
{
dma_wb *dma = new dma_wb(*this, desc_len * cnt);
dma->write = true;
dma->dma_addr = base + idx * desc_len;
dma->index = idx;
memcpy(dma->data, desc, desc_len);
dma->dma_addr = base + first_idx * desc_len;
dma->pos = first_pos;
uint8_t *buf = reinterpret_cast<uint8_t *> (dma->data);
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
memcpy(buf + i * desc_len, ctx.desc, desc_len);
}
runner->issue_dma(*dma);
}
void queue_base::desc_writeback_indirect(const void *desc, uint32_t idx,
uint64_t data_addr, const void *data, size_t data_len)
void queue_base::writeback_done(uint32_t first_pos, uint32_t cnt)
{
// descriptor dma
dma_wb *desc_dma = new dma_wb(*this, desc_len);
desc_dma->write = true;
desc_dma->dma_addr = base + idx * desc_len;
desc_dma->index = idx;
memcpy(desc_dma->data, desc, desc_len);
// purposefully not issued yet, data dma will issue once ready
if (!enabled)
return;
// data dma
dma_data_wb *data_dma = new dma_data_wb(*this, data_len, *desc_dma);
data_dma->write = true;
data_dma->dma_addr = data_addr;
data_dma->index = idx;
memcpy(data_dma->data, data, data_len);
// first mark descriptors as written back
for (uint32_t i = 0; i < cnt; i++) {
desc_ctx &ctx = *desc_ctxs[(first_pos + i) % MAX_ACTIVE_DESCS];
assert(ctx.state == desc_ctx::DESC_WRITING_BACK);
ctx.state = desc_ctx::DESC_WRITTEN_BACK;
}
std::cerr << "written back: afi=" << active_first_idx << " afp=" <<
active_first_pos << " acnt=" << active_cnt << " pos=" << first_pos <<
" cnt=" << cnt << std::endl;
// then start at the beginning and check how many are written back and then
// free those
uint32_t bump_cnt = 0;
for (bump_cnt = 0; bump_cnt < active_cnt; bump_cnt++) {
desc_ctx &ctx = *desc_ctxs[(active_first_pos + bump_cnt) %
MAX_ACTIVE_DESCS];
if (ctx.state != desc_ctx::DESC_WRITTEN_BACK)
break;
ctx.state = desc_ctx::DESC_EMPTY;
}
std::cerr << " bump_cnt=" << bump_cnt << std::endl;
active_first_pos = (active_first_pos + bump_cnt) % MAX_ACTIVE_DESCS;
active_first_idx = (active_first_idx + bump_cnt) % len;
active_cnt -= bump_cnt;
reg_head = active_first_idx;
interrupt();
}
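/* Illustrative note: write-back DMAs can complete out of order, so the
 * ring only advances past contexts that are DESC_WRITTEN_BACK. E.g. if
 * positions 0-3 are writing back and the DMA for 2-3 completes first,
 * bump_cnt stays 0; once 0-1 complete as well, all four slots are freed
 * in one go and reg_head jumps by 4. */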
runner->issue_dma(*data_dma);
queue_base::desc_ctx::desc_ctx(queue_base &queue_)
: queue(queue_), state(DESC_EMPTY), index(0), data(nullptr), data_len(0),
data_capacity(0)
{
desc = new uint8_t[queue_.desc_len];
}
uint32_t queue_base::max_fetch_capacity()
queue_base::desc_ctx::~desc_ctx()
{
return UINT32_MAX;
delete[] ((uint8_t *) desc);
if (data_capacity > 0)
delete[] ((uint8_t *) data);
}
void queue_base::desc_done(uint32_t idx)
void queue_base::desc_ctx::prepare()
{
assert(reg_head == idx);
reg_head = (idx + 1) % len;
trigger_fetch();
prepared();
}
void queue_base::interrupt()
void queue_base::desc_ctx::prepared()
{
assert(state == DESC_PREPARING);
state = DESC_PREPARED;
queue.trigger_process();
}
void queue_base::desc_written_back(uint32_t idx)
void queue_base::desc_ctx::processed()
{
if (!enabled)
return;
assert(state == DESC_PROCESSING);
state = DESC_PROCESSED;
queue.trigger_writeback();
}
void queue_base::desc_ctx::data_fetch(uint64_t addr, size_t data_len)
{
if (data_capacity < data_len) {
std::cerr << "data_fetch: allocating" << std::endl;
if (data_capacity != 0)
delete[] ((uint8_t *) data);
data = new uint8_t[data_len];
data_capacity = data_len;
}
dma_data_fetch *dma = new dma_data_fetch(*this, data_len, data);
dma->write = false;
dma->dma_addr = addr;
std::cerr << "fetching data idx=" << index << " addr=" << addr << " len=" <<
data_len << std::endl;
std::cerr << "dma = " << dma << " data=" << data << std::endl;
runner->issue_dma(*dma);
std::cerr << "descriptor " << idx << " written back" << std::endl;
desc_done(idx);
interrupt();
}
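/* Illustrative note: the data buffer is grown on demand and kept in the
 * context (data_capacity), so a context that repeatedly fetches buffers
 * of the same size allocates only once over its lifetime. */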
void queue_base::desc_ctx::data_fetched(uint64_t addr, size_t len)
{
prepared();
}
void queue_base::desc_ctx::data_write(uint64_t addr, size_t data_len,
const void *buf)
{
std::cerr << "data_write(addr=" << addr << " datalen=" << data_len <<
")" << std::endl;
dma_data_wb *data_dma = new dma_data_wb(*this, data_len);
data_dma->write = true;
data_dma->dma_addr = addr;
memcpy(data_dma->data, buf, data_len);
runner->issue_dma(*data_dma);
}
void queue_base::desc_ctx::data_written(uint64_t addr, size_t len)
{
std::cerr << "data_written(addr=" << addr << " datalen=" << len <<
")" << std::endl;
processed();
}
queue_base::dma_fetch::dma_fetch(queue_base &queue_, size_t len_)
@@ -145,32 +321,32 @@ queue_base::dma_fetch::~dma_fetch()
void queue_base::dma_fetch::done()
{
queue.pending_fetches--;
queue.desc_fetched(data, index);
uint8_t *buf = reinterpret_cast <uint8_t *> (data);
for (uint32_t i = 0; i < len / queue.desc_len; i++) {
desc_ctx &ctx = *queue.desc_ctxs[(pos + i) % queue.MAX_ACTIVE_DESCS];
memcpy(ctx.desc, buf + queue.desc_len * i, queue.desc_len);
ctx.state = desc_ctx::DESC_PREPARING;
ctx.prepare();
}
delete this;
}
queue_base::dma_data_fetch::dma_data_fetch(queue_base &queue_, size_t len_,
const void *desc_, size_t desc_len)
:queue(queue_)
queue_base::dma_data_fetch::dma_data_fetch(desc_ctx &ctx_, size_t len_,
void *buffer)
: ctx(ctx_)
{
uint8_t *buf = new uint8_t[desc_len + len_];
desc = buf;
memcpy(desc, desc_, desc_len);
data = buf + desc_len;
data = buffer;
len = len_;
}
queue_base::dma_data_fetch::~dma_data_fetch()
{
delete[] ((uint8_t *) desc);
}
void queue_base::dma_data_fetch::done()
{
queue.data_fetched(desc, index, data);
ctx.data_fetched(dma_addr, len);
delete this;
}
@@ -188,14 +364,13 @@ queue_base::dma_wb::~dma_wb()
void queue_base::dma_wb::done()
{
queue.desc_written_back(index);
queue.writeback_done(pos, len / queue.desc_len);
delete this;
}
queue_base::dma_data_wb::dma_data_wb(queue_base &queue_, size_t len_,
dma_wb &desc_dma_)
: queue(queue_), desc_dma(desc_dma_)
queue_base::dma_data_wb::dma_data_wb(desc_ctx &ctx_, size_t len_)
: ctx(ctx_)
{
data = new char[len_];
len = len_;
@@ -208,7 +383,6 @@ queue_base::dma_data_wb::~dma_data_wb()
void queue_base::dma_data_wb::done()
{
// now we can issue descriptor dma
runner->issue_dma(desc_dma);
ctx.data_written(dma_addr, len);
delete this;
}