#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <iostream>

#include "i40e_bm.h"

#include "i40e_base_wrapper.h"

using namespace i40e;

extern nicbm::Runner *runner;

lan::lan(i40e_bm &dev_, size_t num_qs_)
    : dev(dev_), log("lan"), num_qs(num_qs_)
{
    rxqs = new lan_queue_rx *[num_qs];
    txqs = new lan_queue_tx *[num_qs];

    for (size_t i = 0; i < num_qs; i++) {
        rxqs[i] = new lan_queue_rx(*this, dev.regs.qrx_tail[i], i,
                dev.regs.qrx_ena[i], dev.regs.glhmc_lanrxbase[0],
                dev.regs.qint_rqctl[i]);
        txqs[i] = new lan_queue_tx(*this, dev.regs.qtx_tail[i], i,
                dev.regs.qtx_ena[i], dev.regs.glhmc_lantxbase[0],
                dev.regs.qint_tqctl[i]);
    }
}

void lan::reset()
{
    for (size_t i = 0; i < num_qs; i++) {
        rxqs[i]->reset();
        txqs[i]->reset();
    }
}

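// Reacts to driver writes to a QRX_ENA/QTX_ENA register: the QENA_REQ bit
// requests enabling or disabling a queue, and the model reflects completion
// in the QENA_STAT bit (set in ctx_fetched(), cleared in disable()). Note
// that the QRX_ENA mask is used for both directions, which assumes the REQ
// bit sits at the same position in QRX_ENA and QTX_ENA.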
void lan::qena_updated(uint16_t idx, bool rx)
{
#ifdef DEBUG_LAN
    log << " qena updated idx=" << idx << " rx=" << rx << logger::endl;
#endif

    uint32_t &reg = (rx ? dev.regs.qrx_ena[idx] : dev.regs.qtx_ena[idx]);
    lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx]) :
        static_cast<lan_queue_base &>(*txqs[idx]));

    if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) && !q.is_enabled()) {
        q.enable();
    } else if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) && q.is_enabled()) {
        q.disable();
    }
}

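// Reacts to driver writes of a queue's tail pointer; only enabled queues are
// poked, since the tail bump is what exposes newly posted descriptors to the
// device model.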
void lan::tail_updated(uint16_t idx, bool rx)
{
#ifdef DEBUG_LAN
    log << " tail updated idx=" << idx << " rx=" << rx << logger::endl;
#endif

    lan_queue_base &q = (rx ? static_cast<lan_queue_base &>(*rxqs[idx]) :
        static_cast<lan_queue_base &>(*txqs[idx]));

    if (q.is_enabled())
        q.reg_updated();
}

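// Entry point for frames arriving from the network. All traffic is currently
// delivered to RX queue 0 because no RSS/flow steering is modeled (see TODO
// below).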
void lan::packet_received(const void *data, size_t len)
{
#ifdef DEBUG_LAN
    log << " packet received len=" << len << logger::endl;
#endif

    // TODO: steering
    rxqs[0]->packet_received(data, len);
}

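// Common queue state shared by RX and TX: references to the tail, enable,
// HMC/FPM base, and per-queue interrupt control registers, plus a buffer
// that receives the queue context fetched from host memory on enable.
// ctx_size is the size of that context in the HMC backing store (32 B for
// RX, 128 B for TX, as passed by the subclasses below).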
lan_queue_base::lan_queue_base(lan &lanmgr_, const std::string &qtype,
        uint32_t &reg_tail_, size_t idx_,
        uint32_t &reg_ena_, uint32_t &fpm_basereg_, uint32_t &reg_intqctl_,
        uint16_t ctx_size_)
    : queue_base(qtype + std::to_string(idx_), reg_dummy_head, reg_tail_),
    lanmgr(lanmgr_), enabling(false),
    idx(idx_), reg_ena(reg_ena_), fpm_basereg(fpm_basereg_),
    reg_intqctl(reg_intqctl_), ctx_size(ctx_size_)
{
    ctx = new uint8_t[ctx_size_];
}

void lan_queue_base::reset()
{
    enabling = false;
    queue_base::reset();
}

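// Enabling a queue starts by fetching its context from the HMC backing store
// in host memory: the FPM base register gives the segment base in 512-byte
// units and the queue index selects a ctx_size-sized slot within it. The
// enable completes asynchronously in ctx_fetched(). The LANTXBASE mask/shift
// macros are reused for RX queues here, which assumes GLHMC_LANRXBASE has
// the same field layout.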
void lan_queue_base::enable()
{
    if (enabling || enabled)
        return;

#ifdef DEBUG_LAN
    log << " lan enabling queue " << idx << logger::endl;
#endif

    enabling = true;

    qctx_fetch *qf = new qctx_fetch(*this);
    qf->write = false;
    qf->dma_addr = ((fpm_basereg & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) >>
        I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) * 512;
    qf->dma_addr += ctx_size * idx;
    qf->len = ctx_size;
    qf->data = ctx;

    lanmgr.dev.hmc.issue_mem_op(*qf);
}

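// Completion handler for the context fetch issued in enable(): parses the
// context (subclass initialize()), marks the queue enabled via QENA_STAT,
// and calls reg_updated() so descriptors the driver may already have posted
// get picked up.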
void lan_queue_base::ctx_fetched()
{
#ifdef DEBUG_LAN
    log << " lan ctx fetched " << idx << logger::endl;
#endif

    initialize();

    enabling = false;
    enabled = true;
    reg_ena |= I40E_QRX_ENA_QENA_STAT_MASK;

    reg_updated();
}

void lan_queue_base::disable()
{
#ifdef DEBUG_LAN
    log << " lan disabling queue " << idx << logger::endl;
#endif

    enabled = false;
    // TODO: write back
    reg_ena &= ~I40E_QRX_ENA_QENA_STAT_MASK;
}

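// Raises this queue's interrupt cause. Only MSI-X vector 0 is modeled: the
// queue's cause bit is set in PFINT_ICR0 and the interrupt is signalled with
// the ITR index from the queue's QINT_*QCTL register, provided both the
// per-queue CAUSE_ENA and the global INTENA bits are set. The TQCTL
// mask/shift macros are also applied to RX queues, assuming QINT_RQCTL
// shares the same layout.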
void lan_queue_base::interrupt()
{
    uint32_t qctl = reg_intqctl;
    uint32_t gctl = lanmgr.dev.regs.pfint_dyn_ctl0;
#ifdef DEBUG_LAN
    log << " interrupt qctl=" << qctl << " gctl=" << gctl << logger::endl;
#endif

    uint16_t msix_idx = (qctl & I40E_QINT_TQCTL_MSIX_INDX_MASK) >>
        I40E_QINT_TQCTL_MSIX_INDX_SHIFT;
    uint8_t msix0_idx = (qctl & I40E_QINT_TQCTL_MSIX0_INDX_MASK) >>
        I40E_QINT_TQCTL_MSIX0_INDX_SHIFT;

    if (msix_idx != 0) {
        log << "TODO: only int 0 is supported" << logger::endl;
        abort();
    }

    bool cause_ena = !!(qctl & I40E_QINT_TQCTL_CAUSE_ENA_MASK) &&
      !!(gctl & I40E_PFINT_DYN_CTL0_INTENA_MASK);
    if (!cause_ena) {
#ifdef DEBUG_LAN
        log << " interrupt cause disabled" << logger::endl;
#endif
        return;
    }

    // TODO throttling?
#ifdef DEBUG_LAN
    log << "   setting int0.qidx=" << msix0_idx << logger::endl;
#endif
    lanmgr.dev.regs.pfint_icr0 |= I40E_PFINT_ICR0_INTEVENT_MASK |
        (1 << (I40E_PFINT_ICR0_QUEUE_0_SHIFT + msix0_idx));

    uint8_t itr = (qctl & I40E_QINT_TQCTL_ITR_INDX_MASK) >>
        I40E_QINT_TQCTL_ITR_INDX_SHIFT;
    lanmgr.dev.signal_interrupt(0, itr);

}

lan_queue_base::qctx_fetch::qctx_fetch(lan_queue_base &lq_)
    : lq(lq_)
{
}

void lan_queue_base::qctx_fetch::done()
{
    lq.ctx_fetched();
    delete this;
}

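// RX queues use a 32-byte HMC context. desc_len starts out at the larger
// 32-byte descriptor size, presumably so the contexts allocated by
// ctxs_init() are large enough for either format until initialize() reads
// the actual setting.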
lan_queue_rx::lan_queue_rx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
        uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl_)
    : lan_queue_base(lanmgr_, "rxq", reg_tail_, idx_, reg_ena_, reg_fpmbase_,
            reg_intqctl_, 32)
{
    // use larger value for initialization
    desc_len = 32;
    ctxs_init();
}

void lan_queue_rx::reset()
{
    dcache.clear();
    queue_base::reset();
}

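// Parses the fetched RX queue context. The offsets and scale factors below
// (ring base in 128-byte units, data buffer size in 128-byte units, header
// buffer size in 64-byte units, rxmax in 128-byte units) appear to follow
// the i40e RX queue context layout; only 32-byte descriptors without header
// split are accepted.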
void lan_queue_rx::initialize()
{
#ifdef DEBUG_LAN
    log << " initialize()" << logger::endl;
#endif

    uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);

    uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
    uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
    uint16_t *qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 11);
    uint16_t *dbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 12);
    uint16_t *hbsz_p = reinterpret_cast<uint16_t *>(ctx_p + 13);
    uint32_t *rxmax_p = reinterpret_cast<uint32_t *>(ctx_p + 21);

    reg_dummy_head = (*head_p) & ((1 << 13) - 1);

    base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
    len = (*qlen_p >> 1) & ((1 << 13) - 1);

    dbuff_size = (((*dbsz_p) >> 6) & ((1 << 7) - 1)) * 128;
    hbuff_size = (((*hbsz_p) >> 5) & ((1 << 5) - 1)) * 64;
    uint8_t dtype = ((*hbsz_p) >> 10) & ((1 << 2) - 1);
    bool longdesc = !!(((*hbsz_p) >> 12) & 0x1);
    desc_len = (longdesc ? 32 : 16);
    crc_strip = !!(((*hbsz_p) >> 13) & 0x1);
    rxmax = (((*rxmax_p) >> 6) & ((1 << 14) - 1)) * 128;

    if (!longdesc) {
        log << "lan_queue_rx::initialize: currently only 32B descs "
            "supported" << logger::endl;
        abort();
    }
    if (dtype != 0) {
        log << "lan_queue_rx::initialize: no header split supported"
            << logger::endl;
        abort();
    }

#ifdef DEBUG_LAN
    log << "  head=" << reg_dummy_head << " base=" << base <<
        " len=" << len << " dbsz=" << dbuff_size << " hbsz=" << hbuff_size <<
        " dtype=" << (unsigned) dtype << " longdesc=" << longdesc <<
        " crcstrip=" << crc_strip << " rxmax=" << rxmax << logger::endl;
#endif
}

queue_base::desc_ctx &lan_queue_rx::desc_ctx_create()
{
    return *new rx_desc_ctx(*this);
}

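// Hands a received frame to the oldest available RX descriptor. Descriptors
// are queued in dcache by rx_desc_ctx::process() as the driver posts them;
// if none are available the frame is dropped, much like hardware would when
// the RX ring runs dry.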
void lan_queue_rx::packet_received(const void *data, size_t pktlen)
{
    if (dcache.empty()) {
#ifdef DEBUG_LAN
        log << " empty, dropping packet" << logger::endl;
#endif
        return;
    }

    rx_desc_ctx &ctx = *dcache.front();

#ifdef DEBUG_LAN
    log << " packet received didx=" << ctx.index << " cnt=" <<
        dcache.size() << logger::endl;
#endif

    dcache.pop_front();
    ctx.packet_received(data, pktlen);
}

lan_queue_rx::rx_desc_ctx::rx_desc_ctx(lan_queue_rx &queue_)
    : desc_ctx(queue_), rq(queue_)
{
}

void lan_queue_rx::rx_desc_ctx::data_written(uint64_t addr, size_t len)
{
    processed();
}

void lan_queue_rx::rx_desc_ctx::process()
{
    rq.dcache.push_back(this);
}

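// Fills in one RX descriptor for a received frame: the buffer address is
// taken from the descriptor's read format, the local copy is rewritten in
// writeback format with DD and EOF set, L3L4P reported unconditionally (see
// TODO), and the packet length placed in QW1. The payload DMA is issued
// last; data_written() then completes the descriptor.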
void lan_queue_rx::rx_desc_ctx::packet_received(const void *data, size_t pktlen)
{
    union i40e_32byte_rx_desc *rxd = reinterpret_cast<
        union i40e_32byte_rx_desc *> (desc);

    uint64_t addr = rxd->read.pkt_addr;

    memset(rxd, 0, sizeof(*rxd));
    rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
    rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
    // TODO: only if checksums are correct
    rxd->wb.qword1.status_error_len |= (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT);
    rxd->wb.qword1.status_error_len |= (pktlen << I40E_RXD_QW1_LENGTH_PBUF_SHIFT);

    data_write(addr, pktlen, data);
}

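// TX queues use a 128-byte HMC context and 16-byte descriptors.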
lan_queue_tx::lan_queue_tx(lan &lanmgr_, uint32_t &reg_tail_, size_t idx_,
        uint32_t &reg_ena_, uint32_t &reg_fpmbase_, uint32_t &reg_intqctl)
    : lan_queue_base(lanmgr_, "txq", reg_tail_, idx_, reg_ena_, reg_fpmbase_,
            reg_intqctl, 128)
{
    desc_len = 16;
    ctxs_init();
}

void lan_queue_tx::reset()
{
    ready_segments.clear();
    queue_base::reset();
}

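// Parses the fetched TX queue context: ring base (128-byte units), ring
// length, and whether head writeback is enabled along with the host address
// the head index should be written to.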
void lan_queue_tx::initialize()
{
#ifdef DEBUG_LAN
    log << " initialize()" << logger::endl;
#endif

    uint8_t *ctx_p = reinterpret_cast<uint8_t *>(ctx);

    uint16_t *head_p = reinterpret_cast<uint16_t *>(ctx_p + 0);
    uint64_t *base_p = reinterpret_cast<uint64_t *>(ctx_p + 4);
    uint16_t *hwb_qlen_p = reinterpret_cast<uint16_t *>(ctx_p + 20);
    uint64_t *hwb_addr_p = reinterpret_cast<uint64_t *>(ctx_p + 24);

    reg_dummy_head = (*head_p) & ((1 << 13) - 1);

    base = ((*base_p) & ((1ULL << 57) - 1)) * 128;
    len = ((*hwb_qlen_p) >> 1) & ((1 << 13) - 1);

    hwb = !!(*hwb_qlen_p & (1 << 0));
    hwb_addr = *hwb_addr_p;

#ifdef DEBUG_LAN
    log << "  head=" << reg_dummy_head << " base=" << base <<
        " len=" << len << " hwb=" << hwb << " hwb_addr=" << hwb_addr <<
        logger::endl;
#endif
}

queue_base::desc_ctx &lan_queue_tx::desc_ctx_create()
{
    return *new tx_desc_ctx(*this);
}

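// Completion reporting for TX descriptors: with head writeback disabled the
// descriptors themselves are written back (base class behavior); with it
// enabled only the new head index is DMAed to the address taken from the
// queue context.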
void lan_queue_tx::do_writeback(uint32_t first_idx, uint32_t first_pos,
        uint32_t cnt)
{
    if (!hwb) {
        // if head index writeback is disabled we need to write descriptor back
        lan_queue_base::do_writeback(first_idx, first_pos, cnt);
    } else {
        // else we just need to write the index back
        dma_hwb *dma = new dma_hwb(*this, first_pos, cnt,
                (first_idx + cnt) % len);
        dma->dma_addr = hwb_addr;

#ifdef DEBUG_LAN
        log << " hwb=" << *((uint32_t *) dma->data) << logger::endl;
#endif

        runner->issue_dma(*dma);
    }
}

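// Tries to assemble and transmit one packet from the fetched TX segments.
// Segment buffers are concatenated into pktbuf until a descriptor with EOP
// is seen; if the chain is still incomplete, nothing is consumed and false
// is returned. For TCP packets the L4 checksum is filled in based on the
// offload fields of the last descriptor, the frame is sent, and all consumed
// descriptors are marked done.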
bool lan_queue_tx::trigger_tx_packet()
{
    size_t n = ready_segments.size();
    if (n == 0)
        return false;

    size_t dcnt;
    bool eop = false;
    uint64_t d1;
    uint16_t iipt, l4t, total_len = 0;
    for (dcnt = 0; dcnt < n && !eop; dcnt++) {
        tx_desc_ctx *rd = ready_segments.at(dcnt);

        d1 = rd->d->cmd_type_offset_bsz;
#ifdef DEBUG_LAN
        log << " data fetched didx=" << rd->index << " d1=" <<
            d1 << logger::endl;
#endif

        uint16_t pkt_len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
            I40E_TXD_QW1_TX_BUF_SZ_SHIFT;
        if (total_len + pkt_len > MTU) {
            log << "txq: trigger_tx_packet too large" << logger::endl;
            abort();
        }

        memcpy(pktbuf + total_len, rd->data, pkt_len);

        uint16_t cmd = (d1 & I40E_TXD_QW1_CMD_MASK) >> I40E_TXD_QW1_CMD_SHIFT;
        eop = (cmd & I40E_TX_DESC_CMD_EOP);
        iipt = cmd & (I40E_TX_DESC_CMD_IIPT_MASK);
        l4t = (cmd & I40E_TX_DESC_CMD_L4T_EOFT_MASK);

#ifdef DEBUG_LAN
        log << "    eop=" << eop << " len=" << pkt_len <<
            logger::endl;
#endif

        total_len += pkt_len;
    }

    if (!eop)
        return false;

    uint32_t off = (d1 & I40E_TXD_QW1_OFFSET_MASK) >> I40E_TXD_QW1_OFFSET_SHIFT;
    uint16_t maclen = ((off & I40E_TXD_QW1_MACLEN_MASK) >>
        I40E_TX_DESC_LENGTH_MACLEN_SHIFT) * 2;
    uint16_t iplen = ((off & I40E_TXD_QW1_IPLEN_MASK) >>
        I40E_TX_DESC_LENGTH_IPLEN_SHIFT) * 4;
    /*uint16_t l4len = (off & I40E_TXD_QW1_L4LEN_MASK) >>
        I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;*/


    if (l4t == I40E_TX_DESC_CMD_L4T_EOFT_TCP) {
        uint16_t tcp_off = maclen + iplen;
        xsum_tcp(pktbuf + tcp_off, total_len - tcp_off);
    }
#ifdef DEBUG_LAN
    log << "    iipt=" << iipt << " l4t=" << l4t <<
        " maclen=" << maclen << " iplen=" << iplen << logger::endl;
#else
    (void) iipt;
#endif

    runner->eth_send(pktbuf, total_len);

    while (dcnt-- > 0) {
        ready_segments.front()->processed();
        ready_segments.pop_front();
    }

    return true;
}

void lan_queue_tx::trigger_tx()
{
    while (trigger_tx_packet());
}

lan_queue_tx::tx_desc_ctx::tx_desc_ctx(lan_queue_tx &queue_)
    : desc_ctx(queue_), tq(queue_)
{
    d = reinterpret_cast<struct i40e_tx_desc *>(desc);
}

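// Inspects a fetched TX descriptor: data descriptors trigger a DMA fetch of
// their payload (process() then queues the fetched segment for
// transmission), while context descriptors (e.g. for TSO) are not
// implemented and abort.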
void lan_queue_tx::tx_desc_ctx::prepare()
{
    uint64_t d1 = d->cmd_type_offset_bsz;

#ifdef DEBUG_LAN
    queue.log << " desc fetched didx=" << index << " d1=" <<
        d1 << logger::endl;
#endif

    uint8_t dtype = (d1 & I40E_TXD_QW1_DTYPE_MASK) >> I40E_TXD_QW1_DTYPE_SHIFT;
    if (dtype == I40E_TX_DESC_DTYPE_DATA) {
        uint16_t len = (d1 & I40E_TXD_QW1_TX_BUF_SZ_MASK) >>
            I40E_TXD_QW1_TX_BUF_SZ_SHIFT;

#ifdef DEBUG_LAN
        queue.log << "  bufaddr=" << d->buffer_addr <<
            " len=" << len << logger::endl;
#endif

        data_fetch(d->buffer_addr, len);
    } else if (dtype == I40E_TX_DESC_DTYPE_CONTEXT) {
        struct i40e_tx_context_desc *ctxd =
            reinterpret_cast<struct i40e_tx_context_desc *> (d);

        queue.log << "  context descriptor: tp=" << ctxd->tunneling_params <<
            " l2t=" << ctxd->l2tag2 << " tctm=" << ctxd->type_cmd_tso_mss <<
            logger::endl;
        abort();

        /*desc->buffer_addr = 0;
        desc->cmd_type_offset_bsz = I40E_TX_DESC_DTYPE_DESC_DONE <<
            I40E_TXD_QW1_DTYPE_SHIFT;

        desc_writeback(desc_buf, didx);*/
    } else {
        queue.log << "txq: only support context & data descriptors"
            << logger::endl;
        abort();
    }

}

void lan_queue_tx::tx_desc_ctx::process()
{
    tq.ready_segments.push_back(this);
    tq.trigger_tx();
}

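// Marks a TX data descriptor as completed by rewriting QW1 with the
// DESC_DONE dtype before invoking the generic desc_ctx completion.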
void lan_queue_tx::tx_desc_ctx::processed()
{
    d->cmd_type_offset_bsz = I40E_TX_DESC_DTYPE_DESC_DONE <<
        I40E_TXD_QW1_DTYPE_SHIFT;
    desc_ctx::processed();
}

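// DMA operation for TX head writeback: writes the 4-byte next head index to
// the address configured in the queue context; on completion the ring state
// is advanced and the queue re-triggered.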
lan_queue_tx::dma_hwb::dma_hwb(lan_queue_tx &queue_, uint32_t pos_,
        uint32_t cnt_, uint32_t nh_)
    : queue(queue_), pos(pos_), cnt(cnt_), next_head(nh_)
{
    data = &next_head;
    len = 4;
    write = true;
}

lan_queue_tx::dma_hwb::~dma_hwb()
{
}

void lan_queue_tx::dma_hwb::done()
{
#ifdef DEBUG_LAN
    queue.log << " tx head written back" << logger::endl;
#endif
    queue.writeback_done(pos, cnt);
    queue.trigger();
    delete this;
}