Commit d365cd9e authored by Antoine Kaufmann's avatar Antoine Kaufmann

sims/nic/e1000_gem5: add e1000 model from gem5

parent 2ddf5f12
#include <iostream>
#include <stdarg.h>
#include <stdio.h>   // fprintf, vfprintf
#include <stdlib.h>  // abort
#include <string.h>  // memcpy
#include <simbricks/nicbm/nicbm.h>
#include "sims/nic/e1000_gem5/i8254xGBe.h"
class Gem5DMAOp : public nicbm::DMAOp, public nicbm::TimedEvent {
public:
EventFunctionWrapper &ev_;
Gem5DMAOp(EventFunctionWrapper &ev) : ev_(ev) {}
virtual ~Gem5DMAOp() = default;
};
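/* Adapter note: Gem5DMAOp ties a nicbm DMA operation to the gem5
 * EventFunctionWrapper that must fire once the DMA completes. It also
 * inherits nicbm::TimedEvent so a delayed DMA can first be queued as a timed
 * event (see dmaRead/dmaWrite below) and only be issued once the delay has
 * elapsed (see IGbE::Timed). The data_/len_/write_/dma_addr_/time_ fields
 * used below come from the nicbm base classes. */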
/******************************************************************************/
/* nicbm callbacks */
void IGbE::SetupIntro(struct SimbricksProtoPcieDevIntro &di)
{
di.bars[0].len = 128 * 1024;
di.bars[0].flags = SIMBRICKS_PROTO_PCIE_BAR_64;
di.pci_vendor_id = 0x8086;
di.pci_device_id = 0x1075;
di.pci_class = 0x02;
di.pci_subclass = 0x00;
di.pci_revision = 0x01;
}
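/* For reference: 0x8086 is Intel's PCI vendor ID, and class 0x02 with
 * subclass 0x00 marks an Ethernet network controller. Device ID 0x1075
 * appears to be the ID used by gem5's IGbE model (an 8254x-family part);
 * BAR 0 exposes the 128 KiB, 64-bit memory-mapped register window. */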
void IGbE::RegRead(uint8_t bar, uint64_t addr, void *dest,
size_t len)
{
read(addr, len, dest);
// TODO delay!
}
void IGbE::RegWrite(uint8_t bar, uint64_t addr, const void *src,
size_t len)
{
write(addr, len, src);
// TODO delay!
}
void IGbE::DmaComplete(nicbm::DMAOp &op)
{
Gem5DMAOp *dma = dynamic_cast <Gem5DMAOp *>(&op);
dma->ev_.sched = false;
dma->ev_.callback();
if (dma->write_)
delete[] ((uint8_t *) dma->data_);
delete dma;
}
void IGbE::EthRx(uint8_t port, const void *data, size_t len)
{
EthPacketPtr pp = std::make_shared<EthPacketData>(len);
pp->length = len;
memcpy(pp->data, data, len);
ethRxPkt(pp);
}
void IGbE::Timed(nicbm::TimedEvent &te)
{
if (Gem5DMAOp *dma = dynamic_cast <Gem5DMAOp *>(&te)) {
runner_->IssueDma(*dma);
} else if (EventFunctionWrapper *evw =
dynamic_cast <EventFunctionWrapper *>(&te)) {
evw->sched = false;
evw->callback();
} else {
abort();
}
}
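/* Dispatch note: Timed() is the single nicbm timer callback. Delayed DMA
 * operations come back here as Gem5DMAOp and are only now handed to the
 * runner, while ordinary gem5 events scheduled via schedule()/reschedule()
 * are unwrapped and have their callback invoked directly. */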
/******************************************************************************/
/* gem5-ish APIs */
void IGbE::schedule(EventFunctionWrapper &ev, Tick t)
{
if (ev.sched) {
fprintf(stderr, "schedule: already scheduled\n");
abort();
}
ev.time_ = t;
ev.sched = true;
runner_->EventSchedule(ev);
}
void IGbE::reschedule(EventFunctionWrapper &ev, Tick t, bool always)
{
if (ev.sched) {
runner_->EventCancel(ev);
ev.sched = false;
} else if (!always) {
fprintf(stderr, "reschedule: not yet scheduled\n");
abort();
}
schedule(ev, t);
}
void IGbE::deschedule(EventFunctionWrapper &ev)
{
if (!ev.sched) {
fprintf(stderr, "deschedule: not scheduledd\n");
abort();
}
runner_->EventCancel(ev);
ev.sched = false;
}
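/* The three helpers above map gem5's event-queue interface onto nicbm:
 * schedule() refuses to double-schedule, reschedule() cancels and re-arms a
 * pending event (or, with always == true, schedules it even if it was not
 * pending), and deschedule() cancels a pending event. The sched flag plays
 * the role of gem5's Event::scheduled() so the ported device code can keep
 * its existing checks. */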
void IGbE::intrPost()
{
runner_->IntXIssue(true);
}
void IGbE::intrClear()
{
runner_->IntXIssue(false);
}
void IGbE::dmaWrite(Addr daddr, size_t len, EventFunctionWrapper &ev,
const void *buf, Tick delay)
{
ev.sched = true;
Gem5DMAOp *op = new Gem5DMAOp(ev);
op->data_ = new uint8_t[len];
memcpy(op->data_, buf, len);
op->len_ = len;
op->write_ = true;
op->dma_addr_ = daddr;
if (delay == 0) {
runner_->IssueDma(*op);
} else {
op->time_ = runner_->TimePs() + delay;
runner_->EventSchedule(*op);
}
}
void IGbE::dmaRead(Addr saddr, size_t len, EventFunctionWrapper &ev,
void *buf, Tick delay)
{
ev.sched = true;
Gem5DMAOp *op = new Gem5DMAOp(ev);
op->data_ = buf;
op->len_ = len;
op->write_ = false;
op->dma_addr_ = saddr;
if (delay == 0) {
runner_->IssueDma(*op);
} else {
op->time_ = runner_->TimePs() + delay;
runner_->EventSchedule(*op);
}
}
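/* Buffer-ownership note for the DMA shims above: dmaWrite() copies the
 * caller's buffer into a freshly allocated op (freed again in DmaComplete),
 * so the source may be reused immediately; dmaRead() points the op directly
 * at the caller's buffer, which must stay valid until the completion event
 * fires. A delay of 0 issues the DMA right away, otherwise the op is first
 * queued as a timed event. */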
bool IGbE::sendPacket(EthPacketPtr p)
{
runner_->EthSend(p->data, p->length);
ethTxDone();
return true;
}
void warn(const char *fmt, ...)
{
fprintf(stderr, "warn: ");
va_list va;
va_start(va, fmt);
vfprintf(stderr, fmt, va);
va_end(va);
}
void panic(const char *fmt, ...)
{
fprintf(stderr, "panic: ");
va_list va;
va_start(va, fmt);
vfprintf(stderr, fmt, va);
va_end(va);
abort();
}
/******************************************************************************/
int main(int argc, char *argv[])
{
IGbEParams params;
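// The delay parameters below are given in the runner's time base, which
// appears to be picoseconds (cf. runner_->TimePs() above), so 10 * 1000
// corresponds to 10 ns. FIFO sizes are in bytes and descriptor cache sizes
// in descriptors, in line with gem5's IGbE defaults.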
params.rx_fifo_size = 384 * 1024;
params.tx_fifo_size = 384 * 1024;
params.fetch_delay = 10 * 1000;
params.wb_delay = 10 * 1000;
params.fetch_comp_delay = 10 * 1000;
params.wb_comp_delay = 10 * 1000;
params.rx_write_delay = 0;
params.tx_read_delay = 0;
params.pio_delay = 0; // TODO
params.rx_desc_cache_size = 64;
params.tx_desc_cache_size = 64;
params.phy_pid = 0x02A8;
params.phy_epid = 0x0380;
IGbE *dev = new IGbE(&params);
nicbm::Runner *runner = new nicbm::Runner(*dev);
dev->init();
return runner->RunMain(argc, argv);
}
/*
* Copyright (c) 2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "sims/nic/e1000_gem5/gem5/bitfield.h"
/** Lookup table used for High Speed bit reversing */
const uint8_t reverseLookUpTable[] =
{
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
0x3F, 0xBF, 0x7F, 0xFF
};
/*
* Copyright (c) 2017, 2019 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BASE_BITFIELD_HH__
#define __BASE_BITFIELD_HH__
#include <inttypes.h>
#include <cassert>
#include <cstddef>
#include <type_traits>
/** Lookup table used for High Speed bit reversing */
extern const uint8_t reverseLookUpTable[];
/**
* Generate a 64-bit mask of 'nbits' 1s, right justified. If a number of bits
* greater than 64 is given, it is truncated to 64.
*
* @param nbits The number of bits set in the mask.
*/
inline uint64_t
mask(int nbits)
{
return (nbits >= 64) ? (uint64_t)-1LL : (1ULL << nbits) - 1;
}
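// Examples: mask(0) == 0x0, mask(4) == 0xF, mask(8) == 0xFF; any nbits >= 64
// yields all ones (0xFFFFFFFFFFFFFFFF).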
/**
* Extract the bitfield from position 'first' to 'last' (inclusive)
* from 'val' and right justify it. MSB is numbered 63, LSB is 0.
*/
template <class T>
inline
T
bits(T val, int first, int last)
{
int nbits = first - last + 1;
return (val >> last) & mask(nbits);
}
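// Examples: bits(0xABCD, 15, 8) == 0xAB and bits(0xABCD, 3, 0) == 0xD.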
/**
* Extract the bit from this position from 'val' and right justify it.
*/
template <class T>
inline
T
bits(T val, int bit)
{
return bits(val, bit, bit);
}
/**
* Mask off the given bits in place like bits() but without shifting.
* msb = 63, lsb = 0
*/
template <class T>
inline
T
mbits(T val, int first, int last)
{
return val & (mask(first+1) & ~mask(last));
}
inline uint64_t
mask(int first, int last)
{
return mbits((uint64_t)-1LL, first, last);
}
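// Examples: mbits(0xABCD, 15, 8) == 0xAB00 (bits kept in place), and the
// two-argument mask(15, 8) == 0xFF00.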
/**
* Sign-extend an N-bit value to 64 bits.
*/
template <int N>
inline
uint64_t
sext(uint64_t val)
{
int sign_bit = bits(val, N-1, N-1);
return sign_bit ? (val | ~mask(N)) : val;
}
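// Examples: sext<8>(0x80) == 0xFFFFFFFFFFFFFF80, while sext<8>(0x7F) stays
// 0x7F because bit 7 is clear.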
/**
* Returns val with bits first to last set to the LSBs of bit_val
*
* E.g.:
* first: 7
* last: 4
* val: 0xFFFF
* bit_val: 0x0000
* returned: 0xFF0F
*/
template <class T, class B>
inline
T
insertBits(T val, int first, int last, B bit_val)
{
T t_bit_val = bit_val;
T bmask = mask(first - last + 1) << last;
return ((t_bit_val << last) & bmask) | (val & ~bmask);
}
/**
* Overloaded for access to only one bit in value
*/
template <class T, class B>
inline
T
insertBits(T val, int bit, B bit_val)
{
return insertBits(val, bit, bit, bit_val);
}
/**
* A convenience function to replace bits first to last of val with bit_val
* in place.
*/
template <class T, class B>
inline
void
replaceBits(T& val, int first, int last, B bit_val)
{
val = insertBits(val, first, last, bit_val);
}
/** Overloaded function to allow to access only 1 bit*/
template <class T, class B>
inline
void
replaceBits(T& val, int bit, B bit_val)
{
val = insertBits(val, bit, bit, bit_val);
}
/**
* Takes a variable length word and returns the mirrored version
* (Bit by bit, LSB=>MSB).
*
* algorithm from
* http://graphics.stanford.edu/~seander/bithacks.html
* #ReverseBitsByLookupTable
*
* @param val: variable length word
* @param size: number of bytes to mirror
* @return mirrored word
*/
template <class T>
T
reverseBits(T val, std::size_t size = sizeof(T))
{
static_assert(std::is_integral<T>::value, "Expecting an integer type");
assert(size <= sizeof(T));
T output = 0;
for (auto byte = 0; byte < size; byte++, val = static_cast<T>(val >> 8)) {
output = (output << 8) | reverseLookUpTable[val & 0xFF];
}
return output;
}
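// Examples: reverseBits(uint8_t(0x01)) == 0x80, and
// reverseBits(uint16_t(0x0102)) == 0x4080 (all 16 bits mirrored).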
/**
* Returns the bit position of the MSB that is set in the input
*/
inline
int
findMsbSet(uint64_t val) {
int msb = 0;
if (!val)
return 0;
if (bits(val, 63,32)) { msb += 32; val >>= 32; }
if (bits(val, 31,16)) { msb += 16; val >>= 16; }
if (bits(val, 15,8)) { msb += 8; val >>= 8; }
if (bits(val, 7,4)) { msb += 4; val >>= 4; }
if (bits(val, 3,2)) { msb += 2; val >>= 2; }
if (bits(val, 1,1)) { msb += 1; }
return msb;
}
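// Examples: findMsbSet(0x8000) == 15 and findMsbSet(1) == 0; a zero input
// also returns 0, indistinguishable from bit 0 being set.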
/**
* Returns the bit position of the LSB that is set in the input
*/
inline int
findLsbSet(uint64_t val) {
int lsb = 0;
if (!val)
return sizeof(val) * 8;
if (!bits(val, 31,0)) { lsb += 32; val >>= 32; }
if (!bits(val, 15,0)) { lsb += 16; val >>= 16; }
if (!bits(val, 7,0)) { lsb += 8; val >>= 8; }
if (!bits(val, 3,0)) { lsb += 4; val >>= 4; }
if (!bits(val, 1,0)) { lsb += 2; val >>= 2; }
if (!bits(val, 0,0)) { lsb += 1; }
return lsb;
}
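// Examples: findLsbSet(0x8000) == 15, findLsbSet(1) == 0, and findLsbSet(0)
// returns 64 (no bit set).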
/**
* Checks if a number is a power of two, or zero.
*/
template <class T>
inline bool
isPow2(T v) {
return (v & (v - 1)) == (T)0;
}
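// Examples: isPow2(0), isPow2(1) and isPow2(4096) are true; isPow2(6) is false.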
/**
* Returns the number of set ones in the provided value.
* PD algorithm from
* http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
*/
inline int
popCount(uint64_t val) {
#ifndef __has_builtin
#define __has_builtin(foo) 0
#endif
#if defined(__GNUC__) || (defined(__clang__) && __has_builtin(__builtin_popcountl))
return __builtin_popcountl(val);
#else
const uint64_t m1 = 0x5555555555555555; // ..010101b
const uint64_t m2 = 0x3333333333333333; // ..110011b
const uint64_t m4 = 0x0f0f0f0f0f0f0f0f; // ..001111b
const uint64_t sum = 0x0101010101010101;
val -= (val >> 1) & m1; // 2 bits count -> 2 bits
val = (val & m2) + ((val >> 2) & m2); // 4 bits count -> 4 bits
val = (val + (val >> 4)) & m4; // 8 bits count -> 8 bits
return (val * sum) >> 56; // horizontal sum
#endif // defined(__GNUC__) || (defined(__clang__) && __has_builtin(__builtin_popcountl))
}
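// Examples: popCount(0) == 0 and popCount(0xF0F0) == 8.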
/**
* Align to the next highest power of two.
*
* The number passed in is aligned to the next highest power of two,
* if it is not already a power of two. Please note that if 0 is
* passed in, 0 is returned.
*
* This code has been modified from the following:
* http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
*/
inline uint64_t alignToPowerOfTwo(uint64_t val)
{
val--;
val |= val >> 1;
val |= val >> 2;
val |= val >> 4;
val |= val >> 8;
val |= val >> 16;
val |= val >> 32;
val++;
return val;
}
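// Examples: alignToPowerOfTwo(5) == 8, alignToPowerOfTwo(8) == 8, and
// alignToPowerOfTwo(0) == 0 (the documented special case).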
/**
* Count trailing zeros in a 32-bit value.
*
* @param value An input value
* @return The number of trailing zeros or 32 if the value is zero.
*/
inline int ctz32(uint32_t value)
{
return value ? __builtin_ctzl(value) : 32;
}
/**
* Count trailing zeros in a 64-bit value.
*
* @param value An input value
* @return The number of trailing zeros or 64 if the value is zero.
*/
inline int ctz64(uint64_t value)
{
return value ? __builtin_ctzll(value) : 64;
}
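// Examples: ctz32(0x8) == 3, ctz32(0) == 32, ctz64(1ULL << 40) == 40, and
// ctz64(0) == 64.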
#endif // __BASE_BITFIELD_HH__
/*
* eth.h
*
* Ethernet.
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* $Id: eth.h,v 1.15 2004/01/03 08:47:23 dugsong Exp $
*/
#ifndef DNET_ETH_H
#define DNET_ETH_H
#define ETH_ADDR_LEN 6
#define ETH_ADDR_BITS 48
#define ETH_TYPE_LEN 2
#define ETH_CRC_LEN 4
#define ETH_HDR_LEN 14
#define ETH_LEN_MIN 64 /* minimum frame length with CRC */
#define ETH_LEN_MAX 1518 /* maximum frame length with CRC */
#define ETH_MTU (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
#define ETH_MIN (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
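/* For reference: ETH_MTU = 1518 - 14 - 4 = 1500 and ETH_MIN = 64 - 14 - 4 = 46,
 * i.e. the classic Ethernet payload bounds once header and CRC are excluded. */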
typedef struct eth_addr {
uint8_t data[ETH_ADDR_LEN];
} eth_addr_t;
struct eth_hdr {
eth_addr_t eth_dst; /* destination address */
eth_addr_t eth_src; /* source address */
uint16_t eth_type; /* payload type */
};
/*
* Ethernet payload types - http://standards.ieee.org/regauth/ethertype
*/
#define ETH_TYPE_PUP 0x0200 /* PUP protocol */
#define ETH_TYPE_IP 0x0800 /* IP protocol */
#define ETH_TYPE_ARP 0x0806 /* address resolution protocol */
#define ETH_TYPE_REVARP 0x8035 /* reverse addr resolution protocol */
#define ETH_TYPE_8021Q 0x8100 /* IEEE 802.1Q VLAN tagging */
#define ETH_TYPE_IPV6 0x86DD /* IPv6 protocol */
#define ETH_TYPE_MPLS 0x8847 /* MPLS */
#define ETH_TYPE_MPLS_MCAST 0x8848 /* MPLS Multicast */
#define ETH_TYPE_PPPOEDISC 0x8863 /* PPP Over Ethernet Discovery Stage */
#define ETH_TYPE_PPPOE 0x8864 /* PPP Over Ethernet Session Stage */
#define ETH_TYPE_LOOPBACK 0x9000 /* used to test interfaces */
#define ETH_IS_MULTICAST(ea) (*(ea) & 0x01) /* is address mcast/bcast? */
#define ETH_ADDR_BROADCAST "\xff\xff\xff\xff\xff\xff"
#define eth_pack_hdr(h, dst, src, type) do { \
struct eth_hdr *eth_pack_p = (struct eth_hdr *)(h); \
memmove(&eth_pack_p->eth_dst, &(dst), ETH_ADDR_LEN); \
memmove(&eth_pack_p->eth_src, &(src), ETH_ADDR_LEN); \
eth_pack_p->eth_type = htons(type); \
} while (0)
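/* Illustrative use of eth_pack_hdr (hypothetical buffer and addresses, not
 * part of this header):
 *
 *   uint8_t frame[ETH_HDR_LEN];
 *   eth_addr_t dst = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
 *   eth_addr_t src = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
 *   eth_pack_hdr(frame, dst, src, ETH_TYPE_ARP);
 */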
typedef struct eth_handle eth_t;
__BEGIN_DECLS
eth_t *eth_open(const char *device);
int eth_get(eth_t *e, eth_addr_t *ea);
int eth_set(eth_t *e, const eth_addr_t *ea);
size_t eth_send(eth_t *e, const void *buf, size_t len);
eth_t *eth_close(eth_t *e);
char *eth_ntop(const eth_addr_t *eth, char *dst, size_t len);
int eth_pton(const char *src, eth_addr_t *dst);
char *eth_ntoa(const eth_addr_t *eth);
#define eth_aton eth_pton
__END_DECLS
#endif /* DNET_ETH_H */
/*
* Copyright (c) 2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* Copyright (c) 2010 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstddef>
#include <cstdio>
#include <sstream>
#include <string>
#include "sims/nic/e1000_gem5/support.h"
#include "sims/nic/e1000_gem5/gem5/inet.h"
using namespace std;
EthAddr::EthAddr()
{
memset(data, 0, ETH_ADDR_LEN);
}
EthAddr::EthAddr(const uint8_t ea[ETH_ADDR_LEN])
{
for (int i = 0; i < ETH_ADDR_LEN; ++i)
data[i] = ea[i];
}
EthAddr::EthAddr(const eth_addr &ea)
{
for (int i = 0; i < ETH_ADDR_LEN; ++i)
data[i] = ea.data[i];
}
EthAddr::EthAddr(const std::string &addr)
{
parse(addr);
}
const EthAddr &
EthAddr::operator=(const eth_addr &ea)
{
for (int i = 0; i < ETH_ADDR_LEN; ++i)
data[i] = ea.data[i];
return *this;
}
const EthAddr &
EthAddr::operator=(const std::string &addr)
{
parse(addr);
return *this;
}
void
EthAddr::parse(const std::string &addr)
{
// the hack below is to make sure that ETH_ADDR_LEN is 6 otherwise
// the sscanf function won't work.
int bytes[ETH_ADDR_LEN == 6 ? ETH_ADDR_LEN : -1];
if (sscanf(addr.c_str(), "%x:%x:%x:%x:%x:%x", &bytes[0], &bytes[1],
&bytes[2], &bytes[3], &bytes[4], &bytes[5]) != ETH_ADDR_LEN) {
memset(data, 0xff, ETH_ADDR_LEN);
return;
}
for (int i = 0; i < ETH_ADDR_LEN; ++i) {
if (bytes[i] & ~0xff) {
memset(data, 0xff, ETH_ADDR_LEN);
return;
}
data[i] = bytes[i];
}
}
string
EthAddr::string() const
{
stringstream stream;
stream << *this;
return stream.str();
}
bool
operator==(const EthAddr &left, const EthAddr &right)
{
return !memcmp(left.bytes(), right.bytes(), ETH_ADDR_LEN);
}
ostream &
operator<<(ostream &stream, const EthAddr &ea)
{
const uint8_t *a = ea.addr();
// Print the address in the usual colon-separated hex notation.
ios_base::fmtflags flags = stream.flags();
stream << hex << (unsigned)a[0] << ":" << (unsigned)a[1] << ":" << (unsigned)a[2]
<< ":" << (unsigned)a[3] << ":" << (unsigned)a[4] << ":" << (unsigned)a[5];
stream.flags(flags);
return stream;
}
string
IpAddress::string() const
{
stringstream stream;
stream << *this;
return stream.str();
}
bool
operator==(const IpAddress &left, const IpAddress &right)
{
return left.ip() == right.ip();
}
ostream &
operator<<(ostream &stream, const IpAddress &ia)
{
uint32_t ip = ia.ip();
// Cast to unsigned so the octets print as decimal numbers, not raw chars.
stream << (unsigned)(uint8_t)(ip >> 24) << "." << (unsigned)(uint8_t)(ip >> 16) << "." <<
(unsigned)(uint8_t)(ip >> 8) << "." << (unsigned)(uint8_t)(ip >> 0);
return stream;
}
string
IpNetmask::string() const
{
stringstream stream;
stream << *this;
return stream.str();
}
bool
operator==(const IpNetmask &left, const IpNetmask &right)
{
return (left.ip() == right.ip()) &&
(left.netmask() == right.netmask());
}
ostream &
operator<<(ostream &stream, const IpNetmask &in)
{
stream << (const IpAddress &)in << "/" << in.netmask();
return stream;
}
string
IpWithPort::string() const
{
stringstream stream;
stream << *this;
return stream.str();
}
bool
operator==(const IpWithPort &left, const IpWithPort &right)
{
return (left.ip() == right.ip()) && (left.port() == right.port());
}
ostream &
operator<<(ostream &stream, const IpWithPort &iwp)
{
stream << (const IpAddress &)iwp << ":" << iwp.port();
return stream;
}
uint16_t
cksum(const IpPtr &ptr)
{
int sum = ip_cksum_add(ptr->bytes(), ptr->hlen(), 0);
return ip_cksum_carry(sum);
}
uint16_t
__tu_cksum(const IpPtr &ip)
{
int tcplen = ip->len() - ip->hlen();
int sum = ip_cksum_add(ip->payload(), tcplen, 0);
sum = ip_cksum_add(&ip->ip_src, 8, sum); // source and destination
sum += htons(ip->ip_p + tcplen);
return ip_cksum_carry(sum);
}
uint16_t
__tu_cksum6(const Ip6Ptr &ip6)
{
int tcplen = ip6->plen() - ip6->extensionLength();
int sum = ip_cksum_add(ip6->payload(), tcplen, 0);
sum = ip_cksum_add(ip6->src(), 32, sum);
sum += htons(ip6->proto() + tcplen);
return ip_cksum_carry(sum);
}
uint16_t
cksum(const TcpPtr &tcp)
{
if (IpPtr(tcp.packet())) {
return __tu_cksum(IpPtr(tcp.packet()));
} else if (Ip6Ptr(tcp.packet())) {
return __tu_cksum6(Ip6Ptr(tcp.packet()));
} else {
panic("Unrecognized IP packet format");
}
// Should never reach here
return 0;
}
uint16_t
cksum(const UdpPtr &udp)
{
if (IpPtr(udp.packet())) {
return __tu_cksum(IpPtr(udp.packet()));
} else if (Ip6Ptr(udp.packet())) {
return __tu_cksum6(Ip6Ptr(udp.packet()));
} else {
panic("Unrecognized IP packet format");
}
return 0;
}
bool
IpHdr::options(vector<const IpOpt *> &vec) const
{
vec.clear();
const uint8_t *data = bytes() + sizeof(struct ip_hdr);
int all = hlen() - sizeof(struct ip_hdr);
while (all > 0) {
const IpOpt *opt = (const IpOpt *)data;
int len = opt->len();
if (all < len)
return false;
vec.push_back(opt);
all -= len;
data += len;
}
return true;
}
#define IP6_EXTENSION(nxt) (nxt == IP_PROTO_HOPOPTS) ? true : \
(nxt == IP_PROTO_ROUTING) ? true : \
(nxt == IP_PROTO_FRAGMENT) ? true : \
(nxt == IP_PROTO_AH) ? true : \
(nxt == IP_PROTO_ESP) ? true: \
(nxt == IP_PROTO_DSTOPTS) ? true : false
/* Scan the IP6 header for all header extensions
* and return the number of headers found
*/
int
Ip6Hdr::extensionLength() const
{
const uint8_t *data = bytes() + IP6_HDR_LEN;
uint8_t nxt = ip6_nxt;
int len = 0;
int all = plen();
while (IP6_EXTENSION(nxt)) {
const Ip6Opt *ext = (const Ip6Opt *)data;
nxt = ext->nxt();
len += ext->len();
data += ext->len();
all -= ext->len();
assert(all >= 0);
}
return len;
}
/* Scan the IP6 header for a particular extension
* header type and return a pointer to it if it
* exists, otherwise return NULL
*/
const Ip6Opt*
Ip6Hdr::getExt(uint8_t ext_type) const
{
const uint8_t *data = bytes() + IP6_HDR_LEN;
uint8_t nxt = ip6_nxt;
Ip6Opt* opt = NULL;
int all = plen();
while (IP6_EXTENSION(nxt)) {
opt = (Ip6Opt *)data;
if (nxt == ext_type) {
break;
}
nxt = opt->nxt();
data += opt->len();
all -= opt->len();
opt = NULL;
assert(all >= 0);
}
return (const Ip6Opt*)opt;
}
/* Scan the IP6 header and any extension headers
* to find what type of Layer 4 header exists
* after this header
*/
uint8_t
Ip6Hdr::proto() const
{
const uint8_t *data = bytes() + IP6_HDR_LEN;
uint8_t nxt = ip6_nxt;
int all = plen();
while (IP6_EXTENSION(nxt)) {
const Ip6Opt *ext = (const Ip6Opt *)data;
nxt = ext->nxt();
data += ext->len();
all -= ext->len();
assert(all >= 0);
}
return nxt;
}
bool
TcpHdr::options(vector<const TcpOpt *> &vec) const
{
vec.clear();
const uint8_t *data = bytes() + sizeof(struct tcp_hdr);
int all = off() - sizeof(struct tcp_hdr);
while (all > 0) {
const TcpOpt *opt = (const TcpOpt *)data;
int len = opt->len();
if (all < len)
return false;
vec.push_back(opt);
all -= len;
data += len;
}
return true;
}
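/* hsplit() below computes the header-split point of a received frame: the
 * byte offset at which the TCP/UDP payload begins (or the IP/IPv6 payload if
 * no recognized L4 header is present), and 0 for non-IP frames. The NIC model
 * presumably uses this offset to split header and payload into separate
 * receive buffers. */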
int
hsplit(const EthPacketPtr &ptr)
{
int split_point = 0;
IpPtr ip(ptr);
Ip6Ptr ip6(ptr);
if (ip) {
split_point = ip.pstart();
TcpPtr tcp(ip);
if (tcp)
split_point = tcp.pstart();
UdpPtr udp(ip);
if (udp)
split_point = udp.pstart();
} else if (ip6) {
split_point = ip6.pstart();
TcpPtr tcp(ip6);
if (tcp)
split_point = tcp.pstart();
UdpPtr udp(ip6);
if (udp)
split_point = udp.pstart();
}
return split_point;
}
/*
* Copyright (c) 2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* Copyright (c) 2010 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BASE_INET_HH__
#define __BASE_INET_HH__
#include <iosfwd>
#include <string>
#include <utility>
#include <vector>
#include "sims/nic/e1000_gem5/support.h"
#include "sims/nic/e1000_gem5/gem5/eth.h"
#include "sims/nic/e1000_gem5/gem5/ip.h"
#include "sims/nic/e1000_gem5/gem5/ip6.h"
#include "sims/nic/e1000_gem5/gem5/tcp.h"
#include "sims/nic/e1000_gem5/gem5/udp.h"
/*
* Ethernet Stuff
*/
struct EthAddr
{
protected:
uint8_t data[ETH_ADDR_LEN];
void parse(const std::string &addr);
public:
EthAddr();
EthAddr(const uint8_t ea[ETH_ADDR_LEN]);
EthAddr(const eth_addr &ea);
EthAddr(const std::string &addr);
const EthAddr &operator=(const eth_addr &ea);
const EthAddr &operator=(const std::string &addr);
int size() const { return sizeof(eth_addr); }
const uint8_t *bytes() const { return &data[0]; }
uint8_t *bytes() { return &data[0]; }
const uint8_t *addr() const { return &data[0]; }
bool unicast() const { return !(data[0] & 0x01); }
bool multicast() const { return !unicast() && !broadcast(); }
bool broadcast() const
{
bool isBroadcast = true;
for (int i = 0; i < ETH_ADDR_LEN; ++i) {
isBroadcast = isBroadcast && data[i] == 0xff;
}
return isBroadcast;
}
std::string string() const;
operator uint64_t() const
{
uint64_t reg = 0;
reg |= ((uint64_t)data[0]) << 40;
reg |= ((uint64_t)data[1]) << 32;
reg |= ((uint64_t)data[2]) << 24;
reg |= ((uint64_t)data[3]) << 16;
reg |= ((uint64_t)data[4]) << 8;
reg |= ((uint64_t)data[5]) << 0;
return reg;
}
};
std::ostream &operator<<(std::ostream &stream, const EthAddr &ea);
bool operator==(const EthAddr &left, const EthAddr &right);
struct EthHdr : public eth_hdr
{
bool isVlan() const { return (ntohs(eth_type) == ETH_TYPE_8021Q); }
uint16_t type() const {
if (!isVlan())
return ntohs(eth_type);
else
// L3 type is now 16 bytes into the hdr with 802.1Q
// instead of 12. dnet/eth.h only supports 802.1
return ntohs(*((uint16_t*)(((uint8_t *)this) + 16)));
}
uint16_t vlanId() const {
if (isVlan())
return ntohs(*((uint16_t*)(((uint8_t *)this) + 14)));
else
return 0x0000;
}
const EthAddr &src() const { return *(EthAddr *)&eth_src; }
const EthAddr &dst() const { return *(EthAddr *)&eth_dst; }
int size() const {
if (!isVlan())
return sizeof(eth_hdr);
else
return (sizeof(eth_hdr)+4);
}
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + size(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + size(); }
};
class EthPtr
{
protected:
friend class IpPtr;
friend class Ip6Ptr;
EthPacketPtr p;
public:
EthPtr() {}
EthPtr(const EthPacketPtr &ptr) : p(ptr) { }
EthHdr *operator->() { return (EthHdr *)p->data; }
EthHdr &operator*() { return *(EthHdr *)p->data; }
operator EthHdr *() { return (EthHdr *)p->data; }
const EthHdr *operator->() const { return (const EthHdr *)p->data; }
const EthHdr &operator*() const { return *(const EthHdr *)p->data; }
operator const EthHdr *() const { return (const EthHdr *)p->data; }
const EthPtr &operator=(const EthPacketPtr &ptr) { p = ptr; return *this; }
EthPacketPtr packet() const { return p; }
bool operator!() const { return !p; }
operator bool() const { return (p != nullptr); }
int off() const { return 0; }
int pstart() const { return off() + ((const EthHdr*)p->data)->size(); }
};
/*
* IP Stuff
*/
struct IpAddress
{
protected:
uint32_t _ip;
public:
IpAddress() : _ip(0)
{}
IpAddress(const uint32_t __ip) : _ip(__ip)
{}
uint32_t ip() const { return _ip; }
std::string string() const;
};
std::ostream &operator<<(std::ostream &stream, const IpAddress &ia);
bool operator==(const IpAddress &left, const IpAddress &right);
struct IpNetmask : public IpAddress
{
protected:
uint8_t _netmask;
public:
IpNetmask() : IpAddress(), _netmask(0)
{}
IpNetmask(const uint32_t __ip, const uint8_t __netmask) :
IpAddress(__ip), _netmask(__netmask)
{}
uint8_t netmask() const { return _netmask; }
std::string string() const;
};
std::ostream &operator<<(std::ostream &stream, const IpNetmask &in);
bool operator==(const IpNetmask &left, const IpNetmask &right);
struct IpWithPort : public IpAddress
{
protected:
uint16_t _port;
public:
IpWithPort() : IpAddress(), _port(0)
{}
IpWithPort(const uint32_t __ip, const uint16_t __port) :
IpAddress(__ip), _port(__port)
{}
uint16_t port() const { return _port; }
std::string string() const;
};
std::ostream &operator<<(std::ostream &stream, const IpWithPort &iwp);
bool operator==(const IpWithPort &left, const IpWithPort &right);
struct IpOpt;
struct IpHdr : public ip_hdr
{
uint8_t version() const { return ip_v; }
uint8_t hlen() const { return ip_hl * 4; }
uint8_t tos() const { return ip_tos; }
uint16_t len() const { return ntohs(ip_len); }
uint16_t id() const { return ntohs(ip_id); }
uint16_t frag_flags() const { return ntohs(ip_off) >> 13; }
uint16_t frag_off() const { return ntohs(ip_off) & 0x1fff; }
uint8_t ttl() const { return ip_ttl; }
uint8_t proto() const { return ip_p; }
uint16_t sum() const { return ip_sum; }
uint32_t src() const { return ntohl(ip_src); }
uint32_t dst() const { return ntohl(ip_dst); }
void sum(uint16_t sum) { ip_sum = sum; }
void id(uint16_t _id) { ip_id = htons(_id); }
void len(uint16_t _len) { ip_len = htons(_len); }
bool options(std::vector<const IpOpt *> &vec) const;
int size() const { return hlen(); }
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + size(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + size(); }
};
class IpPtr
{
protected:
friend class TcpPtr;
friend class UdpPtr;
EthPacketPtr p;
bool eth_hdr_vlan;
void set(const EthPacketPtr &ptr)
{
p = 0;
eth_hdr_vlan = false;
if (ptr) {
EthHdr *eth = (EthHdr *)ptr->data;
if (eth->type() == ETH_TYPE_IP)
p = ptr;
if (eth->isVlan())
eth_hdr_vlan = true;
}
}
public:
IpPtr() : p(0), eth_hdr_vlan(false) {}
IpPtr(const EthPacketPtr &ptr) : p(0), eth_hdr_vlan(false) { set(ptr); }
IpPtr(const EthPtr &ptr) : p(0), eth_hdr_vlan(false) { set(ptr.p); }
IpPtr(const IpPtr &ptr) : p(ptr.p), eth_hdr_vlan(ptr.eth_hdr_vlan) { }
IpHdr *get() { return (IpHdr *)(p->data + sizeof(eth_hdr) +
((eth_hdr_vlan) ? 4 : 0)); }
IpHdr *operator->() { return get(); }
IpHdr &operator*() { return *get(); }
const IpHdr *get() const
{ return (const IpHdr *)(p->data + sizeof(eth_hdr) +
((eth_hdr_vlan) ? 4 : 0)); }
const IpHdr *operator->() const { return get(); }
const IpHdr &operator*() const { return *get(); }
const IpPtr &operator=(const EthPacketPtr &ptr) { set(ptr); return *this; }
const IpPtr &operator=(const EthPtr &ptr) { set(ptr.p); return *this; }
const IpPtr &operator=(const IpPtr &ptr) { p = ptr.p; return *this; }
EthPacketPtr packet() const { return p; }
bool operator!() const { return !p; }
operator bool() const { return (p != nullptr); }
int off() const { return (sizeof(eth_hdr) + ((eth_hdr_vlan) ? 4 : 0)); }
int pstart() const { return (off() + get()->size()); }
};
uint16_t cksum(const IpPtr &ptr);
struct IpOpt : public ip_opt
{
uint8_t type() const { return opt_type; }
uint8_t typeNumber() const { return IP_OPT_NUMBER(opt_type); }
uint8_t typeClass() const { return IP_OPT_CLASS(opt_type); }
uint8_t typeCopied() const { return IP_OPT_COPIED(opt_type); }
uint8_t len() const { return IP_OPT_TYPEONLY(type()) ? 1 : opt_len; }
bool isNumber(int num) const { return typeNumber() == IP_OPT_NUMBER(num); }
bool isClass(int cls) const { return typeClass() == IP_OPT_CLASS(cls); }
bool isCopied(int cpy) const { return typeCopied() == IP_OPT_COPIED(cpy); }
const uint8_t *data() const { return opt_data.data8; }
void sec(ip_opt_data_sec &sec) const;
void lsrr(ip_opt_data_rr &rr) const;
void ssrr(ip_opt_data_rr &rr) const;
void ts(ip_opt_data_ts &ts) const;
uint16_t satid() const { return ntohs(opt_data.satid); }
uint16_t mtup() const { return ntohs(opt_data.mtu); }
uint16_t mtur() const { return ntohs(opt_data.mtu); }
void tr(ip_opt_data_tr &tr) const;
uint16_t rtralt() const { return ntohs(opt_data.rtralt); }
void sdb(std::vector<uint32_t> &vec) const;
};
/*
* Ip6 Classes
*/
struct Ip6Opt;
struct Ip6Hdr : public ip6_hdr
{
uint8_t version() const { return ip6_vfc; }
uint32_t flow() const { return ntohl(ip6_flow); }
uint16_t plen() const { return ntohs(ip6_plen); }
uint16_t hlen() const { return IP6_HDR_LEN; }
uint8_t nxt() const { return ip6_nxt; }
uint8_t hlim() const { return ip6_hlim; }
const uint8_t* src() const { return ip6_src.data; }
const uint8_t* dst() const { return ip6_dst.data; }
int extensionLength() const;
const Ip6Opt* getExt(uint8_t ext) const;
const Ip6Opt* fragmentExt() const { return getExt(IP_PROTO_FRAGMENT); }
const Ip6Opt* rtTypeExt() const { return getExt(IP_PROTO_ROUTING); }
const Ip6Opt* dstOptExt() const { return getExt(IP_PROTO_DSTOPTS); }
uint8_t proto() const;
void plen(uint16_t _plen) { ip6_plen = htons(_plen); }
int size() const { return IP6_HDR_LEN + extensionLength(); }
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + IP6_HDR_LEN
+ extensionLength(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + IP6_HDR_LEN
+ extensionLength(); }
};
class Ip6Ptr
{
protected:
friend class TcpPtr;
friend class UdpPtr;
EthPacketPtr p;
bool eth_hdr_vlan;
void set(const EthPacketPtr &ptr)
{
p = 0;
eth_hdr_vlan = false;
if (ptr) {
EthHdr *eth = (EthHdr *)ptr->data;
if (eth->type() == ETH_TYPE_IPV6)
p = ptr;
if (eth->isVlan())
eth_hdr_vlan = true;
}
}
public:
Ip6Ptr() : p(0), eth_hdr_vlan(false) {}
Ip6Ptr(const EthPacketPtr &ptr) : p(0), eth_hdr_vlan(false) { set(ptr); }
Ip6Ptr(const EthPtr &ptr) : p(0), eth_hdr_vlan(false) { set(ptr.p); }
Ip6Ptr(const Ip6Ptr &ptr) : p(ptr.p), eth_hdr_vlan(ptr.eth_hdr_vlan) { }
Ip6Hdr *get() { return (Ip6Hdr *)(p->data + sizeof(eth_hdr)
+ ((eth_hdr_vlan) ? 4 : 0)); }
Ip6Hdr *operator->() { return get(); }
Ip6Hdr &operator*() { return *get(); }
const Ip6Hdr *get() const
{ return (const Ip6Hdr *)(p->data + sizeof(eth_hdr)
+ ((eth_hdr_vlan) ? 4 : 0)); }
const Ip6Hdr *operator->() const { return get(); }
const Ip6Hdr &operator*() const { return *get(); }
const Ip6Ptr &operator=(const EthPacketPtr &ptr)
{ set(ptr); return *this; }
const Ip6Ptr &operator=(const EthPtr &ptr)
{ set(ptr.p); return *this; }
const Ip6Ptr &operator=(const Ip6Ptr &ptr)
{ p = ptr.p; return *this; }
EthPacketPtr packet() const { return p; }
bool operator!() const { return !p; }
operator bool() const { return (p != nullptr); }
int off() const { return sizeof(eth_hdr) + ((eth_hdr_vlan) ? 4 : 0); }
int pstart() const { return off() + get()->size(); }
};
// Dnet supplied ipv6 opt header is incomplete and
// newer NIC card filters expect a more robust
// ipv6 header option declaration.
struct ip6_opt_fragment {
uint16_t offlg;
uint32_t ident;
};
struct ip6_opt_routing_type2 {
uint8_t type;
uint8_t segleft;
uint32_t reserved;
ip6_addr_t addr;
};
#define HOME_ADDRESS_OPTION 0xC9
struct ip6_opt_dstopts {
uint8_t type;
uint8_t length;
ip6_addr_t addr;
} __attribute__((packed));
struct ip6_opt_hdr
{
uint8_t ext_nxt;
uint8_t ext_len;
union {
struct ip6_opt_fragment fragment;
struct ip6_opt_routing_type2 rtType2;
struct ip6_opt_dstopts dstOpts;
} ext_data;
} __attribute__((packed));
struct Ip6Opt : public ip6_opt_hdr
{
uint8_t nxt() const { return ext_nxt; }
uint8_t extlen() const { return ext_len; }
uint8_t len() const { return extlen() + 8; }
// Supporting the types of header extensions likely to be encountered:
// fragment, routing type 2 and dstopts.
// Routing type 2
uint8_t rtType2Type() const { return ext_data.rtType2.type; }
uint8_t rtType2SegLft() const { return ext_data.rtType2.segleft; }
const uint8_t* rtType2Addr() const { return ext_data.rtType2.addr.data; }
// Fragment
uint16_t fragmentOfflg() const { return ntohs(ext_data.fragment.offlg); }
uint32_t fragmentIdent() const { return ntohl(ext_data.fragment.ident); }
// Dst Options/Home Address Option
uint8_t dstOptType() const { return ext_data.dstOpts.type; }
uint8_t dstOptLength() const { return ext_data.dstOpts.length; }
const uint8_t* dstOptAddr() const { return ext_data.dstOpts.addr.data; }
};
/*
* TCP Stuff
*/
struct TcpOpt;
struct TcpHdr : public tcp_hdr
{
uint16_t sport() const { return ntohs(th_sport); }
uint16_t dport() const { return ntohs(th_dport); }
uint32_t seq() const { return ntohl(th_seq); }
uint32_t ack() const { return ntohl(th_ack); }
uint8_t off() const { return th_off*4; }
uint8_t flags() const { return th_flags & 0x3f; }
uint16_t win() const { return ntohs(th_win); }
uint16_t sum() const { return th_sum; }
uint16_t urp() const { return ntohs(th_urp); }
void sum(uint16_t sum) { th_sum = sum; }
void seq(uint32_t _seq) { th_seq = htonl(_seq); }
void flags(uint8_t _flags) { th_flags = _flags; }
bool options(std::vector<const TcpOpt *> &vec) const;
int size() const { return off(); }
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + size(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + size(); }
};
class TcpPtr
{
protected:
EthPacketPtr p;
int _off;
void set(const EthPacketPtr &ptr, int offset) { p = ptr; _off = offset; }
void set(const IpPtr &ptr)
{
if (ptr && ptr->proto() == IP_PROTO_TCP)
set(ptr.p, ptr.pstart());
else
set(0, 0);
}
void set(const Ip6Ptr &ptr)
{
if (ptr && ptr->proto() == IP_PROTO_TCP)
set(ptr.p, ptr.pstart());
else
set(0, 0);
}
public:
TcpPtr() : p(0), _off(0) {}
TcpPtr(const IpPtr &ptr) : p(0), _off(0) { set(ptr); }
TcpPtr(const Ip6Ptr &ptr) : p(0), _off(0) { set(ptr); }
TcpPtr(const TcpPtr &ptr) : p(ptr.p), _off(ptr._off) {}
TcpHdr *get() { return (TcpHdr *)(p->data + _off); }
TcpHdr *operator->() { return get(); }
TcpHdr &operator*() { return *get(); }
const TcpHdr *get() const { return (const TcpHdr *)(p->data + _off); }
const TcpHdr *operator->() const { return get(); }
const TcpHdr &operator*() const { return *get(); }
const TcpPtr &operator=(const IpPtr &i)
{ set(i); return *this; }
const TcpPtr &operator=(const TcpPtr &t)
{ set(t.p, t._off); return *this; }
EthPacketPtr packet() const { return p; }
bool operator!() const { return !p; }
operator bool() const { return (p != nullptr); }
int off() const { return _off; }
int pstart() const { return off() + get()->size(); }
};
uint16_t cksum(const TcpPtr &ptr);
struct TcpOpt : public tcp_opt
{
uint8_t type() const { return opt_type; }
uint8_t len() const { return TCP_OPT_TYPEONLY(type()) ? 1 : opt_len; }
bool isopt(int opt) const { return type() == opt; }
const uint8_t *data() const { return opt_data.data8; }
uint16_t mss() const { return ntohs(opt_data.mss); }
uint8_t wscale() const { return opt_data.wscale; }
uint32_t echo() const { return ntohl(opt_data.echo); }
uint32_t tsval() const { return ntohl(opt_data.timestamp[0]); }
uint32_t tsecr() const { return ntohl(opt_data.timestamp[1]); }
uint32_t cc() const { return ntohl(opt_data.cc); }
uint8_t cksum() const{ return opt_data.cksum; }
const uint8_t *md5() const { return opt_data.md5; }
int size() const { return len(); }
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + size(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + size(); }
};
/*
* UDP Stuff
*/
struct UdpHdr : public udp_hdr
{
uint16_t sport() const { return ntohs(uh_sport); }
uint16_t dport() const { return ntohs(uh_dport); }
uint16_t len() const { return ntohs(uh_ulen); }
uint16_t sum() const { return uh_sum; }
void sum(uint16_t sum) { uh_sum = sum; }
void len(uint16_t _len) { uh_ulen = htons(_len); }
int size() const { return sizeof(udp_hdr); }
const uint8_t *bytes() const { return (const uint8_t *)this; }
const uint8_t *payload() const { return bytes() + size(); }
uint8_t *bytes() { return (uint8_t *)this; }
uint8_t *payload() { return bytes() + size(); }
};
class UdpPtr
{
protected:
EthPacketPtr p;
int _off;
void set(const EthPacketPtr &ptr, int offset) { p = ptr; _off = offset; }
void set(const IpPtr &ptr)
{
if (ptr && ptr->proto() == IP_PROTO_UDP)
set(ptr.p, ptr.pstart());
else
set(0, 0);
}
void set(const Ip6Ptr &ptr)
{
if (ptr && ptr->proto() == IP_PROTO_UDP)
set(ptr.p, ptr.pstart());
else
set(0, 0);
}
public:
UdpPtr() : p(0), _off(0) {}
UdpPtr(const IpPtr &ptr) : p(0), _off(0) { set(ptr); }
UdpPtr(const Ip6Ptr &ptr) : p(0), _off(0) { set(ptr); }
UdpPtr(const UdpPtr &ptr) : p(ptr.p), _off(ptr._off) {}
UdpHdr *get() { return (UdpHdr *)(p->data + _off); }
UdpHdr *operator->() { return get(); }
UdpHdr &operator*() { return *get(); }
const UdpHdr *get() const { return (const UdpHdr *)(p->data + _off); }
const UdpHdr *operator->() const { return get(); }
const UdpHdr &operator*() const { return *get(); }
const UdpPtr &operator=(const IpPtr &i) { set(i); return *this; }
const UdpPtr &operator=(const UdpPtr &t)
{ set(t.p, t._off); return *this; }
EthPacketPtr packet() const { return p; }
bool operator!() const { return !p; }
operator bool() const { return (p != nullptr); }
int off() const { return _off; }
int pstart() const { return off() + get()->size(); }
};
uint16_t __tu_cksum6(const Ip6Ptr &ip6);
uint16_t __tu_cksum(const IpPtr &ip);
uint16_t cksum(const UdpPtr &ptr);
int hsplit(const EthPacketPtr &ptr);
#endif // __BASE_INET_HH__
/*
* ip.h
*
* Internet Protocol (RFC 791).
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* $Id: ip.h,v 1.23 2003/03/16 17:39:17 dugsong Exp $
*/
#ifndef DNET_IP_H
#define DNET_IP_H
#define IP_ADDR_LEN 4 /* IP address length */
#define IP_ADDR_BITS 32 /* IP address bits */
#define IP_HDR_LEN 20 /* base IP header length */
#define IP_OPT_LEN 2 /* base IP option length */
#define IP_OPT_LEN_MAX 40
#define IP_HDR_LEN_MAX (IP_HDR_LEN + IP_OPT_LEN_MAX)
#define IP_LEN_MAX 65535
#define IP_LEN_MIN IP_HDR_LEN
typedef uint32_t ip_addr_t;
#ifndef __GNUC__
# define __attribute__(x)
# pragma pack(1)
#endif
/*
* IP header, without options
*/
struct ip_hdr {
#if DNET_BYTESEX == DNET_BIG_ENDIAN
uint8_t ip_v:4, /* version */
ip_hl:4; /* header length (incl any options) */
#elif DNET_BYTESEX == DNET_LIL_ENDIAN
uint8_t ip_hl:4,
ip_v:4;
#else
# error "need to include <dnet.h>"
#endif
uint8_t ip_tos; /* type of service */
uint16_t ip_len; /* total length (incl header) */
uint16_t ip_id; /* identification */
uint16_t ip_off; /* fragment offset and flags */
uint8_t ip_ttl; /* time to live */
uint8_t ip_p; /* protocol */
uint16_t ip_sum; /* checksum */
ip_addr_t ip_src; /* source address */
ip_addr_t ip_dst; /* destination address */
} __attribute__((packed));
/*
* Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
*/
#define IP_TOS_DEFAULT 0x00 /* default */
#define IP_TOS_LOWDELAY 0x10 /* low delay */
#define IP_TOS_THROUGHPUT 0x08 /* high throughput */
#define IP_TOS_RELIABILITY 0x04 /* high reliability */
#define IP_TOS_LOWCOST 0x02 /* low monetary cost - XXX */
#define IP_TOS_ECT 0x02 /* ECN-capable transport */
#define IP_TOS_CE 0x01 /* congestion experienced */
/*
* IP precedence (high 3 bits of ip_tos), hopefully unused
*/
#define IP_TOS_PREC_ROUTINE 0x00
#define IP_TOS_PREC_PRIORITY 0x20
#define IP_TOS_PREC_IMMEDIATE 0x40
#define IP_TOS_PREC_FLASH 0x60
#define IP_TOS_PREC_FLASHOVERRIDE 0x80
#define IP_TOS_PREC_CRITIC_ECP 0xa0
#define IP_TOS_PREC_INTERNETCONTROL 0xc0
#define IP_TOS_PREC_NETCONTROL 0xe0
/*
* Fragmentation flags (ip_off)
*/
#define IP_RF 0x8000 /* reserved */
#define IP_DF 0x4000 /* don't fragment */
#define IP_MF 0x2000 /* more fragments (not last frag) */
#define IP_OFFMASK 0x1fff /* mask for fragment offset */
/*
* Time-to-live (ip_ttl), seconds
*/
#define IP_TTL_DEFAULT 64 /* default ttl, RFC 1122, RFC 1340 */
#define IP_TTL_MAX 255 /* maximum ttl */
/*
* Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
*/
#define IP_PROTO_IP 0 /* dummy for IP */
#define IP_PROTO_HOPOPTS IP_PROTO_IP /* IPv6 hop-by-hop options */
#define IP_PROTO_ICMP 1 /* ICMP */
#define IP_PROTO_IGMP 2 /* IGMP */
#define IP_PROTO_GGP 3 /* gateway-gateway protocol */
#define IP_PROTO_IPIP 4 /* IP in IP */
#define IP_PROTO_ST 5 /* ST datagram mode */
#define IP_PROTO_TCP 6 /* TCP */
#define IP_PROTO_CBT 7 /* CBT */
#define IP_PROTO_EGP 8 /* exterior gateway protocol */
#define IP_PROTO_IGP 9 /* interior gateway protocol */
#define IP_PROTO_BBNRCC 10 /* BBN RCC monitoring */
#define IP_PROTO_NVP 11 /* Network Voice Protocol */
#define IP_PROTO_PUP 12 /* PARC universal packet */
#define IP_PROTO_ARGUS 13 /* ARGUS */
#define IP_PROTO_EMCON 14 /* EMCON */
#define IP_PROTO_XNET 15 /* Cross Net Debugger */
#define IP_PROTO_CHAOS 16 /* Chaos */
#define IP_PROTO_UDP 17 /* UDP */
#define IP_PROTO_MUX 18 /* multiplexing */
#define IP_PROTO_DCNMEAS 19 /* DCN measurement */
#define IP_PROTO_HMP 20 /* Host Monitoring Protocol */
#define IP_PROTO_PRM 21 /* Packet Radio Measurement */
#define IP_PROTO_IDP 22 /* Xerox NS IDP */
#define IP_PROTO_TRUNK1 23 /* Trunk-1 */
#define IP_PROTO_TRUNK2 24 /* Trunk-2 */
#define IP_PROTO_LEAF1 25 /* Leaf-1 */
#define IP_PROTO_LEAF2 26 /* Leaf-2 */
#define IP_PROTO_RDP 27 /* "Reliable Datagram" proto */
#define IP_PROTO_IRTP 28 /* Inet Reliable Transaction */
#define IP_PROTO_TP 29 /* ISO TP class 4 */
#define IP_PROTO_NETBLT 30 /* Bulk Data Transfer */
#define IP_PROTO_MFPNSP 31 /* MFE Network Services */
#define IP_PROTO_MERITINP 32 /* Merit Internodal Protocol */
#define IP_PROTO_SEP 33 /* Sequential Exchange proto */
#define IP_PROTO_3PC 34 /* Third Party Connect proto */
#define IP_PROTO_IDPR 35 /* Interdomain Policy Route */
#define IP_PROTO_XTP 36 /* Xpress Transfer Protocol */
#define IP_PROTO_DDP 37 /* Datagram Delivery Proto */
#define IP_PROTO_CMTP 38 /* IDPR Ctrl Message Trans */
#define IP_PROTO_TPPP 39 /* TP++ Transport Protocol */
#define IP_PROTO_IL 40 /* IL Transport Protocol */
#define IP_PROTO_IPV6 41 /* IPv6 */
#define IP_PROTO_SDRP 42 /* Source Demand Routing */
#define IP_PROTO_ROUTING 43 /* IPv6 routing header */
#define IP_PROTO_FRAGMENT 44 /* IPv6 fragmentation header */
#define IP_PROTO_RSVP 46 /* Reservation protocol */
#define IP_PROTO_GRE 47 /* General Routing Encap */
#define IP_PROTO_MHRP 48 /* Mobile Host Routing */
#define IP_PROTO_ENA 49 /* ENA */
#define IP_PROTO_ESP 50 /* Encap Security Payload */
#define IP_PROTO_AH 51 /* Authentication Header */
#define IP_PROTO_INLSP 52 /* Integrated Net Layer Sec */
#define IP_PROTO_SWIPE 53 /* SWIPE */
#define IP_PROTO_NARP 54 /* NBMA Address Resolution */
#define IP_PROTO_MOBILE 55 /* Mobile IP, RFC 2004 */
#define IP_PROTO_TLSP 56 /* Transport Layer Security */
#define IP_PROTO_SKIP 57 /* SKIP */
#define IP_PROTO_ICMPV6 58 /* ICMP for IPv6 */
#define IP_PROTO_NONE 59 /* IPv6 no next header */
#define IP_PROTO_DSTOPTS 60 /* IPv6 destination options */
#define IP_PROTO_ANYHOST 61 /* any host internal proto */
#define IP_PROTO_CFTP 62 /* CFTP */
#define IP_PROTO_ANYNET 63 /* any local network */
#define IP_PROTO_EXPAK 64 /* SATNET and Backroom EXPAK */
#define IP_PROTO_KRYPTOLAN 65 /* Kryptolan */
#define IP_PROTO_RVD 66 /* MIT Remote Virtual Disk */
#define IP_PROTO_IPPC 67 /* Inet Pluribus Packet Core */
#define IP_PROTO_DISTFS 68 /* any distributed fs */
#define IP_PROTO_SATMON 69 /* SATNET Monitoring */
#define IP_PROTO_VISA 70 /* VISA Protocol */
#define IP_PROTO_IPCV 71 /* Inet Packet Core Utility */
#define IP_PROTO_CPNX 72 /* Comp Proto Net Executive */
#define IP_PROTO_CPHB 73 /* Comp Protocol Heart Beat */
#define IP_PROTO_WSN 74 /* Wang Span Network */
#define IP_PROTO_PVP 75 /* Packet Video Protocol */
#define IP_PROTO_BRSATMON 76 /* Backroom SATNET Monitor */
#define IP_PROTO_SUNND 77 /* SUN ND Protocol */
#define IP_PROTO_WBMON 78 /* WIDEBAND Monitoring */
#define IP_PROTO_WBEXPAK 79 /* WIDEBAND EXPAK */
#define IP_PROTO_EON 80 /* ISO CNLP */
#define IP_PROTO_VMTP 81 /* Versatile Msg Transport*/
#define IP_PROTO_SVMTP 82 /* Secure VMTP */
#define IP_PROTO_VINES 83 /* VINES */
#define IP_PROTO_TTP 84 /* TTP */
#define IP_PROTO_NSFIGP 85 /* NSFNET-IGP */
#define IP_PROTO_DGP 86 /* Dissimilar Gateway Proto */
#define IP_PROTO_TCF 87 /* TCF */
#define IP_PROTO_EIGRP 88 /* EIGRP */
#define IP_PROTO_OSPF 89 /* Open Shortest Path First */
#define IP_PROTO_SPRITERPC 90 /* Sprite RPC Protocol */
#define IP_PROTO_LARP 91 /* Locus Address Resolution */
#define IP_PROTO_MTP 92 /* Multicast Transport Proto */
#define IP_PROTO_AX25 93 /* AX.25 Frames */
#define IP_PROTO_IPIPENCAP 94 /* yet-another IP encap */
#define IP_PROTO_MICP 95 /* Mobile Internet Ctrl */
#define IP_PROTO_SCCSP 96 /* Semaphore Comm Sec Proto */
#define IP_PROTO_ETHERIP 97 /* Ethernet in IPv4 */
#define IP_PROTO_ENCAP 98 /* encapsulation header */
#define IP_PROTO_ANYENC 99 /* private encryption scheme */
#define IP_PROTO_GMTP 100 /* GMTP */
#define IP_PROTO_IFMP 101 /* Ipsilon Flow Mgmt Proto */
#define IP_PROTO_PNNI 102 /* PNNI over IP */
#define IP_PROTO_PIM 103 /* Protocol Indep Multicast */
#define IP_PROTO_ARIS 104 /* ARIS */
#define IP_PROTO_SCPS 105 /* SCPS */
#define IP_PROTO_QNX 106 /* QNX */
#define IP_PROTO_AN 107 /* Active Networks */
#define IP_PROTO_IPCOMP 108 /* IP Payload Compression */
#define IP_PROTO_SNP 109 /* Sitara Networks Protocol */
#define IP_PROTO_COMPAQPEER 110 /* Compaq Peer Protocol */
#define IP_PROTO_IPXIP 111 /* IPX in IP */
#define IP_PROTO_VRRP 112 /* Virtual Router Redundancy */
#define IP_PROTO_PGM 113 /* PGM Reliable Transport */
#define IP_PROTO_ANY0HOP 114 /* 0-hop protocol */
#define IP_PROTO_L2TP 115 /* Layer 2 Tunneling Proto */
#define IP_PROTO_DDX 116 /* D-II Data Exchange (DDX) */
#define IP_PROTO_IATP 117 /* Interactive Agent Xfer */
#define IP_PROTO_STP 118 /* Schedule Transfer Proto */
#define IP_PROTO_SRP 119 /* SpectraLink Radio Proto */
#define IP_PROTO_UTI 120 /* UTI */
#define IP_PROTO_SMP 121 /* Simple Message Protocol */
#define IP_PROTO_SM 122 /* SM */
#define IP_PROTO_PTP 123 /* Performance Transparency */
#define IP_PROTO_ISIS 124 /* ISIS over IPv4 */
#define IP_PROTO_FIRE 125 /* FIRE */
#define IP_PROTO_CRTP 126 /* Combat Radio Transport */
#define IP_PROTO_CRUDP 127 /* Combat Radio UDP */
#define IP_PROTO_SSCOPMCE 128 /* SSCOPMCE */
#define IP_PROTO_IPLT 129 /* IPLT */
#define IP_PROTO_SPS 130 /* Secure Packet Shield */
#define IP_PROTO_PIPE 131 /* Private IP Encap in IP */
#define IP_PROTO_SCTP 132 /* Stream Ctrl Transmission */
#define IP_PROTO_FC 133 /* Fibre Channel */
#define IP_PROTO_RSVPIGN 134 /* RSVP-E2E-IGNORE */
#define IP_PROTO_RAW 255 /* Raw IP packets */
#define IP_PROTO_RESERVED IP_PROTO_RAW /* Reserved */
#define IP_PROTO_MAX 255
/*
* Option types (opt_type) - http://www.iana.org/assignments/ip-parameters
*/
#define IP_OPT_CONTROL 0x00 /* control */
#define IP_OPT_DEBMEAS 0x40 /* debugging & measurement */
#define IP_OPT_COPY 0x80 /* copy into all fragments */
#define IP_OPT_RESERVED1 0x20
#define IP_OPT_RESERVED2 0x60
#define IP_OPT_EOL 0 /* end of option list */
#define IP_OPT_NOP 1 /* no operation */
#define IP_OPT_SEC (2|IP_OPT_COPY) /* DoD basic security */
#define IP_OPT_LSRR (3|IP_OPT_COPY) /* loose source route */
#define IP_OPT_TS (4|IP_OPT_DEBMEAS) /* timestamp */
#define IP_OPT_ESEC (5|IP_OPT_COPY) /* DoD extended security */
#define IP_OPT_CIPSO (6|IP_OPT_COPY) /* commercial security */
#define IP_OPT_RR 7 /* record route */
#define IP_OPT_SATID (8|IP_OPT_COPY) /* stream ID (obsolete) */
#define IP_OPT_SSRR (9|IP_OPT_COPY) /* strict source route */
#define IP_OPT_ZSU 10 /* experimental measurement */
#define IP_OPT_MTUP 11 /* MTU probe */
#define IP_OPT_MTUR 12 /* MTU reply */
#define IP_OPT_FINN (13|IP_OPT_COPY|IP_OPT_DEBMEAS) /* exp flow control */
#define IP_OPT_VISA (14|IP_OPT_COPY) /* exp access control */
#define IP_OPT_ENCODE 15 /* ??? */
#define IP_OPT_IMITD (16|IP_OPT_COPY) /* IMI traffic descriptor */
#define IP_OPT_EIP (17|IP_OPT_COPY) /* extended IP, RFC 1385 */
#define IP_OPT_TR (18|IP_OPT_DEBMEAS) /* traceroute */
#define IP_OPT_ADDEXT (19|IP_OPT_COPY) /* IPv7 ext addr, RFC 1475 */
#define IP_OPT_RTRALT (20|IP_OPT_COPY) /* router alert, RFC 2113 */
#define IP_OPT_SDB (21|IP_OPT_COPY) /* directed bcast, RFC 1770 */
#define IP_OPT_NSAPA (22|IP_OPT_COPY) /* NSAP addresses */
#define IP_OPT_DPS (23|IP_OPT_COPY) /* dynamic packet state */
#define IP_OPT_UMP (24|IP_OPT_COPY) /* upstream multicast */
#define IP_OPT_MAX 25
#define IP_OPT_COPIED(o) ((o) & 0x80)
#define IP_OPT_CLASS(o) ((o) & 0x60)
#define IP_OPT_NUMBER(o) ((o) & 0x1f)
#define IP_OPT_TYPEONLY(o) ((o) == IP_OPT_EOL || (o) == IP_OPT_NOP)
/*
* Security option data - RFC 791, 3.1
*/
struct ip_opt_data_sec {
uint16_t s; /* security */
uint16_t c; /* compartments */
uint16_t h; /* handling restrictions */
uint8_t tcc[3]; /* transmission control code */
} __attribute__((__packed__));
#define IP_OPT_SEC_UNCLASS 0x0000 /* unclassified */
#define IP_OPT_SEC_CONFID 0xf135 /* confidential */
#define IP_OPT_SEC_EFTO 0x789a /* EFTO */
#define IP_OPT_SEC_MMMM 0xbc4d /* MMMM */
#define IP_OPT_SEC_PROG 0x5e26 /* PROG */
#define IP_OPT_SEC_RESTR 0xaf13 /* restricted */
#define IP_OPT_SEC_SECRET 0xd788 /* secret */
#define IP_OPT_SEC_TOPSECRET 0x6bc5 /* top secret */
/*
* {Loose Source, Record, Strict Source} Route option data - RFC 791, 3.1
*/
struct ip_opt_data_rr {
uint8_t ptr; /* from start of option, >= 4 */
uint32_t iplist __flexarr; /* list of IP addresses */
} __attribute__((__packed__));
/*
* Timestamp option data - RFC 791, 3.1
*/
struct ip_opt_data_ts {
uint8_t ptr; /* from start of option, >= 5 */
#if DNET_BYTESEX == DNET_BIG_ENDIAN
uint8_t oflw:4, /* number of IPs skipped */
flg:4; /* address[ / timestamp] flag */
#elif DNET_BYTESEX == DNET_LIL_ENDIAN
uint8_t flg:4,
oflw:4;
#endif
uint32_t ipts __flexarr; /* IP address [/ timestamp] pairs */
} __attribute__((__packed__));
#define IP_OPT_TS_TSONLY 0 /* timestamps only */
#define IP_OPT_TS_TSADDR 1 /* IP address / timestamp pairs */
#define IP_OPT_TS_PRESPEC 3 /* IP address / zero timestamp pairs */
/*
* Traceroute option data - RFC 1393, 2.2
*/
struct ip_opt_data_tr {
uint16_t id; /* ID number */
uint16_t ohc; /* outbound hop count */
uint16_t rhc; /* return hop count */
uint32_t origip; /* originator IP address */
} __attribute__((__packed__));
/*
* IP option (following IP header)
*/
struct ip_opt {
uint8_t opt_type; /* option type */
uint8_t opt_len; /* option length >= IP_OPT_LEN */
union ip_opt_data {
struct ip_opt_data_sec sec; /* IP_OPT_SEC */
struct ip_opt_data_rr rr; /* IP_OPT_{L,S}RR */
struct ip_opt_data_ts ts; /* IP_OPT_TS */
uint16_t satid; /* IP_OPT_SATID */
uint16_t mtu; /* IP_OPT_MTU{P,R} */
struct ip_opt_data_tr tr; /* IP_OPT_TR */
uint32_t addext[2]; /* IP_OPT_ADDEXT */
uint16_t rtralt; /* IP_OPT_RTRALT */
uint32_t sdb[9]; /* IP_OPT_SDB */
uint8_t data8[IP_OPT_LEN_MAX - IP_OPT_LEN];
} opt_data;
} __attribute__((__packed__));
#ifndef __GNUC__
# pragma pack()
#endif
/*
* Classful addressing
*/
#define IP_CLASSA(i) (((uint32_t)(i) & htonl(0x80000000)) == \
htonl(0x00000000))
#define IP_CLASSA_NET (htonl(0xff000000))
#define IP_CLASSA_NSHIFT 24
#define IP_CLASSA_HOST (htonl(0x00ffffff))
#define IP_CLASSA_MAX 128
#define IP_CLASSB(i) (((uint32_t)(i) & htonl(0xc0000000)) == \
htonl(0x80000000))
#define IP_CLASSB_NET (htonl(0xffff0000))
#define IP_CLASSB_NSHIFT 16
#define IP_CLASSB_HOST (htonl(0x0000ffff))
#define IP_CLASSB_MAX 65536
#define IP_CLASSC(i) (((uint32_t)(i) & htonl(0xe0000000)) == \
htonl(0xc0000000))
#define IP_CLASSC_NET (htonl(0xffffff00))
#define IP_CLASSC_NSHIFT 8
#define IP_CLASSC_HOST (htonl(0x000000ff))
#define IP_CLASSD(i) (((uint32_t)(i) & htonl(0xf0000000)) == \
htonl(0xe0000000))
/* These ones aren't really net and host fields, but routing needn't know. */
#define IP_CLASSD_NET (htonl(0xf0000000))
#define IP_CLASSD_NSHIFT 28
#define IP_CLASSD_HOST (htonl(0x0fffffff))
#define IP_MULTICAST(i) IP_CLASSD(i)
#define IP_EXPERIMENTAL(i) (((uint32_t)(i) & htonl(0xf0000000)) == \
htonl(0xf0000000))
#define IP_BADCLASS(i) (((uint32_t)(i) & htonl(0xf0000000)) == \
htonl(0xf0000000))
#define IP_LOCAL_GROUP(i) (((uint32_t)(i) & htonl(0xffffff00)) == \
htonl(0xe0000000))
/*
* Reserved addresses
*/
#define IP_ADDR_ANY (htonl(0x00000000)) /* 0.0.0.0 */
#define IP_ADDR_BROADCAST (htonl(0xffffffff)) /* 255.255.255.255 */
#define IP_ADDR_LOOPBACK (htonl(0x7f000001)) /* 127.0.0.1 */
#define IP_ADDR_MCAST_ALL (htonl(0xe0000001)) /* 224.0.0.1 */
#define IP_ADDR_MCAST_LOCAL	(htonl(0xe00000ff))	/* 224.0.0.255 */
#define ip_pack_hdr(hdr, tos, len, id, off, ttl, p, src, dst) do { \
struct ip_hdr *ip_pack_p = (struct ip_hdr *)(hdr); \
ip_pack_p->ip_v = 4; ip_pack_p->ip_hl = 5; \
ip_pack_p->ip_tos = tos; ip_pack_p->ip_len = htons(len); \
ip_pack_p->ip_id = htons(id); ip_pack_p->ip_off = htons(off); \
ip_pack_p->ip_ttl = ttl; ip_pack_p->ip_p = p; \
ip_pack_p->ip_src = src; ip_pack_p->ip_dst = dst; \
} while (0)
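/*
 * Usage sketch (illustrative only; the addresses, ID, and payload size below
 * are hypothetical): fill a minimal 20-byte IPv4 header and then let
 * ip_checksum() compute ip_sum over the finished packet.
 *
 *	uint8_t pkt[20 + 8];		// IP header + 8-byte payload
 *	ip_addr_t src, dst;
 *	ip_pton("10.0.0.1", &src);
 *	ip_pton("10.0.0.2", &dst);
 *	ip_pack_hdr(pkt, 0, sizeof(pkt), 1, 0, 64, IP_PROTO_UDP, src, dst);
 *	ip_checksum(pkt, sizeof(pkt));
 */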
typedef struct ip_handle ip_t;
__BEGIN_DECLS
ip_t *ip_open(void);
size_t ip_send(ip_t *i, const void *buf, size_t len);
ip_t *ip_close(ip_t *i);
char *ip_ntop(const ip_addr_t *ip, char *dst, size_t len);
int ip_pton(const char *src, ip_addr_t *dst);
char *ip_ntoa(const ip_addr_t *ip);
#define ip_aton ip_pton
size_t ip_add_option(void *buf, size_t len,
int proto, const void *optbuf, size_t optlen);
void ip_checksum(void *buf, size_t len);
inline int
ip_cksum_add(const void *buf, size_t len, int cksum)
{
uint16_t *sp = (uint16_t *)buf;
int sn;
sn = len / 2;
do {
cksum += *sp++;
} while (--sn > 0);
if (len & 1)
cksum += htons(*(u_char *)sp << 8);
return (cksum);
}
inline uint16_t
ip_cksum_carry(int x)
{
x = (x >> 16) + (x & 0xffff);
return ~(x + (x >> 16)) & 0xffff;
}
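/*
 * Checksum helper usage (illustrative sketch): ip_cksum_add() accumulates the
 * 16-bit one's-complement sum of a buffer and ip_cksum_carry() folds the
 * carries back in, so a raw IPv4 header checksum can be computed as:
 *
 *	struct ip_hdr *ip = (struct ip_hdr *)buf;
 *	ip->ip_sum = 0;
 *	ip->ip_sum = ip_cksum_carry(ip_cksum_add(ip, ip->ip_hl << 2, 0));
 */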
__END_DECLS
#endif /* DNET_IP_H */
/*
* ip6.h
*
* Internet Protocol, Version 6 (RFC 2460).
*
* Copyright (c) 2002 Dug Song <dugsong@monkey.org>
*
* $Id: ip6.h,v 1.6 2004/02/23 10:01:15 dugsong Exp $
*/
#ifndef DNET_IP6_H
#define DNET_IP6_H
#define IP6_ADDR_LEN 16
#define IP6_ADDR_BITS 128
#define IP6_HDR_LEN 40 /* IPv6 header length */
#define IP6_LEN_MIN IP6_HDR_LEN
#define IP6_LEN_MAX 65535 /* non-jumbo payload */
#define IP6_MTU_MIN 1280 /* minimum MTU (1024 + 256) */
typedef struct ip6_addr {
uint8_t data[IP6_ADDR_LEN];
} ip6_addr_t;
#ifndef __GNUC__
# define __attribute__(x)
# pragma pack(1)
#endif
/*
* IPv6 header
*/
struct ip6_hdr {
union {
struct ip6_hdr_ctl {
uint32_t ip6_un1_flow; /* 20 bits of flow ID */
uint16_t ip6_un1_plen; /* payload length */
uint8_t ip6_un1_nxt; /* next header */
uint8_t ip6_un1_hlim; /* hop limit */
} ip6_un1;
uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */
} ip6_ctlun;
ip6_addr_t ip6_src;
ip6_addr_t ip6_dst;
} __attribute__((__packed__));
#define ip6_vfc ip6_ctlun.ip6_un2_vfc
#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow
#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt /* IP_PROTO_* */
#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim
#define IP6_VERSION 0x60
#define IP6_VERSION_MASK 0xf0 /* ip6_vfc version */
#if DNET_BYTESEX == DNET_BIG_ENDIAN
#define IP6_FLOWINFO_MASK 0x0fffffff /* ip6_flow info (28 bits) */
#define IP6_FLOWLABEL_MASK 0x000fffff /* ip6_flow label (20 bits) */
#elif DNET_BYTESEX == DNET_LIL_ENDIAN
#define IP6_FLOWINFO_MASK 0xffffff0f /* ip6_flow info (28 bits) */
#define IP6_FLOWLABEL_MASK 0xffff0f00 /* ip6_flow label (20 bits) */
#endif
/*
* Hop limit (ip6_hlim)
*/
#define IP6_HLIM_DEFAULT 64
#define IP6_HLIM_MAX 255
/*
* Preferred extension header order from RFC 2460, 4.1:
*
* IP_PROTO_IPV6, IP_PROTO_HOPOPTS, IP_PROTO_DSTOPTS, IP_PROTO_ROUTING,
* IP_PROTO_FRAGMENT, IP_PROTO_AH, IP_PROTO_ESP, IP_PROTO_DSTOPTS, IP_PROTO_*
*/
/*
* Routing header data (IP_PROTO_ROUTING)
*/
struct ip6_ext_data_routing {
uint8_t type; /* routing type */
uint8_t segleft; /* segments left */
/* followed by routing type specific data */
} __attribute__((__packed__));
struct ip6_ext_data_routing0 {
uint8_t type; /* always zero */
uint8_t segleft; /* segments left */
uint8_t reserved; /* reserved field */
uint8_t slmap[3]; /* strict/loose bit map */
ip6_addr_t addr[1]; /* up to 23 addresses */
} __attribute__((__packed__));
/*
* Fragment header data (IP_PROTO_FRAGMENT)
*/
struct ip6_ext_data_fragment {
uint16_t offlg; /* offset, reserved, and flag */
uint32_t ident; /* identification */
} __attribute__((__packed__));
/*
* Fragmentation offset, reserved, and flags (offlg)
*/
#if DNET_BYTESEX == DNET_BIG_ENDIAN
#define IP6_OFF_MASK 0xfff8 /* mask out offset from offlg */
#define IP6_RESERVED_MASK 0x0006 /* reserved bits in offlg */
#define IP6_MORE_FRAG 0x0001 /* more-fragments flag */
#elif DNET_BYTESEX == DNET_LIL_ENDIAN
#define IP6_OFF_MASK 0xf8ff /* mask out offset from offlg */
#define IP6_RESERVED_MASK 0x0600 /* reserved bits in offlg */
#define IP6_MORE_FRAG 0x0100 /* more-fragments flag */
#endif
/*
* Option types, for IP_PROTO_HOPOPTS, IP_PROTO_DSTOPTS headers
*/
#define IP6_OPT_PAD1 0x00 /* 00 0 00000 */
#define IP6_OPT_PADN 0x01 /* 00 0 00001 */
#define IP6_OPT_JUMBO 0xC2 /* 11 0 00010 = 194 */
#define IP6_OPT_JUMBO_LEN 6
#define IP6_OPT_RTALERT 0x05 /* 00 0 00101 */
#define IP6_OPT_RTALERT_LEN 4
#define IP6_OPT_RTALERT_MLD 0 /* Datagram contains an MLD message */
#define IP6_OPT_RTALERT_RSVP 1 /* Datagram contains an RSVP message */
#define IP6_OPT_RTALERT_ACTNET 2 /* contains an Active Networks msg */
#define IP6_OPT_LEN_MIN 2
#define IP6_OPT_TYPE(o) ((o) & 0xC0) /* high 2 bits of opt_type */
#define IP6_OPT_TYPE_SKIP 0x00 /* continue processing on failure */
#define IP6_OPT_TYPE_DISCARD 0x40 /* discard packet on failure */
#define IP6_OPT_TYPE_FORCEICMP 0x80 /* discard and send ICMP on failure */
#define IP6_OPT_TYPE_ICMP 0xC0 /* ...only if non-multicast dst */
#define IP6_OPT_MUTABLE 0x20 /* option data may change en route */
/*
* Extension header (chained via {ip6,ext}_nxt, following IPv6 header)
*/
struct ip6_ext_hdr {
uint8_t ext_nxt; /* next header */
uint8_t ext_len; /* following length in units of 8 octets */
union {
struct ip6_ext_data_routing routing;
struct ip6_ext_data_fragment fragment;
} ext_data;
} __attribute__((__packed__));
#ifndef __GNUC__
# pragma pack()
#endif
/*
* Reserved addresses
*/
#define IP6_ADDR_UNSPEC \
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
#define IP6_ADDR_LOOPBACK \
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
#define ip6_pack_hdr(hdr, fc, fl, plen, nxt, hlim, src, dst) do { \
struct ip6_hdr *ip6 = (struct ip6_hdr *)(hdr); \
ip6->ip6_flow = htonl(((uint32_t)(fc) << 28) & \
(IP6_FLOWLABEL_MASK | (fl))); \
ip6->ip6_vfc = (IP6_VERSION | ((fc) >> 4)); \
ip6->ip6_plen = htons((plen)); \
ip6->ip6_nxt = (nxt); ip6->ip6_hlim = (hlim); \
memmove(&ip6->ip6_src, &(src), IP6_ADDR_LEN); \
memmove(&ip6->ip6_dst, &(dst), IP6_ADDR_LEN); \
} while (0);
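/*
 * Usage sketch (illustrative; the addresses and payload length are
 * hypothetical):
 *
 *	struct ip6_hdr hdr;
 *	ip6_addr_t src, dst;
 *	ip6_pton("2001:db8::1", &src);
 *	ip6_pton("2001:db8::2", &dst);
 *	ip6_pack_hdr(&hdr, 0, 0, 8, IP_PROTO_UDP, IP6_HLIM_DEFAULT, src, dst);
 */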
__BEGIN_DECLS
char *ip6_ntop(const ip6_addr_t *ip6, char *dst, size_t size);
int ip6_pton(const char *src, ip6_addr_t *dst);
char *ip6_ntoa(const ip6_addr_t *ip6);
#define ip6_aton ip6_pton
void ip6_checksum(void *buf, size_t len);
__END_DECLS
#endif /* DNET_IP6_H */
/*
* Copyright (c) 2004-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "sims/nic/e1000_gem5/gem5/pktfifo.h"
using namespace std;
bool
PacketFifo::copyout(void *dest, unsigned offset, unsigned len)
{
char *data = (char *)dest;
if (offset + len >= size())
return false;
iterator i = fifo.begin();
iterator end = fifo.end();
while (len > 0) {
        // Re-read the packet length from the current entry each time so the
        // skip loop advances correctly across packet boundaries.
        while (i != end && offset >= i->packet->length) {
            offset -= i->packet->length;
            ++i;
        }
        if (i == end)
            panic("invalid fifo");
        EthPacketPtr &pkt = i->packet;
        unsigned size = min(pkt->length - offset, len);
        // Copy out of the current packet starting at the remaining offset.
        memcpy(data, pkt->data + offset, size);
offset = 0;
len -= size;
data += size;
++i;
}
return true;
}
/*
* Copyright (c) 2004-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DEV_NET_PKTFIFO_HH__
#define __DEV_NET_PKTFIFO_HH__
#include <iosfwd>
#include <list>
#include <string>
#include "sims/nic/e1000_gem5/support.h"
struct EthPacket;
struct PacketFifoEntry
{
EthPacketPtr packet;
uint64_t number;
unsigned slack;
int priv;
PacketFifoEntry()
{
clear();
}
PacketFifoEntry(const PacketFifoEntry &s)
: packet(s.packet), number(s.number), slack(s.slack), priv(s.priv)
{
}
PacketFifoEntry(EthPacketPtr p, uint64_t n)
: packet(p), number(n), slack(0), priv(-1)
{
}
void clear()
{
packet = NULL;
number = 0;
slack = 0;
priv = -1;
}
};
class PacketFifo
{
public:
typedef std::list<PacketFifoEntry> fifo_list;
typedef fifo_list::iterator iterator;
typedef fifo_list::const_iterator const_iterator;
protected:
std::list<PacketFifoEntry> fifo;
uint64_t _counter;
unsigned _maxsize;
unsigned _size;
unsigned _reserved;
public:
explicit PacketFifo(int max)
: _counter(0), _maxsize(max), _size(0), _reserved(0) {}
virtual ~PacketFifo() {}
unsigned packets() const { return fifo.size(); }
unsigned maxsize() const { return _maxsize; }
unsigned size() const { return _size; }
unsigned reserved() const { return _reserved; }
unsigned avail() const { return _maxsize - _size - _reserved; }
bool empty() const { return size() <= 0; }
bool full() const { return avail() <= 0; }
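    // Space for an in-flight packet can be reserved before it is pushed;
    // push() then accepts the packet against the reservation and clears it.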
unsigned
reserve(unsigned len = 0)
{
assert(avail() >= len);
_reserved += len;
return _reserved;
}
iterator begin() { return fifo.begin(); }
iterator end() { return fifo.end(); }
const_iterator begin() const { return fifo.begin(); }
const_iterator end() const { return fifo.end(); }
EthPacketPtr front() { return fifo.begin()->packet; }
bool push(EthPacketPtr ptr)
{
assert(ptr->length);
assert(_reserved <= ptr->length);
if (avail() < ptr->length - _reserved)
return false;
_size += ptr->length;
PacketFifoEntry entry;
entry.packet = ptr;
entry.number = _counter++;
fifo.push_back(entry);
_reserved = 0;
return true;
}
void pop()
{
if (empty())
return;
iterator entry = fifo.begin();
_size -= entry->packet->length;
_size -= entry->slack;
entry->packet = NULL;
fifo.pop_front();
}
void clear()
{
for (iterator i = begin(); i != end(); ++i)
i->clear();
fifo.clear();
_size = 0;
_reserved = 0;
}
void remove(iterator i)
{
if (i != fifo.begin()) {
iterator prev = i;
--prev;
assert(prev != fifo.end());
prev->slack += i->packet->length;
prev->slack += i->slack;
} else {
_size -= i->packet->length;
_size -= i->slack;
}
i->clear();
fifo.erase(i);
}
bool copyout(void *dest, unsigned offset, unsigned len);
int countPacketsBefore(const_iterator i) const
{
if (i == fifo.end())
return 0;
return i->number - fifo.begin()->number;
}
int countPacketsAfter(const_iterator i) const
{
auto end = fifo.end();
if (i == end)
return 0;
return (--end)->number - i->number;
}
void check() const
{
unsigned total = 0;
for (auto i = begin(); i != end(); ++i)
total += i->packet->length + i->slack;
if (total != _size)
panic("total (%d) is not == to size (%d)\n", total, _size);
}
};
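/*
 * Usage sketch (illustrative; the FIFO size and packet pointer are
 * hypothetical): the device model buffers whole Ethernet packets and tracks
 * byte occupancy, e.g.
 *
 *	PacketFifo fifo(64 * 1024);		// 64 KiB of buffering
 *	if (fifo.avail() >= pkt->length)
 *		fifo.push(pkt);			// queue and account the packet
 *	...
 *	EthPacketPtr head = fifo.front();	// oldest packet
 *	fifo.pop();				// release its bytes
 */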
#endif // __DEV_NET_PKTFIFO_HH__
/*
* tcp.h
*
* Transmission Control Protocol (RFC 793).
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* $Id: tcp.h,v 1.17 2004/02/23 10:02:11 dugsong Exp $
*/
#ifndef DNET_TCP_H
#define DNET_TCP_H
#define TCP_HDR_LEN 20 /* base TCP header length */
#define TCP_OPT_LEN 2 /* base TCP option length */
#define TCP_OPT_LEN_MAX 40
#define TCP_HDR_LEN_MAX (TCP_HDR_LEN + TCP_OPT_LEN_MAX)
#ifndef __GNUC__
# define __attribute__(x)
# pragma pack(1)
#endif
/*
* TCP header, without options
*/
struct tcp_hdr {
uint16_t th_sport; /* source port */
uint16_t th_dport; /* destination port */
uint32_t th_seq; /* sequence number */
uint32_t th_ack; /* acknowledgment number */
#if DNET_BYTESEX == DNET_BIG_ENDIAN
uint8_t th_off:4, /* data offset */
th_x2:4; /* (unused) */
#elif DNET_BYTESEX == DNET_LIL_ENDIAN
uint8_t th_x2:4,
th_off:4;
#else
# error "need to include <dnet.h>"
#endif
uint8_t th_flags; /* control flags */
uint16_t th_win; /* window */
uint16_t th_sum; /* checksum */
uint16_t th_urp; /* urgent pointer */
}__attribute__((packed));
/*
* TCP control flags (th_flags)
*/
#define TH_FIN 0x01 /* end of data */
#define TH_SYN 0x02 /* synchronize sequence numbers */
#define TH_RST 0x04 /* reset connection */
#define TH_PUSH 0x08 /* push */
#define TH_ACK 0x10 /* acknowledgment number set */
#define TH_URG 0x20 /* urgent pointer set */
#define TH_ECE 0x40 /* ECN echo, RFC 3168 */
#define TH_CWR 0x80 /* congestion window reduced */
#define TCP_PORT_MAX 65535 /* maximum port */
#define TCP_WIN_MAX 65535 /* maximum (unscaled) window */
/*
* Sequence number comparison macros
*/
#define TCP_SEQ_LT(a,b) ((int)((a)-(b)) < 0)
#define TCP_SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
#define TCP_SEQ_GT(a,b) ((int)((a)-(b)) > 0)
#define TCP_SEQ_GEQ(a,b) ((int)((a)-(b)) >= 0)
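/*
 * These comparisons are modulo 2^32, so they stay correct across sequence
 * number wraparound: e.g. TCP_SEQ_LT(0xfffffff0, 0x10) holds because
 * (int)(0xfffffff0 - 0x10) is negative.
 */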
/*
* TCP FSM states
*/
#define TCP_STATE_CLOSED 0 /* closed */
#define TCP_STATE_LISTEN	1	/* listening for connection */
#define TCP_STATE_SYN_SENT 2 /* active, have sent SYN */
#define TCP_STATE_SYN_RECEIVED 3 /* have sent and received SYN */
#define TCP_STATE_ESTABLISHED 4 /* established */
#define TCP_STATE_CLOSE_WAIT 5 /* rcvd FIN, waiting for close */
#define TCP_STATE_FIN_WAIT_1 6 /* have closed, sent FIN */
#define TCP_STATE_CLOSING	7	/* closed, exchanged FIN; await FIN-ACK */
#define TCP_STATE_LAST_ACK	8	/* had FIN and close; await FIN-ACK */
#define TCP_STATE_FIN_WAIT_2 9 /* have closed, FIN is acked */
#define TCP_STATE_TIME_WAIT 10 /* in 2*MSL quiet wait after close */
#define TCP_STATE_MAX 11
/*
* Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
*/
#define TCP_OPT_EOL 0 /* end of option list */
#define TCP_OPT_NOP 1 /* no operation */
#define TCP_OPT_MSS 2 /* maximum segment size */
#define TCP_OPT_WSCALE 3 /* window scale factor, RFC 1072 */
#define TCP_OPT_SACKOK 4 /* SACK permitted, RFC 2018 */
#define TCP_OPT_SACK 5 /* SACK, RFC 2018 */
#define TCP_OPT_ECHO 6 /* echo (obsolete), RFC 1072 */
#define TCP_OPT_ECHOREPLY 7 /* echo reply (obsolete), RFC 1072 */
#define TCP_OPT_TIMESTAMP 8 /* timestamp, RFC 1323 */
#define TCP_OPT_POCONN 9 /* partial order conn, RFC 1693 */
#define TCP_OPT_POSVC 10 /* partial order service, RFC 1693 */
#define TCP_OPT_CC 11 /* connection count, RFC 1644 */
#define TCP_OPT_CCNEW 12 /* CC.NEW, RFC 1644 */
#define TCP_OPT_CCECHO 13 /* CC.ECHO, RFC 1644 */
#define TCP_OPT_ALTSUM 14 /* alt checksum request, RFC 1146 */
#define TCP_OPT_ALTSUMDATA 15 /* alt checksum data, RFC 1146 */
#define TCP_OPT_SKEETER 16 /* Skeeter */
#define TCP_OPT_BUBBA 17 /* Bubba */
#define TCP_OPT_TRAILSUM 18 /* trailer checksum */
#define TCP_OPT_MD5 19 /* MD5 signature, RFC 2385 */
#define TCP_OPT_SCPS 20 /* SCPS capabilities */
#define TCP_OPT_SNACK 21 /* selective negative acks */
#define TCP_OPT_REC 22 /* record boundaries */
#define TCP_OPT_CORRUPT 23 /* corruption experienced */
#define TCP_OPT_SNAP 24 /* SNAP */
#define TCP_OPT_TCPCOMP 26 /* TCP compression filter */
#define TCP_OPT_MAX 27
#define TCP_OPT_TYPEONLY(type) \
((type) == TCP_OPT_EOL || (type) == TCP_OPT_NOP)
/*
* TCP option (following TCP header)
*/
struct tcp_opt {
uint8_t opt_type; /* option type */
uint8_t opt_len; /* option length >= TCP_OPT_LEN */
union tcp_opt_data {
uint16_t mss; /* TCP_OPT_MSS */
uint8_t wscale; /* TCP_OPT_WSCALE */
uint16_t sack[19]; /* TCP_OPT_SACK */
uint32_t echo; /* TCP_OPT_ECHO{REPLY} */
uint32_t timestamp[2]; /* TCP_OPT_TIMESTAMP */
uint32_t cc; /* TCP_OPT_CC{NEW,ECHO} */
uint8_t cksum; /* TCP_OPT_ALTSUM */
uint8_t md5[16]; /* TCP_OPT_MD5 */
uint8_t data8[TCP_OPT_LEN_MAX - TCP_OPT_LEN];
} opt_data;
} __attribute__((__packed__));
#ifndef __GNUC__
# pragma pack()
#endif
#define tcp_pack_hdr(hdr, sport, dport, seq, ack, flags, win, urp) do { \
struct tcp_hdr *tcp_pack_p = (struct tcp_hdr *)(hdr); \
tcp_pack_p->th_sport = htons(sport); \
tcp_pack_p->th_dport = htons(dport); \
tcp_pack_p->th_seq = htonl(seq); \
tcp_pack_p->th_ack = htonl(ack); \
tcp_pack_p->th_x2 = 0; tcp_pack_p->th_off = 5; \
tcp_pack_p->th_flags = flags; \
tcp_pack_p->th_win = htons(win); \
tcp_pack_p->th_urp = htons(urp); \
} while (0)
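/*
 * Usage sketch (illustrative; the port and sequence numbers are hypothetical):
 * pack a bare SYN segment, then fill th_sum separately (e.g. via
 * ip_checksum() over the enclosing IPv4 packet).
 *
 *	uint8_t seg[TCP_HDR_LEN];
 *	tcp_pack_hdr(seg, 49152, 80, 0x1000, 0, TH_SYN, TCP_WIN_MAX, 0);
 */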
#endif /* DNET_TCP_H */
/*
* udp.h
*
* User Datagram Protocol (RFC 768).
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* $Id: udp.h,v 1.8 2002/04/02 05:05:39 dugsong Exp $
*/
#ifndef DNET_UDP_H
#define DNET_UDP_H
#define UDP_HDR_LEN 8
struct udp_hdr {
uint16_t uh_sport; /* source port */
uint16_t uh_dport; /* destination port */
uint16_t uh_ulen; /* udp length (including header) */
uint16_t uh_sum; /* udp checksum */
};
#define UDP_PORT_MAX 65535
#define udp_pack_hdr(hdr, sport, dport, ulen) do { \
struct udp_hdr *udp_pack_p = (struct udp_hdr *)(hdr); \
udp_pack_p->uh_sport = htons(sport); \
udp_pack_p->uh_dport = htons(dport); \
udp_pack_p->uh_ulen = htons(ulen); \
} while (0)
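/*
 * Usage sketch (illustrative; ports and length are hypothetical): the macro
 * leaves uh_sum untouched, so the checksum must be filled in separately
 * (e.g. by ip_checksum() over the enclosing IPv4 packet).
 *
 *	uint8_t dgram[UDP_HDR_LEN + 32];
 *	udp_pack_hdr(dgram, 9000, 9001, sizeof(dgram));
 */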
#endif /* DNET_UDP_H */
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @file
* Device model for Intel's 8254x line of gigabit ethernet controllers.
* In particular an 82547 revision 2 (82547GI) MAC because it seems to have the
* fewest workarounds in the driver. It will probably work with most of the
* other MACs with slight modifications.
*/
#include "sims/nic/e1000_gem5/i8254xGBe.h"
/*
 * @todo There are really multiple DMA engines; we should implement them.
*/
#include <algorithm>
#include <memory>
/*#include "base/inet.hh"
#include "base/trace.hh"
#include "debug/Drain.hh"
#include "debug/EthernetAll.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/IGbE.hh"
#include "sim/stats.hh"
#include "sim/system.hh"*/
using namespace iGbReg;
IGbE::IGbE(const Params *p)
: params_(*p), rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size),
inTick(false), rxTick(false), txTick(false), txFifoTick(false),
rxDmaPacket(false),
pktOffset(0), fetchDelay(p->fetch_delay), wbDelay(p->wb_delay),
fetchCompDelay(p->fetch_comp_delay), wbCompDelay(p->wb_comp_delay),
rxWriteDelay(p->rx_write_delay), txReadDelay(p->tx_read_delay),
pioDelay(p->pio_delay),
rdtrEvent([this]{ rdtrProcess(); }, "e1000-rdtr"),
radvEvent([this]{ radvProcess(); }, "e1000-radv"),
tadvEvent([this]{ tadvProcess(); }, "e1000-tadv"),
tidvEvent([this]{ tidvProcess(); }, "e1000-tidv"),
tickEvent([this]{ tick(); }, "e1000-tick"),
interEvent([this]{ delayIntEvent(); }, "e1000-inter"),
rxDescCache(this, "RxDesc", p->rx_desc_cache_size),
txDescCache(this, "TxDesc", p->tx_desc_cache_size),
lastInterrupt(0)
{
    // Initialize internal registers per Intel documentation.
    // All registers are initialized to 0 by the per-register constructors.
regs.ctrl.fd(1);
regs.ctrl.lrst(1);
regs.ctrl.speed(2);
regs.ctrl.frcspd(1);
regs.sts.speed(3); // Say we're 1000Mbps
regs.sts.fd(1); // full duplex
regs.sts.lu(1); // link up
regs.eecd.fwe(1);
regs.eecd.ee_type(1);
regs.imr = 0;
regs.iam = 0;
regs.rxdctl.gran(1);
regs.rxdctl.wthresh(1);
regs.fcrth(1);
regs.tdwba = 0;
regs.rlpml = 0;
regs.sw_fw_sync = 0;
regs.pba.rxa(0x30);
regs.pba.txa(0x10);
eeOpBits = 0;
eeAddrBits = 0;
eeDataBits = 0;
eeOpcode = 0;
    // clear all 64 16-bit words of the EEPROM
memset(&flash, 0, EEPROM_SIZE*2);
rxFifo.clear();
txFifo.clear();
}
void
IGbE::init()
{
// Set the MAC address
macAddr = runner_->GetMacAddr();
memcpy(flash, &macAddr, ETH_ADDR_LEN);
for (int x = 0; x < ETH_ADDR_LEN/2; x++)
flash[x] = htobe(flash[x]);
uint16_t csum = 0;
for (int x = 0; x < EEPROM_SIZE; x++)
csum += htobe(flash[x]);
    // Set the last word so the 16-bit sum of all EEPROM words equals EEPROM_CSUM
flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum));
}
IGbE::~IGbE()
{
}
// Handy macro for range-testing register access addresses
#define IN_RANGE(val, base, len) (val >= base && val < (base + len))
Tick
IGbE::read(Addr daddr, uint8_t len, void *dst)
{
uint32_t *p32 = (uint32_t *) dst;
// Only 32bit accesses allowed
assert(len == 4);
DPRINTF(Ethernet, "Read device register %#lx\n", daddr);
//
// Handle read of register here
//
switch (daddr) {
case REG_CTRL:
*p32 = regs.ctrl();
break;
case REG_STATUS:
*p32 = regs.sts();
break;
case REG_EECD:
*p32 = regs.eecd();
break;
case REG_EERD:
*p32 = regs.eerd();
break;
case REG_CTRL_EXT:
*p32 = regs.ctrl_ext();
break;
case REG_MDIC:
*p32 = regs.mdic();
break;
case REG_ICR:
DPRINTF(Ethernet, "Reading ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
*p32 = regs.icr();
if (regs.icr.int_assert() || regs.imr == 0) {
regs.icr = regs.icr() & ~mask(30);
DPRINTF(Ethernet, "Cleared ICR. ICR=%#x\n", regs.icr());
}
if (regs.ctrl_ext.iame() && regs.icr.int_assert())
regs.imr &= ~regs.iam;
chkInterrupt();
break;
case REG_EICR:
// This is only useful for MSI, but the driver reads it every time
// Just don't do anything
*p32 = 0;
break;
case REG_ITR:
*p32 = regs.itr();
break;
case REG_RCTL:
*p32 = regs.rctl();
break;
case REG_FCTTV:
*p32 = regs.fcttv();
break;
case REG_TCTL:
*p32 = regs.tctl();
break;
case REG_PBA:
*p32 = regs.pba();
break;
case REG_WUC:
case REG_WUFC:
case REG_WUS:
case REG_LEDCTL:
*p32 = 0; // We don't care, so just return 0
break;
case REG_FCRTL:
*p32 = regs.fcrtl();
break;
case REG_FCRTH:
*p32 = regs.fcrth();
break;
case REG_RDBAL:
*p32 = regs.rdba.rdbal();
break;
case REG_RDBAH:
*p32 = regs.rdba.rdbah();
break;
case REG_RDLEN:
*p32 = regs.rdlen();
break;
case REG_SRRCTL:
*p32 = regs.srrctl();
break;
case REG_RDH:
*p32 = regs.rdh();
break;
case REG_RDT:
*p32 = regs.rdt();
break;
case REG_RDTR:
*p32 = regs.rdtr();
if (regs.rdtr.fpd()) {
rxDescCache.writeback(0);
DPRINTF(EthernetIntr,
"Posting interrupt because of RDTR.FPD write\n");
postInterrupt(IT_RXT);
regs.rdtr.fpd(0);
}
break;
case REG_RXDCTL:
*p32 = regs.rxdctl();
break;
case REG_RADV:
*p32 = regs.radv();
break;
case REG_TDBAL:
*p32 = regs.tdba.tdbal();
break;
case REG_TDBAH:
*p32 = regs.tdba.tdbah();
break;
case REG_TDLEN:
*p32 = regs.tdlen();
break;
case REG_TDH:
*p32 = regs.tdh();
break;
case REG_TXDCA_CTL:
*p32 = regs.txdca_ctl();
break;
case REG_TDT:
*p32 = regs.tdt();
break;
case REG_TIDV:
*p32 = regs.tidv();
break;
case REG_TXDCTL:
*p32 = regs.txdctl();
break;
case REG_TADV:
*p32 = regs.tadv();
break;
case REG_TDWBAL:
*p32 = regs.tdwba & mask(32);
break;
case REG_TDWBAH:
*p32 = regs.tdwba >> 32;
break;
case REG_RXCSUM:
*p32 = regs.rxcsum();
break;
case REG_RLPML:
*p32 = regs.rlpml;
break;
case REG_RFCTL:
*p32 = regs.rfctl();
break;
case REG_MANC:
*p32 = regs.manc();
break;
case REG_SWSM:
*p32 = regs.swsm();
regs.swsm.smbi(1);
break;
case REG_FWSM:
*p32 = regs.fwsm();
break;
case REG_SWFWSYNC:
*p32 = regs.sw_fw_sync;
break;
default:
if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
!IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
!IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4) &&
!IN_RANGE(daddr, REG_CRCERRS, STATS_REGS_SIZE))
panic("Read request to unknown register number: %#x\n", daddr);
else
*p32 = 0;
};
return pioDelay;
}
Tick
IGbE::write(Addr daddr, uint8_t len, const void *src)
{
// Only 32bit accesses allowed
assert(len == sizeof(uint32_t));
uint32_t val = *((uint32_t *) src);
DPRINTF(Ethernet, "Wrote device register %#lx value %#x\n",
daddr, val);
//
// Handle write of register here
//
Regs::RCTL oldrctl;
Regs::TCTL oldtctl;
switch (daddr) {
case REG_CTRL:
regs.ctrl = val;
if (regs.ctrl.tfce())
warn("TX Flow control enabled, should implement\n");
if (regs.ctrl.rfce())
warn("RX Flow control enabled, should implement\n");
break;
case REG_CTRL_EXT:
regs.ctrl_ext = val;
break;
case REG_STATUS:
regs.sts = val;
break;
case REG_EECD:
int oldClk;
oldClk = regs.eecd.sk();
regs.eecd = val;
        // See if this is an EEPROM access and emulate accordingly
if (!oldClk && regs.eecd.sk()) {
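            // Bit-banged SPI emulation: the driver clocks in an 8-bit opcode,
            // then (for reads) an 8-bit word address, and the selected 16-bit
            // EEPROM word is shifted back out MSB-first on the dout line.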
if (eeOpBits < 8) {
eeOpcode = eeOpcode << 1 | regs.eecd.din();
eeOpBits++;
} else if (eeAddrBits < 8 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
eeAddr = eeAddr << 1 | regs.eecd.din();
eeAddrBits++;
} else if (eeDataBits < 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) {
assert(eeAddr>>1 < EEPROM_SIZE);
DPRINTF(EthernetEEPROM, "EEPROM bit read: %d word: %#X\n",
flash[eeAddr>>1] >> eeDataBits & 0x1,
flash[eeAddr>>1]);
regs.eecd.dout((flash[eeAddr>>1] >> (15-eeDataBits)) & 0x1);
eeDataBits++;
} else if (eeDataBits < 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI) {
regs.eecd.dout(0);
eeDataBits++;
} else
panic("What's going on with eeprom interface? opcode:"
" %#x:%d addr: %#x:%d, data: %d\n", (uint32_t)eeOpcode,
(uint32_t)eeOpBits, (uint32_t)eeAddr,
(uint32_t)eeAddrBits, (uint32_t)eeDataBits);
// Reset everything for the next command
if ((eeDataBits == 16 && eeOpcode == EEPROM_READ_OPCODE_SPI) ||
(eeDataBits == 8 && eeOpcode == EEPROM_RDSR_OPCODE_SPI)) {
eeOpBits = 0;
eeAddrBits = 0;
eeDataBits = 0;
eeOpcode = 0;
eeAddr = 0;
}
DPRINTF(EthernetEEPROM, "EEPROM: opcode: %#X:%d addr: %#X:%d\n",
(uint32_t)eeOpcode, (uint32_t) eeOpBits,
(uint32_t)eeAddr>>1, (uint32_t)eeAddrBits);
if (eeOpBits == 8 && !(eeOpcode == EEPROM_READ_OPCODE_SPI ||
eeOpcode == EEPROM_RDSR_OPCODE_SPI ))
panic("Unknown eeprom opcode: %#X:%d\n", (uint32_t)eeOpcode,
(uint32_t)eeOpBits);
}
        // If the driver requests EEPROM access, grant it immediately
regs.eecd.ee_gnt(regs.eecd.ee_req());
break;
case REG_EERD:
regs.eerd = val;
if (regs.eerd.start()) {
regs.eerd.done(1);
assert(regs.eerd.addr() < EEPROM_SIZE);
regs.eerd.data(flash[regs.eerd.addr()]);
regs.eerd.start(0);
DPRINTF(EthernetEEPROM, "EEPROM: read addr: %#X data %#x\n",
regs.eerd.addr(), regs.eerd.data());
}
break;
case REG_MDIC:
regs.mdic = val;
if (regs.mdic.i())
panic("No support for interrupt on mdic complete\n");
if (regs.mdic.phyadd() != 1)
panic("No support for reading anything but phy\n");
DPRINTF(Ethernet, "%s phy address %x\n",
regs.mdic.op() == 1 ? "Writing" : "Reading",
regs.mdic.regadd());
switch (regs.mdic.regadd()) {
case PHY_PSTATUS:
regs.mdic.data(0x796D); // link up
break;
case PHY_PID:
regs.mdic.data(params()->phy_pid);
break;
case PHY_EPID:
regs.mdic.data(params()->phy_epid);
break;
case PHY_GSTATUS:
regs.mdic.data(0x7C00);
break;
case PHY_EPSTATUS:
regs.mdic.data(0x3000);
break;
case PHY_AGC:
regs.mdic.data(0x180); // some random length
break;
default:
regs.mdic.data(0);
}
regs.mdic.r(1);
break;
case REG_ICR:
DPRINTF(Ethernet, "Writing ICR. ICR=%#x IMR=%#x IAM=%#x IAME=%d\n",
regs.icr(), regs.imr, regs.iam, regs.ctrl_ext.iame());
if (regs.ctrl_ext.iame())
regs.imr &= ~regs.iam;
regs.icr = ~bits(val,30,0) & regs.icr();
chkInterrupt();
break;
case REG_ITR:
regs.itr = val;
break;
case REG_ICS:
DPRINTF(EthernetIntr, "Posting interrupt because of ICS write\n");
postInterrupt((IntTypes)val);
break;
case REG_IMS:
regs.imr |= val;
chkInterrupt();
break;
case REG_IMC:
regs.imr &= ~val;
chkInterrupt();
break;
case REG_IAM:
regs.iam = val;
break;
case REG_RCTL:
oldrctl = regs.rctl;
regs.rctl = val;
if (regs.rctl.rst()) {
rxDescCache.reset();
DPRINTF(EthernetSM, "RXS: Got RESET!\n");
rxFifo.clear();
regs.rctl.rst(0);
}
if (regs.rctl.en())
rxTick = true;
restartClock();
break;
case REG_FCTTV:
regs.fcttv = val;
break;
case REG_TCTL:
        // Capture the old value before updating so the enable-edge check
        // below sees the previous TCTL.EN state.
        oldtctl = regs.tctl;
        regs.tctl = val;
if (regs.tctl.en())
txTick = true;
restartClock();
if (regs.tctl.en() && !oldtctl.en()) {
txDescCache.reset();
}
break;
case REG_PBA:
regs.pba.rxa(val);
regs.pba.txa(64 - regs.pba.rxa());
break;
case REG_WUC:
case REG_WUFC:
case REG_WUS:
case REG_LEDCTL:
case REG_FCAL:
case REG_FCAH:
case REG_FCT:
case REG_VET:
case REG_AIFS:
case REG_TIPG:
; // We don't care, so don't store anything
break;
case REG_IVAR0:
warn("Writing to IVAR0, ignoring...\n");
break;
case REG_FCRTL:
regs.fcrtl = val;
break;
case REG_FCRTH:
regs.fcrth = val;
break;
case REG_RDBAL:
regs.rdba.rdbal( val & ~mask(4));
rxDescCache.areaChanged();
break;
case REG_RDBAH:
regs.rdba.rdbah(val);
rxDescCache.areaChanged();
break;
case REG_RDLEN:
regs.rdlen = val & ~mask(7);
rxDescCache.areaChanged();
break;
case REG_SRRCTL:
regs.srrctl = val;
break;
case REG_RDH:
regs.rdh = val;
rxDescCache.areaChanged();
break;
case REG_RDT:
regs.rdt = val;
DPRINTF(EthernetSM, "RXS: RDT Updated.\n");
DPRINTF(EthernetSM, "RXS: RDT Fetching Descriptors!\n");
rxDescCache.fetchDescriptors();
break;
case REG_RDTR:
regs.rdtr = val;
break;
case REG_RADV:
regs.radv = val;
break;
case REG_RXDCTL:
regs.rxdctl = val;
break;
case REG_TDBAL:
regs.tdba.tdbal( val & ~mask(4));
txDescCache.areaChanged();
break;
case REG_TDBAH:
regs.tdba.tdbah(val);
txDescCache.areaChanged();
break;
case REG_TDLEN:
regs.tdlen = val & ~mask(7);
txDescCache.areaChanged();
break;
case REG_TDH:
regs.tdh = val;
txDescCache.areaChanged();
break;
case REG_TXDCA_CTL:
regs.txdca_ctl = val;
if (regs.txdca_ctl.enabled())
panic("No support for DCA\n");
break;
case REG_TDT:
regs.tdt = val;
DPRINTF(EthernetSM, "TXS: TX Tail pointer updated\n");
DPRINTF(EthernetSM, "TXS: TDT Fetching Descriptors!\n");
txDescCache.fetchDescriptors();
break;
case REG_TIDV:
regs.tidv = val;
break;
case REG_TXDCTL:
regs.txdctl = val;
break;
case REG_TADV:
regs.tadv = val;
break;
case REG_TDWBAL:
regs.tdwba &= ~mask(32);
regs.tdwba |= val;
txDescCache.completionWriteback(regs.tdwba & ~mask(1),
regs.tdwba & mask(1));
break;
case REG_TDWBAH:
regs.tdwba &= mask(32);
regs.tdwba |= (uint64_t)val << 32;
txDescCache.completionWriteback(regs.tdwba & ~mask(1),
regs.tdwba & mask(1));
break;
case REG_RXCSUM:
regs.rxcsum = val;
break;
case REG_RLPML:
regs.rlpml = val;
break;
case REG_RFCTL:
regs.rfctl = val;
if (regs.rfctl.exsten())
panic("Extended RX descriptors not implemented\n");
break;
case REG_MANC:
regs.manc = val;
break;
case REG_SWSM:
regs.swsm = val;
if (regs.fwsm.eep_fw_semaphore())
regs.swsm.swesmbi(0);
break;
case REG_SWFWSYNC:
regs.sw_fw_sync = val;
break;
default:
if (!IN_RANGE(daddr, REG_VFTA, VLAN_FILTER_TABLE_SIZE*4) &&
!IN_RANGE(daddr, REG_RAL, RCV_ADDRESS_TABLE_SIZE*8) &&
!IN_RANGE(daddr, REG_MTA, MULTICAST_TABLE_SIZE*4))
panic("Write request to unknown register number: %#x\n", daddr);
};
return pioDelay;
}
void
IGbE::postInterrupt(IntTypes t, bool now)
{
assert(t);
// Interrupt is already pending
if (t & regs.icr() && !now)
return;
regs.icr = regs.icr() | t;
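    // ITR.interval is programmed in units of 256 ns; with ticks in
    // picoseconds that is 1000 * 256 ticks per unit (e.g. an interval of
    // 100 throttles interrupts to at most one every 25.6 us).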
Tick itr_interval = 1000ULL * 256 * regs.itr.interval();
DPRINTF(EthernetIntr,
"EINT: postInterrupt() curTick(): %ld itr: %d interval: %ld\n",
curTick(), regs.itr.interval(), itr_interval);
if (regs.itr.interval() == 0 || now ||
lastInterrupt + itr_interval <= curTick()) {
if (interEvent.scheduled()) {
deschedule(interEvent);
}
cpuPostInt();
} else {
Tick int_time = lastInterrupt + itr_interval;
assert(int_time > 0);
DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for tick %ld\n",
int_time);
if (!interEvent.scheduled()) {
schedule(interEvent, int_time);
}
}
}
void
IGbE::delayIntEvent()
{
cpuPostInt();
}
void
IGbE::cpuPostInt()
{
//postedInterrupts++;
if (!(regs.icr() & regs.imr)) {
DPRINTF(Ethernet, "Interrupt Masked. Not Posting\n");
return;
}
DPRINTF(Ethernet, "Posting Interrupt\n");
if (interEvent.scheduled()) {
deschedule(interEvent);
}
if (rdtrEvent.scheduled()) {
regs.icr.rxt0(1);
deschedule(rdtrEvent);
}
if (radvEvent.scheduled()) {
regs.icr.rxt0(1);
deschedule(radvEvent);
}
if (tadvEvent.scheduled()) {
regs.icr.txdw(1);
deschedule(tadvEvent);
}
if (tidvEvent.scheduled()) {
regs.icr.txdw(1);
deschedule(tidvEvent);
}
regs.icr.int_assert(1);
DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n",
regs.icr());
intrPost();
lastInterrupt = curTick();
}
void
IGbE::cpuClearInt()
{
if (regs.icr.int_assert()) {
regs.icr.int_assert(0);
DPRINTF(EthernetIntr,
"EINT: Clearing interrupt to CPU now. Vector %#x\n",
regs.icr());
intrClear();
}
}
void
IGbE::chkInterrupt()
{
DPRINTF(Ethernet, "Checking interrupts icr: %#x imr: %#x\n", regs.icr(),
regs.imr);
// Check if we need to clear the cpu interrupt
if (!(regs.icr() & regs.imr)) {
DPRINTF(Ethernet, "Mask cleaned all interrupts\n");
if (interEvent.scheduled())
deschedule(interEvent);
if (regs.icr.int_assert())
cpuClearInt();
}
DPRINTF(Ethernet, "ITR = %#X itr.interval = %#X\n",
regs.itr(), regs.itr.interval());
if (regs.icr() & regs.imr) {
if (regs.itr.interval() == 0) {
cpuPostInt();
} else {
DPRINTF(Ethernet,
"Possibly scheduling interrupt because of imr write\n");
if (!interEvent.scheduled()) {
Tick t = curTick() + 1000 * 256 * regs.itr.interval();
DPRINTF(Ethernet, "Scheduling for %ld\n", t);
schedule(interEvent, t);
}
}
}
}
///////////////////////////// IGbE::DescCache //////////////////////////////
template<class T>
IGbE::DescCache<T>::DescCache(IGbE *i, const std::string n, int s)
: igbe(i), _name(n), cachePnt(0), size(s), curFetching(0),
wbOut(0), moreToWb(false), wbAlignment(0), pktPtr(NULL),
wbDelayEvent([this]{ writeback1(); }, n),
fetchDelayEvent([this]{ fetchDescriptors1(); }, n),
fetchEvent([this]{ fetchComplete(); }, n),
wbEvent([this]{ wbComplete(); }, n)
{
fetchBuf = new T[size];
wbBuf = new T[size];
}
template<class T>
IGbE::DescCache<T>::~DescCache()
{
reset();
delete[] fetchBuf;
delete[] wbBuf;
}
template<class T>
void
IGbE::DescCache<T>::areaChanged()
{
if (usedCache.size() > 0 || curFetching || wbOut)
panic("Descriptor Address, Length or Head changed. Bad\n");
reset();
}
template<class T>
void
IGbE::DescCache<T>::writeback(Addr aMask)
{
int curHead = descHead();
int max_to_wb = usedCache.size();
    // Check if this writeback is less restrictive than the previous
    // and if so set up another one immediately following it
if (wbOut) {
if (aMask < wbAlignment) {
moreToWb = true;
wbAlignment = aMask;
}
DPRINTF(EthernetDesc,
"[%s] Writing back already in process, returning\n", _name.c_str());
return;
}
moreToWb = false;
wbAlignment = aMask;
DPRINTF(EthernetDesc, "[%s] Writing back descriptors head: %d tail: "
"%ld len: %ld cachePnt: %d max_to_wb: %d descleft: %d\n", _name.c_str(),
curHead, descTail(), descLen(), cachePnt, max_to_wb,
descLeft());
if (max_to_wb + curHead >= descLen()) {
max_to_wb = descLen() - curHead;
moreToWb = true;
// this is by definition aligned correctly
} else if (wbAlignment != 0) {
// align the wb point to the mask
max_to_wb = max_to_wb & ~wbAlignment;
}
DPRINTF(EthernetDesc, "[%s] Writing back %d descriptors\n",
_name.c_str(), max_to_wb);
if (max_to_wb <= 0) {
if (usedCache.size())
igbe->anBegin(annSmWb, "Wait Alignment");
else
igbe->anWe(annSmWb, annUsedCacheQ);
return;
}
wbOut = max_to_wb;
assert(!wbDelayEvent.scheduled());
igbe->schedule(wbDelayEvent, igbe->curTick() + igbe->wbDelay);
igbe->anBegin(annSmWb, "Prepare Writeback Desc");
}
template<class T>
void
IGbE::DescCache<T>::writeback1()
{
DPRINTF(EthernetDesc, "[%s] Begining DMA of %d descriptors\n",
_name.c_str(), wbOut);
for (int x = 0; x < wbOut; x++) {
assert(usedCache.size());
memcpy(&wbBuf[x], usedCache[x], sizeof(T));
igbe->anPq(annSmWb, annUsedCacheQ);
igbe->anPq(annSmWb, annDescQ);
igbe->anQ(annSmWb, annUsedDescQ);
}
igbe->anBegin(annSmWb, "Writeback Desc DMA");
assert(wbOut);
igbe->dmaWrite(pciToDma(descBase() + descHead() * sizeof(T)),
wbOut * sizeof(T), wbEvent, (uint8_t*)wbBuf,
igbe->wbCompDelay);
}
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors()
{
size_t max_to_fetch;
if (curFetching) {
DPRINTF(EthernetDesc,
"[%s] Currently fetching %d descriptors, returning\n",
_name.c_str(), curFetching);
return;
}
if (descTail() >= cachePnt)
max_to_fetch = descTail() - cachePnt;
else
max_to_fetch = descLen() - cachePnt;
size_t free_cache = size - usedCache.size() - unusedCache.size();
if (!max_to_fetch)
igbe->anWe(annSmFetch, annUnusedDescQ);
else
igbe->anPq(annSmFetch, annUnusedDescQ, max_to_fetch);
if (max_to_fetch) {
if (!free_cache)
igbe->anWf(annSmFetch, annDescQ);
else
igbe->anRq(annSmFetch, annDescQ, free_cache);
}
max_to_fetch = std::min(max_to_fetch, free_cache);
DPRINTF(EthernetDesc, "[%s] Fetching descriptors head: %ld tail: "
"%ld len: %ld cachePnt: %d max_to_fetch: %zd descleft: %d\n",
_name.c_str(), descHead(), descTail(), descLen(), cachePnt,
max_to_fetch, descLeft());
// Nothing to do
if (max_to_fetch == 0)
return;
// So we don't have two descriptor fetches going on at once
curFetching = max_to_fetch;
assert(!fetchDelayEvent.scheduled());
igbe->schedule(fetchDelayEvent, igbe->curTick() + igbe->fetchDelay);
igbe->anBegin(annSmFetch, "Prepare Fetch Desc");
}
template<class T>
void
IGbE::DescCache<T>::fetchDescriptors1()
{
igbe->anBegin(annSmFetch, "Fetch Desc");
DPRINTF(EthernetDesc, "[%s] Fetching descriptors at %#lx (%#lx), size: %#lx\n",
_name.c_str(),
descBase() + cachePnt * sizeof(T),
pciToDma(descBase() + cachePnt * sizeof(T)),
curFetching * sizeof(T));
assert(curFetching);
igbe->dmaRead(pciToDma(descBase() + cachePnt * sizeof(T)),
curFetching * sizeof(T), fetchEvent, (uint8_t*)fetchBuf,
igbe->fetchCompDelay);
}
template<class T>
void
IGbE::DescCache<T>::fetchComplete()
{
T *newDesc;
igbe->anBegin(annSmFetch, "Fetch Complete");
for (int x = 0; x < curFetching; x++) {
newDesc = new T;
memcpy(newDesc, &fetchBuf[x], sizeof(T));
unusedCache.push_back(newDesc);
igbe->anDq(annSmFetch, annUnusedDescQ);
igbe->anQ(annSmFetch, annUnusedCacheQ);
igbe->anQ(annSmFetch, annDescQ);
}
#ifdef DEBUG_E1000
int oldCp = cachePnt;
#endif
cachePnt += curFetching;
assert(cachePnt <= descLen());
if (cachePnt == descLen())
cachePnt = 0;
curFetching = 0;
DPRINTF(EthernetDesc, "[%s] Fetching complete cachePnt %d -> %d\n",
_name.c_str(), oldCp, cachePnt);
if ((descTail() >= cachePnt ? (descTail() - cachePnt) : (descLen() -
cachePnt)) == 0)
{
igbe->anWe(annSmFetch, annUnusedDescQ);
} else if (!(size - usedCache.size() - unusedCache.size())) {
igbe->anWf(annSmFetch, annDescQ);
} else {
igbe->anBegin(annSmFetch, "Wait");
}
enableSm();
//igbe->checkDrain();
}
template<class T>
void
IGbE::DescCache<T>::wbComplete()
{
igbe->anBegin(annSmWb, "Finish Writeback");
long curHead = descHead();
#ifdef DEBUG_E1000
long oldHead = curHead;
#endif
for (int x = 0; x < wbOut; x++) {
assert(usedCache.size());
delete usedCache[0];
usedCache.pop_front();
igbe->anDq(annSmWb, annUsedCacheQ);
igbe->anDq(annSmWb, annDescQ);
}
curHead += wbOut;
wbOut = 0;
if (curHead >= descLen())
curHead -= descLen();
// Update the head
updateHead(curHead);
DPRINTF(EthernetDesc, "[%s] Writeback complete curHead %ld -> %ld\n",
_name.c_str(), oldHead, curHead);
// If we still have more to wb, call wb now
actionAfterWb();
if (moreToWb) {
moreToWb = false;
DPRINTF(EthernetDesc, "[%s] Writeback has more todo\n", _name.c_str());
writeback(wbAlignment);
}
if (!wbOut) {
//igbe->checkDrain();
if (usedCache.size())
igbe->anBegin(annSmWb, "Wait");
else
igbe->anWe(annSmWb, annUsedCacheQ);
}
fetchAfterWb();
}
template<class T>
void
IGbE::DescCache<T>::reset()
{
DPRINTF(EthernetDesc, "[%s] Reseting descriptor cache\n", _name.c_str());
for (typename CacheType::size_type x = 0; x < usedCache.size(); x++)
delete usedCache[x];
for (typename CacheType::size_type x = 0; x < unusedCache.size(); x++)
delete unusedCache[x];
usedCache.clear();
unusedCache.clear();
cachePnt = 0;
}
///////////////////////////// IGbE::RxDescCache //////////////////////////////
IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s)
: DescCache<RxDesc>(i, n, s), pktDone(false), splitCount(0),
pktEvent([this]{ pktComplete(); }, n),
pktHdrEvent([this]{ pktSplitDone(); }, n),
pktDataEvent([this]{ pktSplitDone(); }, n)
{
annSmFetch = "RX Desc Fetch";
annSmWb = "RX Desc Writeback";
annUnusedDescQ = "RX Unused Descriptors";
annUnusedCacheQ = "RX Unused Descriptor Cache";
annUsedCacheQ = "RX Used Descriptor Cache";
annUsedDescQ = "RX Used Descriptors";
annDescQ = "RX Descriptors";
}
void
IGbE::RxDescCache::pktSplitDone()
{
splitCount++;
DPRINTF(EthernetDesc,
"Part of split packet done: splitcount now %d\n", splitCount);
assert(splitCount <= 2);
if (splitCount != 2)
return;
splitCount = 0;
DPRINTF(EthernetDesc,
"Part of split packet done: calling pktComplete()\n");
pktComplete();
}
int
IGbE::RxDescCache::writePacket(EthPacketPtr packet, int pkt_offset)
{
assert(unusedCache.size());
//if (!unusedCache.size())
// return false;
pktPtr = packet;
pktDone = false;
unsigned buf_len, hdr_len;
RxDesc *desc = unusedCache.front();
switch (igbe->regs.srrctl.desctype()) {
case RXDT_LEGACY:
assert(pkt_offset == 0);
bytesCopied = packet->length;
DPRINTF(EthernetDesc, "Packet Length: %d Desc Size: %d\n",
packet->length, igbe->regs.rctl.descSize());
assert(packet->length < igbe->regs.rctl.descSize());
igbe->dmaWrite(pciToDma(desc->legacy.buf),
packet->length, pktEvent, packet->data,
igbe->rxWriteDelay);
break;
case RXDT_ADV_ONEBUF:
assert(pkt_offset == 0);
bytesCopied = packet->length;
buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
igbe->regs.rctl.descSize();
DPRINTF(EthernetDesc, "Packet Length: %d srrctl: %#x Desc Size: %d\n",
packet->length, igbe->regs.srrctl(), buf_len);
assert(packet->length < buf_len);
igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
packet->length, pktEvent, packet->data,
igbe->rxWriteDelay);
desc->adv_wb.header_len = htole(0);
desc->adv_wb.sph = htole(0);
desc->adv_wb.pkt_len = htole((uint16_t)(pktPtr->length));
break;
case RXDT_ADV_SPLIT_A:
int split_point;
buf_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.bufLen() :
igbe->regs.rctl.descSize();
hdr_len = igbe->regs.rctl.lpe() ? igbe->regs.srrctl.hdrLen() : 0;
DPRINTF(EthernetDesc,
"lpe: %d Packet Length: %d offset: %d srrctl: %#x "
"hdr addr: %#lx Hdr Size: %d desc addr: %#lx Desc Size: %d\n",
igbe->regs.rctl.lpe(), packet->length, pkt_offset,
igbe->regs.srrctl(), desc->adv_read.hdr, hdr_len,
desc->adv_read.pkt, buf_len);
split_point = hsplit(pktPtr);
if (packet->length <= hdr_len) {
bytesCopied = packet->length;
assert(pkt_offset == 0);
DPRINTF(EthernetDesc, "Hdr split: Entire packet in header\n");
igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
packet->length, pktEvent, packet->data,
igbe->rxWriteDelay);
desc->adv_wb.header_len = htole((uint16_t)packet->length);
desc->adv_wb.sph = htole(0);
desc->adv_wb.pkt_len = htole(0);
} else if (split_point) {
if (pkt_offset) {
// we are only copying some data, header/data has already been
// copied
int max_to_copy =
std::min(packet->length - pkt_offset, buf_len);
bytesCopied += max_to_copy;
DPRINTF(EthernetDesc,
"Hdr split: Continuing data buffer copy\n");
igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
max_to_copy, pktEvent,
packet->data + pkt_offset, igbe->rxWriteDelay);
desc->adv_wb.header_len = htole(0);
desc->adv_wb.pkt_len = htole((uint16_t)max_to_copy);
desc->adv_wb.sph = htole(0);
} else {
int max_to_copy =
std::min(packet->length - split_point, buf_len);
bytesCopied += max_to_copy + split_point;
DPRINTF(EthernetDesc, "Hdr split: splitting at %d\n",
split_point);
igbe->dmaWrite(pciToDma(desc->adv_read.hdr),
split_point, pktHdrEvent,
packet->data, igbe->rxWriteDelay);
igbe->dmaWrite(pciToDma(desc->adv_read.pkt),
max_to_copy, pktDataEvent,
packet->data + split_point, igbe->rxWriteDelay);
desc->adv_wb.header_len = htole(split_point);
desc->adv_wb.sph = 1;
desc->adv_wb.pkt_len = htole((uint16_t)(max_to_copy));
}
} else {
panic("Header split not fitting within header buffer or "
"undecodable packet not fitting in header unsupported\n");
}
break;
default:
panic("Unimplemnted RX receive buffer type: %d\n",
igbe->regs.srrctl.desctype());
}
return bytesCopied;
}
void
IGbE::RxDescCache::pktComplete()
{
assert(unusedCache.size());
RxDesc *desc;
desc = unusedCache.front();
igbe->anBegin("RXS", "Update Desc");
uint16_t crcfixup = igbe->regs.rctl.secrc() ? 0 : 4 ;
DPRINTF(EthernetDesc, "pktPtr->length: %d bytesCopied: %d "
"stripcrc offset: %d value written: %d %d\n",
pktPtr->length, bytesCopied, crcfixup,
htole((uint16_t)(pktPtr->length + crcfixup)),
(uint16_t)(pktPtr->length + crcfixup));
// no support for anything but starting at 0
assert(igbe->regs.rxcsum.pcss() == 0);
DPRINTF(EthernetDesc, "Packet written to memory updating Descriptor\n");
uint16_t status = RXDS_DD;
uint8_t err = 0;
uint16_t ext_err = 0;
uint16_t csum = 0;
uint16_t ptype = 0;
uint16_t ip_id = 0;
assert(bytesCopied <= pktPtr->length);
if (bytesCopied == pktPtr->length)
status |= RXDS_EOP;
IpPtr ip(pktPtr);
Ip6Ptr ip6(pktPtr);
if (ip || ip6) {
if (ip) {
DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
ip->id());
ptype |= RXDP_IPV4;
ip_id = ip->id();
}
if (ip6)
ptype |= RXDP_IPV6;
if (ip && igbe->regs.rxcsum.ipofld()) {
DPRINTF(EthernetDesc, "Checking IP checksum\n");
status |= RXDS_IPCS;
csum = htole(cksum(ip));
if (cksum(ip) != 0) {
err |= RXDE_IPE;
ext_err |= RXDEE_IPE;
DPRINTF(EthernetDesc, "Checksum is bad!!\n");
}
}
TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
if (tcp && igbe->regs.rxcsum.tuofld()) {
DPRINTF(EthernetDesc, "Checking TCP checksum\n");
status |= RXDS_TCPCS;
ptype |= RXDP_TCP;
csum = htole(cksum(tcp));
if (cksum(tcp) != 0) {
DPRINTF(EthernetDesc, "Checksum is bad!!\n");
err |= RXDE_TCPE;
ext_err |= RXDEE_TCPE;
}
}
UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
if (udp && igbe->regs.rxcsum.tuofld()) {
DPRINTF(EthernetDesc, "Checking UDP checksum\n");
status |= RXDS_UDPCS;
ptype |= RXDP_UDP;
csum = htole(cksum(udp));
if (cksum(udp) != 0) {
DPRINTF(EthernetDesc, "Checksum is bad!!\n");
ext_err |= RXDEE_TCPE;
err |= RXDE_TCPE;
}
}
} else { // if ip
DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
}
switch (igbe->regs.srrctl.desctype()) {
case RXDT_LEGACY:
desc->legacy.len = htole((uint16_t)(pktPtr->length + crcfixup));
desc->legacy.status = htole(status);
desc->legacy.errors = htole(err);
// No vlan support at this point... just set it to 0
desc->legacy.vlan = 0;
break;
case RXDT_ADV_SPLIT_A:
case RXDT_ADV_ONEBUF:
desc->adv_wb.rss_type = htole(0);
desc->adv_wb.pkt_type = htole(ptype);
if (igbe->regs.rxcsum.pcsd()) {
// no rss support right now
desc->adv_wb.rss_hash = htole(0);
} else {
desc->adv_wb.id = htole(ip_id);
desc->adv_wb.csum = htole(csum);
}
desc->adv_wb.status = htole(status);
desc->adv_wb.errors = htole(ext_err);
// no vlan support
desc->adv_wb.vlan_tag = htole(0);
break;
default:
panic("Unimplemnted RX receive buffer type %d\n",
igbe->regs.srrctl.desctype());
}
DPRINTF(EthernetDesc, "Descriptor complete w0: %#lx w1: %#lx\n",
desc->adv_read.pkt, desc->adv_read.hdr);
if (bytesCopied == pktPtr->length) {
DPRINTF(EthernetDesc,
"Packet completely written to descriptor buffers\n");
// Deal with the rx timer interrupts
if (igbe->regs.rdtr.delay()) {
Tick delay = igbe->regs.rdtr.delay() * igbe->intClock();
DPRINTF(EthernetSM, "RXS: Scheduling DTR for %ld\n", delay);
igbe->reschedule(igbe->rdtrEvent, igbe->curTick() + delay);
}
if (igbe->regs.radv.idv()) {
Tick delay = igbe->regs.radv.idv() * igbe->intClock();
DPRINTF(EthernetSM, "RXS: Scheduling ADV for %ld\n", delay);
if (!igbe->radvEvent.scheduled()) {
igbe->schedule(igbe->radvEvent, igbe->curTick() + delay);
}
}
        // if neither radv nor rdtr is set, maybe itr is set...
if (!igbe->regs.rdtr.delay() && !igbe->regs.radv.idv()) {
DPRINTF(EthernetSM,
"RXS: Receive interrupt delay disabled, posting IT_RXT\n");
igbe->postInterrupt(IT_RXT);
}
// If the packet is small enough, interrupt appropriately
// I wonder if this is delayed or not?!
if (pktPtr->length <= igbe->regs.rsrpd.idv()) {
DPRINTF(EthernetSM,
"RXS: Posting IT_SRPD beacuse small packet received\n");
igbe->postInterrupt(IT_SRPD);
}
bytesCopied = 0;
}
pktPtr = NULL;
//igbe->checkDrain();
enableSm();
pktDone = true;
igbe->anBegin("RXS", "Done Updating Desc");
DPRINTF(EthernetDesc, "Processing of this descriptor complete\n");
igbe->anDq("RXS", annUnusedCacheQ);
unusedCache.pop_front();
igbe->anQ("RXS", annUsedCacheQ);
usedCache.push_back(desc);
}
void
IGbE::RxDescCache::enableSm()
{
igbe->rxTick = true;
igbe->restartClock();
}
bool
IGbE::RxDescCache::packetDone()
{
if (pktDone) {
pktDone = false;
return true;
}
return false;
}
bool
IGbE::RxDescCache::hasOutstandingEvents()
{
return pktEvent.scheduled() || wbEvent.scheduled() ||
fetchEvent.scheduled() || pktHdrEvent.scheduled() ||
pktDataEvent.scheduled();
}
///////////////////////////// IGbE::TxDescCache //////////////////////////////
IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s)
: DescCache<TxDesc>(i,n, s), pktDone(false), isTcp(false),
pktWaiting(false), pktMultiDesc(false),
completionAddress(0), completionEnabled(false),
useTso(false), tsoHeaderLen(0), tsoMss(0), tsoTotalLen(0), tsoUsedLen(0),
tsoPrevSeq(0), tsoPktPayloadBytes(0), tsoLoadedHeader(false),
tsoPktHasHeader(false), tsoDescBytesUsed(0), tsoCopyBytes(0), tsoPkts(0),
pktEvent([this]{ pktComplete(); }, n),
headerEvent([this]{ headerComplete(); }, n),
nullEvent([this]{ nullCallback(); }, n)
{
annSmFetch = "TX Desc Fetch";
annSmWb = "TX Desc Writeback";
annUnusedDescQ = "TX Unused Descriptors";
annUnusedCacheQ = "TX Unused Descriptor Cache";
annUsedCacheQ = "TX Used Descriptor Cache";
annUsedDescQ = "TX Used Descriptors";
annDescQ = "TX Descriptors";
}
void
IGbE::TxDescCache::processContextDesc()
{
assert(unusedCache.size());
TxDesc *desc;
DPRINTF(EthernetDesc, "Checking and processing context descriptors\n");
while (!useTso && unusedCache.size() &&
TxdOp::isContext(unusedCache.front())) {
DPRINTF(EthernetDesc, "Got context descriptor type...\n");
desc = unusedCache.front();
DPRINTF(EthernetDesc, "Descriptor upper: %#lx lower: %#lX\n",
desc->d1, desc->d2);
// is this going to be a tcp or udp packet?
isTcp = TxdOp::tcp(desc) ? true : false;
// setup all the TSO variables, they'll be ignored if we don't use
// tso for this connection
tsoHeaderLen = TxdOp::hdrlen(desc);
tsoMss = TxdOp::mss(desc);
if (TxdOp::isType(desc, TxdOp::TXD_CNXT) && TxdOp::tse(desc)) {
DPRINTF(EthernetDesc, "TCP offload enabled for packet hdrlen: "
"%d mss: %d paylen %ld\n", TxdOp::hdrlen(desc),
TxdOp::mss(desc), TxdOp::getLen(desc));
useTso = true;
tsoTotalLen = TxdOp::getLen(desc);
tsoLoadedHeader = false;
tsoDescBytesUsed = 0;
tsoUsedLen = 0;
tsoPrevSeq = 0;
tsoPktHasHeader = false;
tsoPkts = 0;
tsoCopyBytes = 0;
}
TxdOp::setDd(desc);
unusedCache.pop_front();
igbe->anDq("TXS", annUnusedCacheQ);
usedCache.push_back(desc);
igbe->anQ("TXS", annUsedCacheQ);
}
if (!unusedCache.size())
return;
desc = unusedCache.front();
if (!useTso && TxdOp::isType(desc, TxdOp::TXD_ADVDATA) &&
TxdOp::tse(desc)) {
DPRINTF(EthernetDesc, "TCP offload(adv) enabled for packet "
"hdrlen: %ld mss: %ld paylen %d\n",
tsoHeaderLen, tsoMss, TxdOp::getTsoLen(desc));
useTso = true;
tsoTotalLen = TxdOp::getTsoLen(desc);
tsoLoadedHeader = false;
tsoDescBytesUsed = 0;
tsoUsedLen = 0;
tsoPrevSeq = 0;
tsoPktHasHeader = false;
tsoPkts = 0;
}
if (useTso && !tsoLoadedHeader) {
// we need to fetch a header
DPRINTF(EthernetDesc, "Starting DMA of TSO header\n");
assert(TxdOp::isData(desc) && TxdOp::getLen(desc) >= tsoHeaderLen);
pktWaiting = true;
assert(tsoHeaderLen <= 256);
igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
tsoHeaderLen, headerEvent, tsoHeader, 0);
}
}
void
IGbE::TxDescCache::headerComplete()
{
DPRINTF(EthernetDesc, "TSO: Fetching TSO header complete\n");
pktWaiting = false;
assert(unusedCache.size());
TxDesc *desc = unusedCache.front();
DPRINTF(EthernetDesc, "TSO: len: %ld tsoHeaderLen: %ld\n",
TxdOp::getLen(desc), tsoHeaderLen);
if (TxdOp::getLen(desc) == tsoHeaderLen) {
tsoDescBytesUsed = 0;
tsoLoadedHeader = true;
unusedCache.pop_front();
usedCache.push_back(desc);
} else {
DPRINTF(EthernetDesc, "TSO: header part of larger payload\n");
tsoDescBytesUsed = tsoHeaderLen;
tsoLoadedHeader = true;
}
enableSm();
//igbe->checkDrain();
}
unsigned
IGbE::TxDescCache::getPacketSize(EthPacketPtr p)
{
if (!unusedCache.size())
return 0;
DPRINTF(EthernetDesc, "Starting processing of descriptor\n");
assert(!useTso || tsoLoadedHeader);
TxDesc *desc = unusedCache.front();
if (useTso) {
DPRINTF(EthernetDesc, "getPacket(): TxDescriptor data "
"d1: %#lx d2: %#lx\n", desc->d1, desc->d2);
DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %ld mss: %ld total: %ld "
"used: %ld loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
if (tsoPktHasHeader)
tsoCopyBytes = std::min((tsoMss + tsoHeaderLen) - p->length,
TxdOp::getLen(desc) - tsoDescBytesUsed);
else
tsoCopyBytes = std::min(tsoMss,
TxdOp::getLen(desc) - tsoDescBytesUsed);
unsigned pkt_size =
tsoCopyBytes + (tsoPktHasHeader ? 0 : tsoHeaderLen);
DPRINTF(EthernetDesc, "TSO: descBytesUsed: %ld copyBytes: %ld "
"this descLen: %ld\n",
tsoDescBytesUsed, tsoCopyBytes, TxdOp::getLen(desc));
DPRINTF(EthernetDesc, "TSO: pktHasHeader: %d\n", tsoPktHasHeader);
DPRINTF(EthernetDesc, "TSO: Next packet is %d bytes\n", pkt_size);
return pkt_size;
}
DPRINTF(EthernetDesc, "Next TX packet is %ld bytes\n",
TxdOp::getLen(unusedCache.front()));
return TxdOp::getLen(desc);
}
void
IGbE::TxDescCache::getPacketData(EthPacketPtr p)
{
assert(unusedCache.size());
TxDesc *desc;
desc = unusedCache.front();
DPRINTF(EthernetDesc, "getPacketData(): TxDescriptor data "
"d1: %#lx d2: %#lx\n", desc->d1, desc->d2);
assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
TxdOp::getLen(desc));
pktPtr = p;
pktWaiting = true;
DPRINTF(EthernetDesc, "Starting DMA of packet at offset %d\n", p->length);
if (useTso) {
assert(tsoLoadedHeader);
if (!tsoPktHasHeader) {
DPRINTF(EthernetDesc,
"Loading TSO header (%ld bytes) into start of packet\n",
tsoHeaderLen);
memcpy(p->data, &tsoHeader, tsoHeaderLen);
p->length += tsoHeaderLen;
tsoPktHasHeader = true;
}
}
if (useTso) {
DPRINTF(EthernetDesc,
"Starting DMA of packet at offset %d length: %ld\n",
p->length, tsoCopyBytes);
igbe->dmaRead(pciToDma(TxdOp::getBuf(desc))
+ tsoDescBytesUsed,
tsoCopyBytes, pktEvent, p->data + p->length,
igbe->txReadDelay);
tsoDescBytesUsed += tsoCopyBytes;
assert(tsoDescBytesUsed <= TxdOp::getLen(desc));
} else {
igbe->dmaRead(pciToDma(TxdOp::getBuf(desc)),
TxdOp::getLen(desc), pktEvent, p->data + p->length,
igbe->txReadDelay);
}
}
void
IGbE::TxDescCache::pktComplete()
{
TxDesc *desc;
assert(unusedCache.size());
assert(pktPtr);
igbe->anBegin("TXS", "Update Desc");
DPRINTF(EthernetDesc, "DMA of packet complete\n");
desc = unusedCache.front();
assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) &&
TxdOp::getLen(desc));
DPRINTF(EthernetDesc, "TxDescriptor data d1: %#lx d2: %#lx\n",
desc->d1, desc->d2);
// Set the length of the data in the EtherPacket
if (useTso) {
DPRINTF(EthernetDesc, "TSO: use: %d hdrlen: %ld mss: %ld total: %ld "
"used: %ld loaded hdr: %d\n", useTso, tsoHeaderLen, tsoMss,
tsoTotalLen, tsoUsedLen, tsoLoadedHeader);
pktPtr->length += tsoCopyBytes;
tsoUsedLen += tsoCopyBytes;
DPRINTF(EthernetDesc, "TSO: descBytesUsed: %ld copyBytes: %ld\n",
tsoDescBytesUsed, tsoCopyBytes);
} else {
pktPtr->length += TxdOp::getLen(desc);
}
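// Keep accumulating into the current packet: either this is a non-TSO
// multi-descriptor packet whose EOP descriptor has not been reached yet, or
// the current TSO segment is still below MSS + header and payload remains.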
if ((!TxdOp::eop(desc) && !useTso) ||
(pktPtr->length < ( tsoMss + tsoHeaderLen) &&
tsoTotalLen != tsoUsedLen && useTso)) {
assert(!useTso || (tsoDescBytesUsed == TxdOp::getLen(desc)));
igbe->anDq("TXS", annUnusedCacheQ);
unusedCache.pop_front();
igbe->anQ("TXS", annUsedCacheQ);
usedCache.push_back(desc);
tsoDescBytesUsed = 0;
pktDone = true;
pktWaiting = false;
pktMultiDesc = true;
DPRINTF(EthernetDesc, "Partial Packet Descriptor of %d bytes Done\n",
pktPtr->length);
pktPtr = NULL;
enableSm();
//igbe->checkDrain();
return;
}
pktMultiDesc = false;
// no support for vlans
assert(!TxdOp::vle(desc));
// we only support single packet descriptors at this point
if (!useTso)
assert(TxdOp::eop(desc));
// set that this packet is done
if (TxdOp::rs(desc))
TxdOp::setDd(desc);
DPRINTF(EthernetDesc, "TxDescriptor data d1: %#lx d2: %#lx\n",
desc->d1, desc->d2);
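// TSO: patch the replicated protocol headers for this segment: bump the IP
// identification, rewrite the IP/UDP length fields, advance the TCP sequence
// number, and clear FIN/PSH on every segment except the last one.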
if (useTso) {
IpPtr ip(pktPtr);
Ip6Ptr ip6(pktPtr);
if (ip) {
DPRINTF(EthernetDesc, "TSO: Modifying IP header. Id + %d\n",
tsoPkts);
ip->id(ip->id() + tsoPkts++);
ip->len(pktPtr->length - EthPtr(pktPtr)->size());
}
if (ip6)
ip6->plen(pktPtr->length - EthPtr(pktPtr)->size());
TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
if (tcp) {
DPRINTF(EthernetDesc,
"TSO: Modifying TCP header. old seq %d + %ld\n",
tcp->seq(), tsoPrevSeq);
tcp->seq(tcp->seq() + tsoPrevSeq);
if (tsoUsedLen != tsoTotalLen)
tcp->flags(tcp->flags() & ~9); // clear fin & psh
}
UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
if (udp) {
DPRINTF(EthernetDesc, "TSO: Modifying UDP header.\n");
udp->len(pktPtr->length - EthPtr(pktPtr)->size());
}
tsoPrevSeq = tsoUsedLen;
}
/*if (DTRACE(EthernetDesc)) {
IpPtr ip(pktPtr);
if (ip)
DPRINTF(EthernetDesc, "Proccesing Ip packet with Id=%d\n",
ip->id());
else
DPRINTF(EthernetSM, "Proccesing Non-Ip packet\n");
}*/
// Checksums are only offloaded for new descriptor types
if (TxdOp::isData(desc) && (TxdOp::ixsm(desc) || TxdOp::txsm(desc))) {
DPRINTF(EthernetDesc, "Calculating checksums for packet\n");
IpPtr ip(pktPtr);
Ip6Ptr ip6(pktPtr);
assert(ip || ip6);
if (ip && TxdOp::ixsm(desc)) {
ip->sum(0);
ip->sum(cksum(ip));
//igbe->txIpChecksums++;
DPRINTF(EthernetDesc, "Calculated IP checksum\n");
}
if (TxdOp::txsm(desc)) {
TcpPtr tcp = ip ? TcpPtr(ip) : TcpPtr(ip6);
UdpPtr udp = ip ? UdpPtr(ip) : UdpPtr(ip6);
if (tcp) {
tcp->sum(0);
tcp->sum(cksum(tcp));
//igbe->txTcpChecksums++;
DPRINTF(EthernetDesc, "Calculated TCP checksum\n");
} else if (udp) {
assert(udp);
udp->sum(0);
udp->sum(cksum(udp));
//igbe->txUdpChecksums++;
DPRINTF(EthernetDesc, "Calculated UDP checksum\n");
} else {
panic("Told to checksum, but don't know how\n");
}
}
}
if (TxdOp::ide(desc)) {
// Deal with the rx timer interrupts
DPRINTF(EthernetDesc, "Descriptor had IDE set\n");
if (igbe->regs.tidv.idv()) {
Tick delay = igbe->regs.tidv.idv() * igbe->intClock();
DPRINTF(EthernetDesc, "setting tidv\n");
igbe->reschedule(igbe->tidvEvent, igbe->curTick() + delay, true);
}
if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) {
Tick delay = igbe->regs.tadv.idv() * igbe->intClock();
DPRINTF(EthernetDesc, "setting tadv\n");
if (!igbe->tadvEvent.scheduled()) {
igbe->schedule(igbe->tadvEvent, igbe->curTick() + delay);
}
}
}
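// Retire the descriptor only once its buffer has been fully consumed; with
// TSO a single data descriptor may feed several outgoing segments.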
if (!useTso || TxdOp::getLen(desc) == tsoDescBytesUsed) {
DPRINTF(EthernetDesc, "Descriptor Done\n");
igbe->anDq("TXS", annUnusedCacheQ);
unusedCache.pop_front();
igbe->anQ("TXS", annUsedCacheQ);
usedCache.push_back(desc);
tsoDescBytesUsed = 0;
}
if (useTso && tsoUsedLen == tsoTotalLen)
useTso = false;
DPRINTF(EthernetDesc,
"------Packet of %d bytes ready for transmission-------\n",
pktPtr->length);
pktDone = true;
pktWaiting = false;
pktPtr = NULL;
tsoPktHasHeader = false;
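// Write back immediately when WTHRESH is 0; otherwise batch descriptor
// writebacks until the threshold is reached (counted in cache blocks or
// descriptors depending on TXDCTL.GRAN).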
if (igbe->regs.txdctl.wthresh() == 0) {
igbe->anBegin("TXS", "Desc Writeback");
DPRINTF(EthernetDesc, "WTHRESH == 0, writing back descriptor\n");
writeback(0);
} else if (!igbe->regs.txdctl.gran() && igbe->regs.txdctl.wthresh() <=
descInBlock(usedCache.size())) {
DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
igbe->anBegin("TXS", "Desc Writeback");
writeback((igbe->cacheBlockSize()-1)>>4);
} else if (igbe->regs.txdctl.wthresh() <= usedCache.size()) {
DPRINTF(EthernetDesc, "used > WTHRESH, writing back descriptor\n");
igbe->anBegin("TXS", "Desc Writeback");
writeback((igbe->cacheBlockSize()-1)>>4);
}
enableSm();
//igbe->checkDrain();
}
void
IGbE::TxDescCache::actionAfterWb()
{
DPRINTF(EthernetDesc, "actionAfterWb() completionEnabled: %d\n",
completionEnabled);
igbe->postInterrupt(iGbReg::IT_TXDW);
if (completionEnabled) {
descEnd = igbe->regs.tdh();
DPRINTF(EthernetDesc,
"Completion writing back value: %d to addr: %#lx\n", descEnd,
completionAddress);
igbe->dmaWrite(pciToDma(mbits(completionAddress, 63, 2)),
sizeof(descEnd), nullEvent, (uint8_t*)&descEnd, 0);
}
}
bool
IGbE::TxDescCache::packetAvailable()
{
if (pktDone) {
pktDone = false;
return true;
}
return false;
}
void
IGbE::TxDescCache::enableSm()
{
igbe->txTick = true;
igbe->restartClock();
}
bool
IGbE::TxDescCache::hasOutstandingEvents()
{
return pktEvent.scheduled() || wbEvent.scheduled() ||
fetchEvent.scheduled();
}
///////////////////////////////////// IGbE /////////////////////////////////
void
IGbE::restartClock()
{
DPRINTF(EthernetSM, "IGbE: requesting restart clock: sched=%d rxt=%d txt=%d txf=%d\n",
tickEvent.scheduled(), rxTick, txTick, txFifoTick);
if (!tickEvent.scheduled() && (rxTick || txTick || txFifoTick)) {
schedule(tickEvent, curTick() + 1000);
DPRINTF(EthernetSM, "IGbE: scheduled\n");
}
}
void
IGbE::txStateMachine()
{
if (!regs.tctl.en()) {
txTick = false;
DPRINTF(EthernetSM, "TXS: TX disabled, stopping ticking\n");
return;
}
// If we have a packet available and its length is not 0 (meaning it's not
// a multi-descriptor packet), put it in the fifo; otherwise on the next
// iteration we'll get the rest of the data
if (txPacket && txDescCache.packetAvailable()
&& !txDescCache.packetMultiDesc() && txPacket->length) {
anQ("TXS", "TX FIFO Q");
DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n");
#ifndef NDEBUG
bool success =
#endif
txFifo.push(txPacket);
txFifoTick = true;
assert(success);
txPacket = NULL;
anBegin("TXS", "Desc Writeback");
txDescCache.writeback((cacheBlockSize()-1)>>4);
return;
}
// Only support descriptor granularity
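// LWTHRESH is treated as a count of 8-descriptor units, hence the factor of 8.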
if (regs.txdctl.lwthresh() &&
txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
postInterrupt(IT_TXDLOW);
}
if (!txPacket) {
txPacket = std::make_shared<EthPacketData>(16384);
}
if (!txDescCache.packetWaiting()) {
if (txDescCache.descLeft() == 0) {
postInterrupt(IT_TXQE);
anBegin("TXS", "Desc Writeback");
txDescCache.writeback(0);
anBegin("TXS", "Desc Fetch");
anWe("TXS", txDescCache.annUnusedCacheQ);
txDescCache.fetchDescriptors();
DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing "
"writeback stopping ticking and posting TXQE\n");
txTick = false;
return;
}
if (!(txDescCache.descUnused())) {
anBegin("TXS", "Desc Fetch");
txDescCache.fetchDescriptors();
anWe("TXS", txDescCache.annUnusedCacheQ);
DPRINTF(EthernetSM, "TXS: No descriptors available in cache, "
"fetching and stopping ticking\n");
txTick = false;
return;
}
anPq("TXS", txDescCache.annUnusedCacheQ);
txDescCache.processContextDesc();
if (txDescCache.packetWaiting()) {
DPRINTF(EthernetSM,
"TXS: Fetching TSO header, stopping ticking\n");
txTick = false;
return;
}
unsigned size = txDescCache.getPacketSize(txPacket);
if (size > 0 && txFifo.avail() > size) {
anRq("TXS", "TX FIFO Q");
anBegin("TXS", "DMA Packet");
DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and "
"beginning DMA of next packet\n", size);
txFifo.reserve(size);
txDescCache.getPacketData(txPacket);
} else if (size == 0) {
DPRINTF(EthernetSM, "TXS: getPacketSize returned: %d\n", size);
DPRINTF(EthernetSM,
"TXS: No packets to get, writing back used descriptors\n");
anBegin("TXS", "Desc Writeback");
txDescCache.writeback(0);
} else {
anWf("TXS", "TX FIFO Q");
DPRINTF(EthernetSM, "TXS: FIFO full, stopping ticking until space "
"available in FIFO\n");
txTick = false;
}
return;
}
DPRINTF(EthernetSM, "TXS: Nothing to do, stopping ticking\n");
txTick = false;
}
bool
IGbE::ethRxPkt(EthPacketPtr pkt)
{
//rxBytes += pkt->length;
//rxPackets++;
DPRINTF(Ethernet, "RxFIFO: Receiving pcakte from wire\n");
anBegin("RXQ", "Wire Recv");
if (!regs.rctl.en()) {
DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
anBegin("RXQ", "FIFO Drop");
return true;
}
// restart the state machines if they are stopped
rxTick = true;
if ((rxTick || txTick) && !tickEvent.scheduled()) {
DPRINTF(EthernetSM,
"RXS: received packet into fifo, starting ticking\n");
restartClock();
}
if (!rxFifo.push(pkt)) {
DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
postInterrupt(IT_RXO, true);
anBegin("RXQ", "FIFO Drop");
return false;
}
#if 0
if (CPA::available() && cpa->enabled()) {
assert(sys->numSystemsRunning <= 2);
System *other_sys;
if (sys->systemList[0] == sys)
other_sys = sys->systemList[1];
else
other_sys = sys->systemList[0];
cpa->hwDq(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
anQ("RXQ", "RX FIFO Q");
cpa->hwWe(CPA::FL_NONE, sys, macAddr, "RXQ", "WireQ", 0, other_sys);
}
#endif
return true;
}
void
IGbE::rxStateMachine()
{
if (!regs.rctl.en()) {
rxTick = false;
DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
return;
}
// If the packet is done check for interrupts/descriptors/etc
if (rxDescCache.packetDone()) {
rxDmaPacket = false;
DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
int descLeft = rxDescCache.descLeft();
DPRINTF(EthernetSM, "RXS: descLeft: %d rdmts: %d rdlen: %d\n",
descLeft, regs.rctl.rdmts(), regs.rdlen());
// rdmts 2->1/8, 1->1/4, 0->1/2
unsigned ratio = (1ULL << (regs.rctl.rdmts() + 1));
if (descLeft * ratio <= regs.rdlen()) {
DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) "
"because of descriptors left\n");
postInterrupt(IT_RXDMT);
}
if (rxFifo.empty())
rxDescCache.writeback(0);
if (descLeft == 0) {
anBegin("RXS", "Writeback Descriptors");
rxDescCache.writeback(0);
DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing"
" writeback and stopping ticking\n");
rxTick = false;
}
// only support descriptor granularity
assert(regs.rxdctl.gran());
if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
DPRINTF(EthernetSM,
"RXS: Writing back because WTHRESH >= descUsed\n");
anBegin("RXS", "Writeback Descriptors");
if (regs.rxdctl.wthresh() < (cacheBlockSize()>>4))
rxDescCache.writeback(regs.rxdctl.wthresh()-1);
else
rxDescCache.writeback((cacheBlockSize()-1)>>4);
}
if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
((rxDescCache.descLeft() - rxDescCache.descUnused()) >
regs.rxdctl.hthresh())) {
DPRINTF(EthernetSM, "RXS: Fetching descriptors because "
"descUnused < PTHRESH\n");
anBegin("RXS", "Fetch Descriptors");
rxDescCache.fetchDescriptors();
}
if (rxDescCache.descUnused() == 0) {
anBegin("RXS", "Fetch Descriptors");
rxDescCache.fetchDescriptors();
anWe("RXS", rxDescCache.annUnusedCacheQ);
DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
"fetching descriptors and stopping ticking\n");
rxTick = false;
}
return;
}
if (rxDmaPacket) {
DPRINTF(EthernetSM,
"RXS: stopping ticking until packet DMA completes\n");
rxTick = false;
return;
}
if (!rxDescCache.descUnused()) {
anBegin("RXS", "Fetch Descriptors");
rxDescCache.fetchDescriptors();
anWe("RXS", rxDescCache.annUnusedCacheQ);
DPRINTF(EthernetSM, "RXS: No descriptors available in cache, "
"stopping ticking\n");
rxTick = false;
DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
return;
}
anPq("RXS", rxDescCache.annUnusedCacheQ);
if (rxFifo.empty()) {
anWe("RXS", "RX FIFO Q");
DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
rxTick = false;
return;
}
anPq("RXS", "RX FIFO Q");
anBegin("RXS", "Get Desc");
EthPacketPtr pkt;
pkt = rxFifo.front();
pktOffset = rxDescCache.writePacket(pkt, pktOffset);
DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
if (pktOffset == pkt->length) {
anBegin( "RXS", "FIFO Dequeue");
DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
pktOffset = 0;
anDq("RXS", "RX FIFO Q");
rxFifo.pop();
}
DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
rxTick = false;
rxDmaPacket = true;
anBegin("RXS", "DMA Packet");
}
void
IGbE::txWire()
{
txFifoTick = false;
if (txFifo.empty()) {
anWe("TXQ", "TX FIFO Q");
return;
}
anPq("TXQ", "TX FIFO Q");
if (sendPacket(txFifo.front())) {
anQ("TXQ", "WireQ");
/*if (DTRACE(EthernetSM)) {
IpPtr ip(txFifo.front());
if (ip)
DPRINTF(EthernetSM, "Transmitting Ip packet with Id=%d\n",
ip->id());
else
DPRINTF(EthernetSM, "Transmitting Non-Ip packet\n");
}*/
anDq("TXQ", "TX FIFO Q");
anBegin("TXQ", "Wire Send");
DPRINTF(EthernetSM,
"TxFIFO: Successful transmit, bytes available in fifo: %d\n",
txFifo.avail());
//txBytes += txFifo.front()->length;
//txPackets++;
txFifo.pop();
}
}
void
IGbE::tick()
{
DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
inTick = true;
if (rxTick)
rxStateMachine();
if (txTick)
txStateMachine();
// If txWire returns and txFifoTick is still set, that means the data we
// sent to the other end was already accepted and we can send another
// frame right away. This is consistent with the previous behavior which
// would send another frame if one was ready in ethTxDone. This version
// avoids growing the stack with each frame sent which can cause stack
// overflow.
while (txFifoTick)
txWire();
if (rxTick || txTick || txFifoTick) {
DPRINTF(EthernetSM, "IGbE: rescheduling next cycle\n");
schedule(tickEvent, curTick() + 1000);
} else {
DPRINTF(EthernetSM, "IGbE: no next cycle scheduled\n");
}
inTick = false;
}
void
IGbE::ethTxDone()
{
anBegin("TXQ", "Send Done");
// restart the tx state machines if they are stopped
// fifo to send another packet
// tx sm to put more data into the fifo
txFifoTick = true;
if (txDescCache.descLeft() != 0)
txTick = true;
if (!inTick)
restartClock();
DPRINTF(EthernetSM, "TxFIFO: Transmission complete\n");
}
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @file
* Device model for Intel's 8254x line of gigabit ethernet controllers.
*/
#ifndef __DEV_NET_I8254XGBE_HH__
#define __DEV_NET_I8254XGBE_HH__
#include <deque>
#include <string>
#include "sims/nic/e1000_gem5/support.h"
#include "sims/nic/e1000_gem5/gem5/pktfifo.h"
#include "sims/nic/e1000_gem5/gem5/inet.h"
#include "sims/nic/e1000_gem5/i8254xGBe_defs.h"
struct IGbEParams {
int rx_fifo_size;
int tx_fifo_size;
Tick fetch_delay, wb_delay;
Tick fetch_comp_delay, wb_comp_delay;
Tick rx_write_delay, tx_read_delay;
Tick pio_delay;
uint16_t phy_pid;
uint16_t phy_epid;
int rx_desc_cache_size;
int tx_desc_cache_size;
};
class IGbE : public nicbm::Runner::Device
{
protected:
Tick curTick() {
return runner_->TimePs();
}
virtual void SetupIntro(struct SimbricksProtoPcieDevIntro &di);
virtual void RegRead(uint8_t bar, uint64_t addr, void *dest,
size_t len);
virtual void RegWrite(uint8_t bar, uint64_t addr, const void *src,
size_t len);
virtual void DmaComplete(nicbm::DMAOp &op);
virtual void EthRx(uint8_t port, const void *data, size_t len);
virtual void Timed(nicbm::TimedEvent &te);
void schedule(EventFunctionWrapper &ev, Tick t);
void reschedule(EventFunctionWrapper &ev, Tick t, bool always=false);
void deschedule(EventFunctionWrapper &ev);
void intrPost();
void intrClear();
void dmaWrite(Addr daddr, size_t len, EventFunctionWrapper &ev,
const void *buf, Tick delay);
void dmaRead(Addr saddr, size_t len, EventFunctionWrapper &ev,
void *buf, Tick delay);
bool sendPacket(EthPacketPtr p);
private:
const IGbEParams &params_;
// device registers
iGbReg::Regs regs;
// eeprom data, status and control bits
int eeOpBits, eeAddrBits, eeDataBits;
uint8_t eeOpcode, eeAddr;
uint16_t flash[iGbReg::EEPROM_SIZE];
// packet fifos
PacketFifo rxFifo;
PacketFifo txFifo;
// Packet that we are currently putting into the txFifo
EthPacketPtr txPacket;
// Should the Rx/Tx state machines tick?
bool inTick;
bool rxTick;
bool txTick;
bool txFifoTick;
bool rxDmaPacket;
// Number of bytes copied from current RX packet
unsigned pktOffset;
// Delays in managing descriptors
Tick fetchDelay, wbDelay;
Tick fetchCompDelay, wbCompDelay;
Tick rxWriteDelay, txReadDelay;
Tick pioDelay;
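// Fixed granularity (in bytes) used when batching descriptor fetches and
// writebacks.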
unsigned cacheBlockSize() const { return 64; }
// Event and function to deal with RDTR timer expiring
void rdtrProcess() {
rxDescCache.writeback(0);
DPRINTF(EthernetIntr,
"Posting RXT interrupt because RDTR timer expired\n");
postInterrupt(iGbReg::IT_RXT);
}
EventFunctionWrapper rdtrEvent;
// Event and function to deal with RADV timer expiring
void radvProcess() {
rxDescCache.writeback(0);
DPRINTF(EthernetIntr,
"Posting RXT interrupt because RADV timer expired\n");
postInterrupt(iGbReg::IT_RXT);
}
EventFunctionWrapper radvEvent;
// Event and function to deal with TADV timer expiring
void tadvProcess() {
txDescCache.writeback(0);
DPRINTF(EthernetIntr,
"Posting TXDW interrupt because TADV timer expired\n");
postInterrupt(iGbReg::IT_TXDW);
}
EventFunctionWrapper tadvEvent;
// Event and function to deal with TIDV timer expiring
void tidvProcess() {
txDescCache.writeback(0);
DPRINTF(EthernetIntr,
"Posting TXDW interrupt because TIDV timer expired\n");
postInterrupt(iGbReg::IT_TXDW);
}
EventFunctionWrapper tidvEvent;
// Main event to tick the device
void tick();
EventFunctionWrapper tickEvent;
uint64_t macAddr;
void rxStateMachine();
void txStateMachine();
void txWire();
/** Write an interrupt into the interrupt pending register and check mask
* and interrupt limit timer before sending interrupt to CPU
* @param t the type of interrupt we are posting
* @param now should we ignore the interrupt limiting timer
*/
void postInterrupt(iGbReg::IntTypes t, bool now = false);
/** Check whether changes to the mask register require an interrupt to
* be posted or have removed an interrupt cause.
*/
void chkInterrupt();
/** Send an interrupt to the cpu
*/
void delayIntEvent();
void cpuPostInt();
// Event to moderate interrupts
EventFunctionWrapper interEvent;
/** Clear the interrupt line to the cpu
*/
void cpuClearInt();
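// Clock used to scale the RDTR/RADV/TIDV/TADV delay register values;
// 1024*1024 Ticks is roughly 1 us with picosecond Ticks.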
Tick intClock() { return 1024 * 1024; /* 1us */ }
/** This function is used to restart the clock so it can handle things like
* draining and resume in one place. */
void restartClock();
/** Check if all the draining things that need to occur have occurred and
* handle the drain event if so.
*/
//void checkDrain();
void anBegin(std::string sm, std::string st, int flags = 0) { }
void anQ(std::string sm, std::string q) { }
void anDq(std::string sm, std::string q) { }
void anPq(std::string sm, std::string q, int num = 1) { }
void anRq(std::string sm, std::string q, int num = 1) { }
void anWe(std::string sm, std::string q) {}
void anWf(std::string sm, std::string q) {}
template<class T>
class DescCache
{
protected:
virtual Addr descBase() const = 0;
virtual long descHead() const = 0;
virtual long descTail() const = 0;
virtual long descLen() const = 0;
virtual void updateHead(long h) = 0;
virtual void enableSm() = 0;
virtual void actionAfterWb() {}
virtual void fetchAfterWb() = 0;
typedef std::deque<T *> CacheType;
CacheType usedCache;
CacheType unusedCache;
T *fetchBuf;
T *wbBuf;
// Pointer to the device we cache for
IGbE *igbe;
// Name of this descriptor cache
std::string _name;
// How far we've cached
int cachePnt;
// The size of the descriptor cache
int size;
// How many descriptors we are currently fetching
int curFetching;
// How many descriptors we are currently writing back
int wbOut;
// if we wrote back to the end of the descriptor ring and are going
// to have to wrap and write more
bool moreToWb;
// What the alignment is of the next descriptor writeback
Addr wbAlignment;
/** The packet that is currently being dmad to memory if any */
EthPacketPtr pktPtr;
/** Shortcut for DMA address translation */
Addr pciToDma(Addr a) { return a; }
public:
/** Annotation names for the state machines and queues */
std::string annSmFetch, annSmWb, annUnusedDescQ, annUsedCacheQ,
annUsedDescQ, annUnusedCacheQ, annDescQ;
DescCache(IGbE *i, const std::string n, int s);
virtual ~DescCache();
std::string name() { return _name; }
/** If the address/len/head change while we have dirty descriptors,
* that is very bad. This function checks for that and panics if it
* happens.
*/
void areaChanged();
void writeback(Addr aMask);
void writeback1();
EventFunctionWrapper wbDelayEvent;
/** Fetch a chunk of descriptors into the descriptor cache.
* Calls fetchComplete when the memory system returns the data
*/
void fetchDescriptors();
void fetchDescriptors1();
EventFunctionWrapper fetchDelayEvent;
/** Called by event when dma to read descriptors is completed
*/
void fetchComplete();
EventFunctionWrapper fetchEvent;
/** Called by event when dma to writeback descriptors is completed
*/
void wbComplete();
EventFunctionWrapper wbEvent;
/* Return the number of descriptors left in the ring, so the device has
* a way to figure out if it needs to interrupt.
*/
unsigned
descLeft() const
{
unsigned left = unusedCache.size();
if (cachePnt > descTail())
left += (descLen() - cachePnt + descTail());
else
left += (descTail() - cachePnt);
return left;
}
/* Return the number of descriptors used and not written back.
*/
unsigned descUsed() const { return usedCache.size(); }
/* Return the number of unused descriptors in the cache. */
unsigned descUnused() const { return unusedCache.size(); }
/* Get into a state where the descriptor address/head/etc could be
* changed */
void reset();
virtual bool hasOutstandingEvents() {
return wbEvent.scheduled() || fetchEvent.scheduled();
}
};
class RxDescCache : public DescCache<iGbReg::RxDesc>
{
protected:
Addr descBase() const override { return igbe->regs.rdba(); }
long descHead() const override { return igbe->regs.rdh(); }
long descLen() const override { return igbe->regs.rdlen() >> 4; }
long descTail() const override { return igbe->regs.rdt(); }
void updateHead(long h) override { igbe->regs.rdh(h); }
void enableSm() override;
void fetchAfterWb() override {
if (!igbe->rxTick)
fetchDescriptors();
}
bool pktDone;
/** Variable to help with header/data completion events */
int splitCount;
/** Bytes of packet that have been copied, so we know when to
set EOP */
unsigned bytesCopied;
public:
RxDescCache(IGbE *i, std::string n, int s);
/** Write the given packet into the buffer(s) pointed to by the
* descriptor and update the bookkeeping. Should only be called when
* there are no DMAs pending.
* @param packet ethernet packet to write
* @param pkt_offset bytes already copied from the packet to memory
* @return pkt_offset + number of bytes copied during this call
*/
int writePacket(EthPacketPtr packet, int pkt_offset);
/** Called by event when dma to write packet is completed
*/
void pktComplete();
/** Check if the dma on the packet has completed and RX state machine
* can continue
*/
bool packetDone();
EventFunctionWrapper pktEvent;
// Event to handle issuing header and data write at the same time
// and only calling pktComplete() when both are completed
void pktSplitDone();
EventFunctionWrapper pktHdrEvent;
EventFunctionWrapper pktDataEvent;
bool hasOutstandingEvents() override;
};
friend class RxDescCache;
RxDescCache rxDescCache;
class TxDescCache : public DescCache<iGbReg::TxDesc>
{
protected:
Addr descBase() const override { return igbe->regs.tdba(); }
long descHead() const override { return igbe->regs.tdh(); }
long descTail() const override { return igbe->regs.tdt(); }
long descLen() const override { return igbe->regs.tdlen() >> 4; }
void updateHead(long h) override { igbe->regs.tdh(h); }
void enableSm() override;
void actionAfterWb() override;
void fetchAfterWb() override {
if (!igbe->txTick)
fetchDescriptors();
}
bool pktDone;
bool isTcp;
bool pktWaiting;
bool pktMultiDesc;
Addr completionAddress;
bool completionEnabled;
uint32_t descEnd;
// tso variables
bool useTso;
Addr tsoHeaderLen;
Addr tsoMss;
Addr tsoTotalLen;
Addr tsoUsedLen;
Addr tsoPrevSeq;
Addr tsoPktPayloadBytes;
bool tsoLoadedHeader;
bool tsoPktHasHeader;
uint8_t tsoHeader[256];
Addr tsoDescBytesUsed;
Addr tsoCopyBytes;
int tsoPkts;
public:
TxDescCache(IGbE *i, std::string n, int s);
/** Tell the cache to DMA a packet from main memory into its buffer and
* return the size of the packet so space can be reserved in the tx fifo.
* @return size of the packet
*/
unsigned getPacketSize(EthPacketPtr p);
void getPacketData(EthPacketPtr p);
void processContextDesc();
/** Return the number of descriptors in a cache block for threshold
* operations.
*/
unsigned
descInBlock(unsigned num_desc)
{
return num_desc / igbe->cacheBlockSize() / sizeof(iGbReg::TxDesc);
}
/** Ask if the packet has been transferred so the state machine can give
* it to the fifo.
* @return packet available in descriptor cache
*/
bool packetAvailable();
/** Ask if we are still waiting for the packet to be transferred.
* @return packet still in transit.
*/
bool packetWaiting() { return pktWaiting; }
/** Ask if this packet is composed of multiple descriptors
* so even if we've got data, we need to wait for more before
* we can send it out.
* @return packet can't be sent out because it's a multi-descriptor
* packet
*/
bool packetMultiDesc() { return pktMultiDesc;}
/** Called by event when dma to write packet is completed
*/
void pktComplete();
EventFunctionWrapper pktEvent;
void headerComplete();
EventFunctionWrapper headerEvent;
void completionWriteback(Addr a, bool enabled) {
DPRINTF(EthernetDesc,
"Completion writeback Addr: %#lx enabled: %d\n",
a, enabled);
completionAddress = a;
completionEnabled = enabled;
}
bool hasOutstandingEvents() override;
void nullCallback() {
DPRINTF(EthernetDesc, "Completion writeback complete\n");
}
EventFunctionWrapper nullEvent;
};
friend class TxDescCache;
TxDescCache txDescCache;
public:
typedef IGbEParams Params;
const Params *
params() const { return &params_; }
IGbE(const Params *params);
~IGbE();
void init();
Tick lastInterrupt;
Tick read(Addr addr, uint8_t len, void *dst);
Tick write(Addr addr, uint8_t len, const void *src);
bool ethRxPkt(EthPacketPtr packet);
void ethTxDone();
};
#endif //__DEV_NET_I8254XGBE_HH__
/*
* Copyright (c) 2006 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @file
* Register and structure descriptions for Intel's 8254x line of gigabit ethernet controllers.
*/
#include <stdint.h>
#include "sims/nic/e1000_gem5/support.h"
#include "sims/nic/e1000_gem5/gem5/bitfield.h"
namespace iGbReg {
// Registers used by the Intel GbE NIC
const uint32_t REG_CTRL = 0x00000;
const uint32_t REG_STATUS = 0x00008;
const uint32_t REG_EECD = 0x00010;
const uint32_t REG_EERD = 0x00014;
const uint32_t REG_CTRL_EXT = 0x00018;
const uint32_t REG_MDIC = 0x00020;
const uint32_t REG_FCAL = 0x00028;
const uint32_t REG_FCAH = 0x0002C;
const uint32_t REG_FCT = 0x00030;
const uint32_t REG_VET = 0x00038;
const uint32_t REG_PBA = 0x01000;
const uint32_t REG_ICR = 0x000C0;
const uint32_t REG_ITR = 0x000C4;
const uint32_t REG_ICS = 0x000C8;
const uint32_t REG_IMS = 0x000D0;
const uint32_t REG_IMC = 0x000D8;
const uint32_t REG_IAM = 0x000E0;
const uint32_t REG_RCTL = 0x00100;
const uint32_t REG_FCTTV = 0x00170;
const uint32_t REG_TIPG = 0x00410;
const uint32_t REG_AIFS = 0x00458;
const uint32_t REG_LEDCTL = 0x00e00;
const uint32_t REG_EICR = 0x01580;
const uint32_t REG_IVAR0 = 0x01700;
const uint32_t REG_FCRTL = 0x02160;
const uint32_t REG_FCRTH = 0x02168;
const uint32_t REG_RDBAL = 0x02800;
const uint32_t REG_RDBAH = 0x02804;
const uint32_t REG_RDLEN = 0x02808;
const uint32_t REG_SRRCTL = 0x0280C;
const uint32_t REG_RDH = 0x02810;
const uint32_t REG_RDT = 0x02818;
const uint32_t REG_RDTR = 0x02820;
const uint32_t REG_RXDCTL = 0x02828;
const uint32_t REG_RADV = 0x0282C;
const uint32_t REG_TCTL = 0x00400;
const uint32_t REG_TDBAL = 0x03800;
const uint32_t REG_TDBAH = 0x03804;
const uint32_t REG_TDLEN = 0x03808;
const uint32_t REG_TDH = 0x03810;
const uint32_t REG_TXDCA_CTL = 0x03814;
const uint32_t REG_TDT = 0x03818;
const uint32_t REG_TIDV = 0x03820;
const uint32_t REG_TXDCTL = 0x03828;
const uint32_t REG_TADV = 0x0382C;
const uint32_t REG_TDWBAL = 0x03838;
const uint32_t REG_TDWBAH = 0x0383C;
const uint32_t REG_CRCERRS = 0x04000;
const uint32_t REG_RXCSUM = 0x05000;
const uint32_t REG_RLPML = 0x05004;
const uint32_t REG_RFCTL = 0x05008;
const uint32_t REG_MTA = 0x05200;
const uint32_t REG_RAL = 0x05400;
const uint32_t REG_RAH = 0x05404;
const uint32_t REG_VFTA = 0x05600;
const uint32_t REG_WUC = 0x05800;
const uint32_t REG_WUFC = 0x05808;
const uint32_t REG_WUS = 0x05810;
const uint32_t REG_MANC = 0x05820;
const uint32_t REG_SWSM = 0x05B50;
const uint32_t REG_FWSM = 0x05B54;
const uint32_t REG_SWFWSYNC = 0x05B5C;
const uint8_t EEPROM_READ_OPCODE_SPI = 0x03;
const uint8_t EEPROM_RDSR_OPCODE_SPI = 0x05;
const uint8_t EEPROM_SIZE = 64;
const uint16_t EEPROM_CSUM = 0xBABA;
const uint8_t VLAN_FILTER_TABLE_SIZE = 128;
const uint8_t RCV_ADDRESS_TABLE_SIZE = 24;
const uint8_t MULTICAST_TABLE_SIZE = 128;
const uint32_t STATS_REGS_SIZE = 0x228;
// Registers that are accessed in the PHY
const uint8_t PHY_PSTATUS = 0x1;
const uint8_t PHY_PID = 0x2;
const uint8_t PHY_EPID = 0x3;
const uint8_t PHY_GSTATUS = 10;
const uint8_t PHY_EPSTATUS = 15;
const uint8_t PHY_AGC = 18;
// Receive Descriptor Status Flags
const uint16_t RXDS_DYNINT = 0x800;
const uint16_t RXDS_UDPV = 0x400;
const uint16_t RXDS_CRCV = 0x100;
const uint16_t RXDS_PIF = 0x080;
const uint16_t RXDS_IPCS = 0x040;
const uint16_t RXDS_TCPCS = 0x020;
const uint16_t RXDS_UDPCS = 0x010;
const uint16_t RXDS_VP = 0x008;
const uint16_t RXDS_IXSM = 0x004;
const uint16_t RXDS_EOP = 0x002;
const uint16_t RXDS_DD = 0x001;
// Receive Descriptor Error Flags
const uint8_t RXDE_RXE = 0x80;
const uint8_t RXDE_IPE = 0x40;
const uint8_t RXDE_TCPE = 0x20;
const uint8_t RXDE_SEQ = 0x04;
const uint8_t RXDE_SE = 0x02;
const uint8_t RXDE_CE = 0x01;
// Receive Descriptor Extended Error Flags
const uint16_t RXDEE_HBO = 0x008;
const uint16_t RXDEE_CE = 0x010;
const uint16_t RXDEE_LE = 0x020;
const uint16_t RXDEE_PE = 0x080;
const uint16_t RXDEE_OSE = 0x100;
const uint16_t RXDEE_USE = 0x200;
const uint16_t RXDEE_TCPE = 0x400;
const uint16_t RXDEE_IPE = 0x800;
// Receive Descriptor Types
const uint8_t RXDT_LEGACY = 0x00;
const uint8_t RXDT_ADV_ONEBUF = 0x01;
const uint8_t RXDT_ADV_SPLIT_A = 0x05;
// Receive Descriptor Packet Types
const uint16_t RXDP_IPV4 = 0x001;
const uint16_t RXDP_IPV4E = 0x002;
const uint16_t RXDP_IPV6 = 0x004;
const uint16_t RXDP_IPV6E = 0x008;
const uint16_t RXDP_TCP = 0x010;
const uint16_t RXDP_UDP = 0x020;
const uint16_t RXDP_SCTP = 0x040;
const uint16_t RXDP_NFS = 0x080;
// Interrupt types
enum IntTypes
{
IT_NONE = 0x00000, //dummy value
IT_TXDW = 0x00001,
IT_TXQE = 0x00002,
IT_LSC = 0x00004,
IT_RXSEQ = 0x00008,
IT_RXDMT = 0x00010,
IT_RXO = 0x00040,
IT_RXT = 0x00080,
IT_MADC = 0x00200,
IT_RXCFG = 0x00400,
IT_GPI0 = 0x02000,
IT_GPI1 = 0x04000,
IT_TXDLOW = 0x08000,
IT_SRPD = 0x10000,
IT_ACK = 0x20000
};
// Receive Descriptor struct
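// The same 16 bytes are interpreted as either a legacy descriptor, an
// advanced read descriptor, or an advanced writeback descriptor, selected by
// SRRCTL.DESCTYPE.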
struct RxDesc {
union {
struct {
Addr buf;
uint16_t len;
uint16_t csum;
uint8_t status;
uint8_t errors;
uint16_t vlan;
} legacy;
struct {
Addr pkt;
Addr hdr;
} adv_read;
struct {
uint16_t rss_type:4;
uint16_t pkt_type:12;
uint16_t __reserved1:5;
uint16_t header_len:10;
uint16_t sph:1;
union {
struct {
uint16_t id;
uint16_t csum;
};
uint32_t rss_hash;
};
uint32_t status:20;
uint32_t errors:12;
uint16_t pkt_len;
uint16_t vlan_tag;
} adv_wb ;
};
};
struct TxDesc {
uint64_t d1;
uint64_t d2;
};
namespace TxdOp {
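// Field accessors for the two 64-bit words (d1/d2) of legacy, context, and
// advanced TX descriptors.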
const uint8_t TXD_CNXT = 0x0;
const uint8_t TXD_DATA = 0x1;
const uint8_t TXD_ADVCNXT = 0x2;
const uint8_t TXD_ADVDATA = 0x3;
inline bool isLegacy(TxDesc *d) { return !bits(d->d2,29,29); }
inline uint8_t getType(TxDesc *d) { return bits(d->d2, 23,20); }
inline bool isType(TxDesc *d, uint8_t type) { return getType(d) == type; }
inline bool isTypes(TxDesc *d, uint8_t t1, uint8_t t2) { return isType(d, t1) || isType(d, t2); }
inline bool isAdvDesc(TxDesc *d) { return !isLegacy(d) && isTypes(d, TXD_ADVDATA,TXD_ADVCNXT); }
inline bool isContext(TxDesc *d) { return !isLegacy(d) && isTypes(d,TXD_CNXT, TXD_ADVCNXT); }
inline bool isData(TxDesc *d) { return !isLegacy(d) && isTypes(d, TXD_DATA, TXD_ADVDATA); }
inline Addr getBuf(TxDesc *d) { assert(isLegacy(d) || isData(d)); return d->d1; }
inline Addr getLen(TxDesc *d) { if (isLegacy(d)) return bits(d->d2,15,0); else return bits(d->d2, 19,0); }
inline void setDd(TxDesc *d) { replaceBits(d->d2, 35, 32, 1ULL); }
inline bool ide(TxDesc *d) { return bits(d->d2, 31,31) && (getType(d) == TXD_DATA || isLegacy(d)); }
inline bool vle(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 30,30); }
inline bool rs(TxDesc *d) { return bits(d->d2, 27,27); }
inline bool ic(TxDesc *d) { assert(isLegacy(d) || isData(d)); return isLegacy(d) && bits(d->d2, 26,26); }
inline bool tse(TxDesc *d) {
if (isTypes(d, TXD_CNXT, TXD_DATA))
return bits(d->d2, 26,26);
if (isType(d, TXD_ADVDATA))
return bits(d->d2, 31, 31);
return false;
}
inline bool ifcs(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 25,25); }
inline bool eop(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 24,24); }
inline bool ip(TxDesc *d) { assert(isContext(d)); return bits(d->d2, 25,25); }
inline bool tcp(TxDesc *d) { assert(isContext(d)); return bits(d->d2, 24,24); }
inline uint8_t getCso(TxDesc *d) { assert(isLegacy(d)); return bits(d->d2, 23,16); }
inline uint8_t getCss(TxDesc *d) { assert(isLegacy(d)); return bits(d->d2, 47,40); }
inline bool ixsm(TxDesc *d) { return isData(d) && bits(d->d2, 40,40); }
inline bool txsm(TxDesc *d) { return isData(d) && bits(d->d2, 41,41); }
inline int tucse(TxDesc *d) { assert(isContext(d)); return bits(d->d1,63,48); }
inline int tucso(TxDesc *d) { assert(isContext(d)); return bits(d->d1,47,40); }
inline int tucss(TxDesc *d) { assert(isContext(d)); return bits(d->d1,39,32); }
inline int ipcse(TxDesc *d) { assert(isContext(d)); return bits(d->d1,31,16); }
inline int ipcso(TxDesc *d) { assert(isContext(d)); return bits(d->d1,15,8); }
inline int ipcss(TxDesc *d) { assert(isContext(d)); return bits(d->d1,7,0); }
inline int mss(TxDesc *d) { assert(isContext(d)); return bits(d->d2,63,48); }
inline int hdrlen(TxDesc *d) {
assert(isContext(d));
if (!isAdvDesc(d))
return bits(d->d2,47,40);
return bits(d->d2, 47,40) + bits(d->d1, 8,0) + bits(d->d1, 15, 9);
}
inline int getTsoLen(TxDesc *d) { assert(isType(d, TXD_ADVDATA)); return bits(d->d2, 63,46); }
inline int utcmd(TxDesc *d) { assert(isContext(d)); return bits(d->d2,24,31); }
} // namespace TxdOp
#define ADD_FIELD32(NAME, OFFSET, BITS) \
inline uint32_t NAME() { return bits(_data, OFFSET+BITS-1, OFFSET); } \
inline void NAME(uint32_t d) { replaceBits(_data, OFFSET+BITS-1, OFFSET,d); }
#define ADD_FIELD64(NAME, OFFSET, BITS) \
inline uint64_t NAME() { return bits(_data, OFFSET+BITS-1, OFFSET); } \
inline void NAME(uint64_t d) { replaceBits(_data, OFFSET+BITS-1, OFFSET,d); }
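// Example: ADD_FIELD32(en,1,1) inside RCTL generates en() to read bit 1 of
// _data and en(uint32_t) to set it, which is how calls like regs.rctl.en()
// work throughout the model.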
struct Regs {
template<class T>
struct Reg {
T _data;
T operator()() { return _data; }
const Reg<T> &operator=(T d) { _data = d; return *this;}
bool operator==(T d) { return d == _data; }
void operator()(T d) { _data = d; }
Reg() { _data = 0; }
};
struct CTRL : public Reg<uint32_t> { // 0x0000 CTRL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(fd,0,1); // full duplex
ADD_FIELD32(bem,1,1); // big endian mode
ADD_FIELD32(pcipr,2,1); // PCI priority
ADD_FIELD32(lrst,3,1); // link reset
ADD_FIELD32(tme,4,1); // test mode enable
ADD_FIELD32(asde,5,1); // Auto-speed detection
ADD_FIELD32(slu,6,1); // Set link up
ADD_FIELD32(ilos,7,1); // invert loss-of-signal
ADD_FIELD32(speed,8,2); // speed selection bits
ADD_FIELD32(be32,10,1); // big endian mode 32
ADD_FIELD32(frcspd,11,1); // force speed
ADD_FIELD32(frcdpx,12,1); // force duplex
ADD_FIELD32(duden,13,1); // dock/undock enable
ADD_FIELD32(dudpol,14,1); // dock/undock polarity
ADD_FIELD32(fphyrst,15,1); // force phy reset
ADD_FIELD32(extlen,16,1); // external link status enable
ADD_FIELD32(rsvd,17,1); // reserved
ADD_FIELD32(sdp0d,18,1); // software controlled pin data
ADD_FIELD32(sdp1d,19,1); // software controlled pin data
ADD_FIELD32(sdp2d,20,1); // software controlled pin data
ADD_FIELD32(sdp3d,21,1); // software controlled pin data
ADD_FIELD32(sdp0i,22,1); // software controlled pin dir
ADD_FIELD32(sdp1i,23,1); // software controlled pin dir
ADD_FIELD32(sdp2i,24,1); // software controlled pin dir
ADD_FIELD32(sdp3i,25,1); // software controlled pin dir
ADD_FIELD32(rst,26,1); // reset
ADD_FIELD32(rfce,27,1); // receive flow control enable
ADD_FIELD32(tfce,28,1); // transmit flow control enable
ADD_FIELD32(rte,29,1); // routing tag enable
ADD_FIELD32(vme,30,1); // vlan enable
ADD_FIELD32(phyrst,31,1); // phy reset
};
CTRL ctrl;
struct STATUS : public Reg<uint32_t> { // 0x0008 STATUS Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(fd,0,1); // full duplex
ADD_FIELD32(lu,1,1); // link up
ADD_FIELD32(func,2,2); // function id
ADD_FIELD32(txoff,4,1); // transmission paused
ADD_FIELD32(tbimode,5,1); // tbi mode
ADD_FIELD32(speed,6,2); // link speed
ADD_FIELD32(asdv,8,2); // auto speed detection value
ADD_FIELD32(mtxckok,10,1); // mtx clock running ok
ADD_FIELD32(pci66,11,1); // In 66Mhz pci slot
ADD_FIELD32(bus64,12,1); // in 64 bit slot
ADD_FIELD32(pcix,13,1); // Pci mode
ADD_FIELD32(pcixspd,14,2); // pci x speed
};
STATUS sts;
struct EECD : public Reg<uint32_t> { // 0x0010 EECD Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(sk,0,1); // clock input to the eeprom
ADD_FIELD32(cs,1,1); // chip select to eeprom
ADD_FIELD32(din,2,1); // data input to eeprom
ADD_FIELD32(dout,3,1); // data output bit
ADD_FIELD32(fwe,4,2); // flash write enable
ADD_FIELD32(ee_req,6,1); // request eeprom access
ADD_FIELD32(ee_gnt,7,1); // grant eeprom access
ADD_FIELD32(ee_pres,8,1); // eeprom present
ADD_FIELD32(ee_size,9,1); // eeprom size
ADD_FIELD32(ee_sz1,10,1); // eeprom size
ADD_FIELD32(rsvd,11,2); // reserved
ADD_FIELD32(ee_type,13,1); // type of eeprom
} ;
EECD eecd;
struct EERD : public Reg<uint32_t> { // 0x0014 EERD Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(start,0,1); // start read
ADD_FIELD32(done,1,1); // done read
ADD_FIELD32(addr,2,14); // address
ADD_FIELD32(data,16,16); // data
};
EERD eerd;
struct CTRL_EXT : public Reg<uint32_t> { // 0x0018 CTRL_EXT Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(gpi_en,0,4); // enable interrupts from gpio
ADD_FIELD32(phyint,5,1); // reads the phy internal int status
ADD_FIELD32(sdp2_data,6,1); // data from gpio sdp
ADD_FIELD32(spd3_data,7,1); // data from gpio sdp
ADD_FIELD32(spd2_iodir,10,1); // direction of sdp2
ADD_FIELD32(spd3_iodir,11,1); // direction of sdp2
ADD_FIELD32(asdchk,12,1); // initiate auto-speed-detection
ADD_FIELD32(eerst,13,1); // reset the eeprom
ADD_FIELD32(spd_byps,15,1); // bypass speed select
ADD_FIELD32(ro_dis,17,1); // disable relaxed memory ordering
ADD_FIELD32(vreg,21,1); // power down the voltage regulator
ADD_FIELD32(link_mode,22,2); // interface to talk to the link
ADD_FIELD32(iame, 27,1); // interrupt acknowledge auto-mask ??
ADD_FIELD32(drv_loaded, 28,1);// driver is loaded and in charge of device
ADD_FIELD32(timer_clr, 29,1); // clear interrupt timers after IMS clear ??
};
CTRL_EXT ctrl_ext;
struct MDIC : public Reg<uint32_t> { // 0x0020 MDIC Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(data,0,16); // data
ADD_FIELD32(regadd,16,5); // register address
ADD_FIELD32(phyadd,21,5); // phy addresses
ADD_FIELD32(op,26,2); // opcode
ADD_FIELD32(r,28,1); // ready
ADD_FIELD32(i,29,1); // interrupt
ADD_FIELD32(e,30,1); // error
};
MDIC mdic;
struct ICR : public Reg<uint32_t> { // 0x00C0 ICR Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(txdw,0,1) // tx descriptor written back
ADD_FIELD32(txqe,1,1) // tx queue empty
ADD_FIELD32(lsc,2,1) // link status change
ADD_FIELD32(rxseq,3,1) // rcv sequence error
ADD_FIELD32(rxdmt0,4,1) // rcv descriptor min thresh
ADD_FIELD32(rsvd1,5,1) // reserved
ADD_FIELD32(rxo,6,1) // receive overrun
ADD_FIELD32(rxt0,7,1) // receiver timer interrupt
ADD_FIELD32(mdac,9,1) // mdi/o access complete
ADD_FIELD32(rxcfg,10,1) // recv /c/ ordered sets
ADD_FIELD32(phyint,12,1) // phy interrupt
ADD_FIELD32(gpi1,13,1) // gpi int 1
ADD_FIELD32(gpi2,14,1) // gpi int 2
ADD_FIELD32(txdlow,15,1) // transmit desc low thresh
ADD_FIELD32(srpd,16,1) // small receive packet detected
ADD_FIELD32(ack,17,1); // receive ack frame
ADD_FIELD32(int_assert, 31,1); // interrupt caused a system interrupt
};
ICR icr;
uint32_t imr; // register that contains the current interrupt mask
struct ITR : public Reg<uint32_t> { // 0x00C4 ITR Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(interval, 0,16); // minimum inter-interrupt interval
// specified in 256ns interrupts
};
ITR itr;
// When CTRL_EXT.IAME and the ICR.INT_ASSERT is 1 an ICR read or write
// causes the IAM register contents to be written into the IMC
// automatically clearing all interrupts that have a bit in the IAM set
uint32_t iam;
struct RCTL : public Reg<uint32_t> { // 0x0100 RCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rst,0,1); // Reset
ADD_FIELD32(en,1,1); // Enable
ADD_FIELD32(sbp,2,1); // Store bad packets
ADD_FIELD32(upe,3,1); // Unicast Promiscuous enabled
ADD_FIELD32(mpe,4,1); // Multicast promiscuous enabled
ADD_FIELD32(lpe,5,1); // long packet reception enabled
ADD_FIELD32(lbm,6,2); //
ADD_FIELD32(rdmts,8,2); //
ADD_FIELD32(mo,12,2); //
ADD_FIELD32(mdr,14,1); //
ADD_FIELD32(bam,15,1); //
ADD_FIELD32(bsize,16,2); //
ADD_FIELD32(vfe,18,1); //
ADD_FIELD32(cfien,19,1); //
ADD_FIELD32(cfi,20,1); //
ADD_FIELD32(dpf,22,1); // discard pause frames
ADD_FIELD32(pmcf,23,1); // pass mac control frames
ADD_FIELD32(bsex,25,1); // buffer size extension
ADD_FIELD32(secrc,26,1); // strip ethernet crc from incoming packet
unsigned descSize()
{
switch(bsize()) {
case 0: return bsex() == 0 ? 2048 : 0;
case 1: return bsex() == 0 ? 1024 : 16384;
case 2: return bsex() == 0 ? 512 : 8192;
case 3: return bsex() == 0 ? 256 : 4096;
default:
return 0;
}
}
};
RCTL rctl;
struct FCTTV : public Reg<uint32_t> { // 0x0170 FCTTV
using Reg<uint32_t>::operator=;
ADD_FIELD32(ttv,0,16); // Transmit Timer Value
};
FCTTV fcttv;
struct TCTL : public Reg<uint32_t> { // 0x0400 TCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rst,0,1); // Reset
ADD_FIELD32(en,1,1); // Enable
ADD_FIELD32(bce,2,1); // busy check enable
ADD_FIELD32(psp,3,1); // pad short packets
ADD_FIELD32(ct,4,8); // collision threshold
ADD_FIELD32(cold,12,10); // collision distance
ADD_FIELD32(swxoff,22,1); // software xoff transmission
ADD_FIELD32(pbe,23,1); // packet burst enable
ADD_FIELD32(rtlc,24,1); // retransmit late collisions
ADD_FIELD32(nrtu,25,1); // on underrun no TX
ADD_FIELD32(mulr,26,1); // multiple request
};
TCTL tctl;
struct PBA : public Reg<uint32_t> { // 0x1000 PBA Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rxa,0,16);
ADD_FIELD32(txa,16,16);
};
PBA pba;
struct FCRTL : public Reg<uint32_t> { // 0x2160 FCRTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rtl,3,28); // make this bigger than the spec so we can have
// a larger buffer
ADD_FIELD32(xone, 31,1);
};
FCRTL fcrtl;
struct FCRTH : public Reg<uint32_t> { // 0x2168 FCRTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rth,3,13); // make this bigger than the spec so we can have
//a larger buffer
ADD_FIELD32(xfce, 31,1);
};
FCRTH fcrth;
struct RDBA : public Reg<uint64_t> { // 0x2800 RDBA Register
using Reg<uint64_t>::operator=;
ADD_FIELD64(rdbal,0,32); // base address of rx descriptor ring
ADD_FIELD64(rdbah,32,32); // base address of rx descriptor ring
};
RDBA rdba;
struct RDLEN : public Reg<uint32_t> { // 0x2808 RDLEN Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(len,7,13); // number of bytes in the descriptor buffer
};
RDLEN rdlen;
struct SRRCTL : public Reg<uint32_t> { // 0x280C SRRCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(pktlen, 0, 8);
ADD_FIELD32(hdrlen, 8, 8); // guess based on header, not documented
ADD_FIELD32(desctype, 25,3); // type of descriptor 000 legacy, 001 adv,
//101 hdr split
unsigned bufLen() { return pktlen() << 10; }
unsigned hdrLen() { return hdrlen() << 6; }
};
SRRCTL srrctl;
struct RDH : public Reg<uint32_t> { // 0x2810 RDH Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rdh,0,16); // head of the descriptor ring
};
RDH rdh;
struct RDT : public Reg<uint32_t> { // 0x2818 RDT Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(rdt,0,16); // tail of the descriptor ring
};
RDT rdt;
struct RDTR : public Reg<uint32_t> { // 0x2820 RDTR Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(delay,0,16); // receive delay timer
ADD_FIELD32(fpd, 31,1); // flush partial descriptor block ??
};
RDTR rdtr;
struct RXDCTL : public Reg<uint32_t> { // 0x2828 RXDCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(pthresh,0,6); // prefetch threshold, less than this
// consider prefetch
ADD_FIELD32(hthresh,8,6); // number of descriptors in host mem to
// consider prefetch
ADD_FIELD32(wthresh,16,6); // writeback threshold
ADD_FIELD32(gran,24,1); // granularity 0 = desc, 1 = cacheline
};
RXDCTL rxdctl;
struct RADV : public Reg<uint32_t> { // 0x282C RADV Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(idv,0,16); // absolute interrupt delay
};
RADV radv;
struct RSRPD : public Reg<uint32_t> { // 0x2C00 RSRPD Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(idv,0,12); // size to interrupt on small packets
};
RSRPD rsrpd;
struct TDBA : public Reg<uint64_t> { // 0x3800 TDBAL Register
using Reg<uint64_t>::operator=;
ADD_FIELD64(tdbal,0,32); // base address of transmit descriptor ring
ADD_FIELD64(tdbah,32,32); // base address of transmit descriptor ring
};
TDBA tdba;
struct TDLEN : public Reg<uint32_t> { // 0x3808 TDLEN Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(len,7,13); // number of bytes in the descriptor buffer
};
TDLEN tdlen;
struct TDH : public Reg<uint32_t> { // 0x3810 TDH Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(tdh,0,16); // head of the descriptor ring
};
TDH tdh;
struct TXDCA_CTL : public Reg<uint32_t> { // 0x3814 TXDCA_CTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(cpu_mask, 0, 5);
ADD_FIELD32(enabled, 5,1);
ADD_FIELD32(relax_ordering, 6, 1);
};
TXDCA_CTL txdca_ctl;
struct TDT : public Reg<uint32_t> { // 0x3818 TDT Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(tdt,0,16); // tail of the descriptor ring
};
TDT tdt;
struct TIDV : public Reg<uint32_t> { // 0x3820 TIDV Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(idv,0,16); // interrupt delay
};
TIDV tidv;
struct TXDCTL : public Reg<uint32_t> { // 0x3828 TXDCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(pthresh, 0,6); // if the number of descriptors the controller
// has is below this number, a prefetch is considered
ADD_FIELD32(hthresh,8,8); // number of valid descriptors in host memory
// before a prefetch is considered
ADD_FIELD32(wthresh,16,6); // number of descriptors to keep until
// writeback is considered
ADD_FIELD32(gran, 24,1); // granularity of above values (0 = cacheline,
// 1 == descriptor)
ADD_FIELD32(lwthresh,25,7); // xmit descriptor low thresh, interrupt
// below this level
};
TXDCTL txdctl;
struct TADV : public Reg<uint32_t> { // 0x382C TADV Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(idv,0,16); // absolute interrupt delay
};
TADV tadv;
/*
struct TDWBA : public Reg<uint64_t> { // 0x3838 TDWBA Register
using Reg<uint64_t>::operator=;
ADD_FIELD64(en,0,1); // enable transmit descriptor ring address writeback
ADD_FIELD64(tdwbal,2,32); // base address of transmit descriptor ring address writeback
ADD_FIELD64(tdwbah,32,32); // base address of transmit descriptor ring
};
TDWBA tdwba;*/
uint64_t tdwba;
struct RXCSUM : public Reg<uint32_t> { // 0x5000 RXCSUM Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(pcss,0,8);
ADD_FIELD32(ipofld,8,1);
ADD_FIELD32(tuofld,9,1);
ADD_FIELD32(pcsd, 13,1);
};
RXCSUM rxcsum;
uint32_t rlpml; // 0x5004 RLPML probably maximum accepted packet size
struct RFCTL : public Reg<uint32_t> { // 0x5008 RFCTL Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(iscsi_dis,0,1);
ADD_FIELD32(iscsi_dwc,1,5);
ADD_FIELD32(nfsw_dis,6,1);
ADD_FIELD32(nfsr_dis,7,1);
ADD_FIELD32(nfs_ver,8,2);
ADD_FIELD32(ipv6_dis,10,1);
ADD_FIELD32(ipv6xsum_dis,11,1);
ADD_FIELD32(ackdis,13,1);
ADD_FIELD32(ipfrsp_dis,14,1);
ADD_FIELD32(exsten,15,1);
};
RFCTL rfctl;
struct MANC : public Reg<uint32_t> { // 0x5820 MANC Register
using Reg<uint32_t>::operator=;
ADD_FIELD32(smbus,0,1); // SMBus enabled #####
ADD_FIELD32(asf,1,1); // ASF enabled #####
ADD_FIELD32(ronforce,2,1); // reset on force TCO
ADD_FIELD32(rsvd,3,5); // reserved
ADD_FIELD32(rmcp1,8,1); // rmcp1 filtering
ADD_FIELD32(rmcp2,9,1); // rmcp2 filtering
ADD_FIELD32(ipv4,10,1); // enable ipv4
ADD_FIELD32(ipv6,11,1); // enable ipv6
ADD_FIELD32(snap,12,1); // accept snap
ADD_FIELD32(arp,13,1); // filter arp #####
ADD_FIELD32(neighbor,14,1); // neighbor discovery
ADD_FIELD32(arp_resp,15,1); // arp response
ADD_FIELD32(tcorst,16,1); // tco reset happened
ADD_FIELD32(rcvtco,17,1); // receive tco enabled ######
ADD_FIELD32(blkphyrst,18,1);// block phy resets ########
ADD_FIELD32(rcvall,19,1); // receive all
ADD_FIELD32(macaddrfltr,20,1); // mac address filtering ######
ADD_FIELD32(mng2host,21,1); // mng2 host packets #######
ADD_FIELD32(ipaddrfltr,22,1); // ip address filtering
ADD_FIELD32(xsumfilter,23,1); // checksum filtering
ADD_FIELD32(brfilter,24,1); // broadcast filtering
ADD_FIELD32(smbreq,25,1); // smb request
ADD_FIELD32(smbgnt,26,1); // smb grant
ADD_FIELD32(smbclkin,27,1); // smbclkin
ADD_FIELD32(smbdatain,28,1); // smbdatain
ADD_FIELD32(smbdataout,29,1); // smb data out
ADD_FIELD32(smbclkout,30,1); // smb clock out
};
MANC manc;
struct SWSM : public Reg<uint32_t> { // 0x5B50 SWSM register
using Reg<uint32_t>::operator=;
ADD_FIELD32(smbi,0,1); // Semaphore bit
ADD_FIELD32(swesmbi, 1,1); // Software EEPROM semaphore
ADD_FIELD32(wmng, 2,1); // Wake MNG clock
ADD_FIELD32(reserved, 3, 29);
};
SWSM swsm;
struct FWSM : public Reg<uint32_t> { // 0x5B54 FWSM register
using Reg<uint32_t>::operator=;
ADD_FIELD32(eep_fw_semaphore,0,1);
ADD_FIELD32(fw_mode, 1,3);
ADD_FIELD32(ide, 4,1);
ADD_FIELD32(sol, 5,1);
ADD_FIELD32(eep_roload, 6,1);
ADD_FIELD32(reserved, 7,8);
ADD_FIELD32(fw_val_bit, 15, 1);
ADD_FIELD32(reset_cnt, 16, 3);
ADD_FIELD32(ext_err_ind, 19, 6);
ADD_FIELD32(reserved2, 25, 7);
};
FWSM fwsm;
uint32_t sw_fw_sync;
};
} // namespace iGbReg
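For orientation, a minimal usage sketch of the register block above, assuming (as in gem5's register helpers) that ADD_FIELD32/ADD_FIELD64 expand to a getter and a setter named after each field, and that the enclosing block is a struct named Regs; all names and values below are illustrative, not part of the source:

// Hypothetical sketch only; assumes gem5-style field accessors.
iGbReg::Regs regs;
regs.txdctl = 0;                         // whole-register write via operator=
regs.txdctl.wthresh(4);                  // set the writeback threshold field
uint32_t gran = regs.txdctl.gran();      // read back the granularity bit
uint64_t ring_base = 0x1000000;          // example TX descriptor ring address
regs.tdba.tdbal(ring_base & 0xffffffff); // program low 32 bits of the ring base
regs.tdba.tdbah(ring_base >> 32);        // program high 32 bits of the ring base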
# Copyright 2021 Max Planck Institute for Software Systems, and
# National University of Singapore
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
include mk/subdir_pre.mk
bin_e1000_gem5 := $(d)e1000_gem5
OBJS := $(addprefix $(d),e1000_gem5.o i8254xGBe.o gem5/bitfield.o gem5/inet.o \
gem5/pktfifo.o)
$(bin_e1000_gem5): $(OBJS) $(lib_nicbm) $(lib_nicif)
CLEAN := $(bin_e1000_gem5) $(OBJS)
ALL := $(bin_e1000_gem5)
include mk/subdir_post.mk
#ifndef SIMS_NIC_E1000_GEM5_SUPPORT_H_
#define SIMS_NIC_E1000_GEM5_SUPPORT_H_
#include <arpa/inet.h>
#include <functional>
#include <memory>
#include <simbricks/nicbm/nicbm.h>
#define DNET_LIL_ENDIAN 42
#define DNET_BYTESEX DNET_LIL_ENDIAN
//#define DEBUG_E1000
#ifdef DEBUG_E1000
# define DPRINTF(x,y...) fprintf(stderr, #x ": " y)
#else
# define DPRINTF(x,y...) do { } while (0)
#endif
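// Usage note (illustrative, not from the source): with DEBUG_E1000 defined,
// a call such as
//   DPRINTF(EthernetDesc, "fetched %d descriptors\n", n);
// expands to fprintf(stderr, "EthernetDesc" ": " "fetched %d descriptors\n", n),
// i.e. the first argument is stringified and prepended as a prefix; with
// DEBUG_E1000 undefined the macro compiles away to nothing.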
typedef uint64_t Addr;
typedef uint64_t Tick;
#define ETH_ADDR_LEN 6
class Gem5TimerEv;
class EthPacketData {
public:
unsigned length;
uint8_t *data;
EthPacketData(unsigned len) : length(0), data(new uint8_t[len]) { }
~EthPacketData() { delete[] data; }
};
typedef std::shared_ptr<EthPacketData> EthPacketPtr;
class EventFunctionWrapper : public nicbm::TimedEvent {
public:
bool sched;
std::function<void(void)> callback;
std::string _name;
EventFunctionWrapper(const std::function<void(void)> &callback,
const std::string &name)
: sched(false), callback(callback), _name(name)
{ }
virtual ~EventFunctionWrapper() = default;
bool scheduled() { return sched; }
};
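// Illustrative (hypothetical) usage: wrap a handler in a lambda so the event
// can later be armed on the runner and fired via callback(), e.g.
//   EventFunctionWrapper txEvent([this]{ processTxRing(); }, "txEvent");
//   if (!txEvent.scheduled()) { /* arm it through the device's scheduling helpers */ }
// The sched flag mirrors whether the event is currently pending;
// processTxRing() is a made-up handler name for this example.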
static inline uint16_t htobe(uint16_t x) {
return htons(x);
}
static inline uint16_t htole(uint16_t x) {
return x;
}
void warn(const char *fmt, ...);
void panic(const char *fmt, ...);
#endif // SIMS_NIC_E1000_GEM5_SUPPORT_H_
@@ -24,6 +24,7 @@ include mk/subdir_pre.mk
$(eval $(call subdir,corundum))
$(eval $(call subdir,corundum_bm))
$(eval $(call subdir,e1000_gem5))
$(eval $(call subdir,i40e_bm))
include mk/subdir_post.mk