diff --git a/Makefile b/Makefile index da723fb98b..378562ce97 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ CSRC = $(wildcard src/c/*.c) COBJ = $(CSRC:.c=.o) PREFIX = /usr/local -LUAJIT_CFLAGS := -include $(CURDIR)/gcc-preinclude.h +LUAJIT_CFLAGS := -include $(CURDIR)/gcc-preinclude.h -DLUAJIT_ENABLE_LUA52COMPAT all: $(LUAJIT) $(SYSCALL) $(PFLUA) # LuaJIT diff --git a/src/apps/interlink/receiver.lua b/src/apps/interlink/receiver.lua index 07afa17965..8cfefd1e65 100644 --- a/src/apps/interlink/receiver.lua +++ b/src/apps/interlink/receiver.lua @@ -5,11 +5,22 @@ module(...,package.seeall) local shm = require("core.shm") local interlink = require("lib.interlink") -local Receiver = {name="apps.interlink.Receiver"} +local Receiver = { + name = "apps.interlink.Receiver", + config = { + queue = {}, + size = {default=1024} + } +} -function Receiver:new (queue) +function Receiver:new (conf) + local self = { + attached = false, + queue = conf.queue, + size = conf.size + } packet.enable_group_freelist() - return setmetatable({attached=false, queue=queue}, {__index=Receiver}) + return setmetatable(self, {__index=Receiver}) end function Receiver:link () @@ -17,7 +28,7 @@ function Receiver:link () if not self.attached then self.shm_name = "group/interlink/"..queue..".interlink" self.backlink = "interlink/receiver/"..queue..".interlink" - self.interlink = interlink.attach_receiver(self.shm_name) + self.interlink = interlink.attach_receiver(self.shm_name, self.size) shm.alias(self.backlink, self.shm_name) self.attached = true end diff --git a/src/apps/interlink/transmitter.lua b/src/apps/interlink/transmitter.lua index 9e4afd8ba5..e3f8448776 100644 --- a/src/apps/interlink/transmitter.lua +++ b/src/apps/interlink/transmitter.lua @@ -5,11 +5,22 @@ module(...,package.seeall) local shm = require("core.shm") local interlink = require("lib.interlink") -local Transmitter = {name="apps.interlink.Transmitter"} +local Transmitter = { + name = "apps.interlink.Transmitter", + config = 
{ + queue = {}, + size = {default=1024} + } +} -function Transmitter:new (queue) +function Transmitter:new (conf) + local self = { + attached = false, + queue = conf.queue, + size = conf.size + } packet.enable_group_freelist() - return setmetatable({attached=false, queue=queue}, {__index=Transmitter}) + return setmetatable(self, {__index=Transmitter}) end function Transmitter:link () @@ -17,7 +28,7 @@ function Transmitter:link () if not self.attached then self.shm_name = "group/interlink/"..queue..".interlink" self.backlink = "interlink/transmitter/"..queue..".interlink" - self.interlink = interlink.attach_transmitter(self.shm_name) + self.interlink = interlink.attach_transmitter(self.shm_name, self.size) shm.alias(self.backlink, self.shm_name) self.attached = true end diff --git a/src/apps/lwaftr/binding_table.lua b/src/apps/lwaftr/binding_table.lua index 709f328160..38ce5ec358 100644 --- a/src/apps/lwaftr/binding_table.lua +++ b/src/apps/lwaftr/binding_table.lua @@ -69,6 +69,13 @@ local ipv4_ntop = require("lib.yang.util").ipv4_ntop local band, lshift, rshift = bit.band, bit.lshift, bit.rshift +softwire_key_t = ffi.typeof[[ + struct { uint32_t ipv4; uint16_t psid; } +]] +softwire_value_t = ffi.typeof[[ + struct { uint8_t b4_ipv6[16], br_address[16]; } +]] + psid_map_key_t = ffi.typeof[[ struct { uint32_t addr; } ]] @@ -134,32 +141,32 @@ function BTLookupQueue:reset_queue() end local BindingTable = {} -local lookup_key function BindingTable.new(psid_map, softwires) local ret = { psid_map = assert(psid_map), softwires = assert(softwires), + entry = softwires.entry_type() } - lookup_key = ret.softwires.entry_type().key return setmetatable(ret, {__index=BindingTable}) end function BindingTable:add_softwire_entry(entry_blob) - local entry = self.softwires.entry_type() + local entry = self.entry assert(ffi.sizeof(entry) == ffi.sizeof(entry_blob)) - ffi.copy(entry, entry_blob, ffi.sizeof(entry_blob)) + ffi.copy(entry, entry_blob, ffi.sizeof(entry)) 
self.softwires:add(entry.key, entry.value) end function BindingTable:remove_softwire_entry(entry_key_blob) - local entry = self.softwires.entry_type() + local entry = self.entry assert(ffi.sizeof(entry.key) == ffi.sizeof(entry_key_blob)) - ffi.copy(entry.key, entry_key_blob, ffi.sizeof(entry_key_blob)) + ffi.copy(entry.key, entry_key_blob, ffi.sizeof(entry.key)) self.softwires:remove(entry.key) end function BindingTable:lookup(ipv4, port) + local lookup_key = self.entry.key local psid = self:lookup_psid(ipv4, port) lookup_key.ipv4 = ipv4 lookup_key.psid = psid @@ -226,7 +233,7 @@ function BindingTable:iterate_softwires() end function pack_psid_map_entry (softwire) - local port_set = assert(softwire.value.port_set) + local port_set = assert(softwire.port_set) local psid_length = port_set.psid_length local shift = 16 - psid_length - (port_set.reserved_ports_bit_count or 0) @@ -235,7 +242,7 @@ function pack_psid_map_entry (softwire) ("psid_length %s + shift %s should not exceed 16"): format(psid_length, shift)) - local key = softwire.key.ipv4 + local key = softwire.ipv4 local value = {psid_length = psid_length, shift = shift} return key, value @@ -262,18 +269,33 @@ function load (conf) self.keys[key] = value end - for entry in conf.softwire:iterate() do + local softwires = ctable.new{ + key_type = softwire_key_t, + value_type = softwire_value_t, + max_occupancy_rate = 0.4 + } + + local key, value = softwire_key_t(), softwire_value_t() + for _, entry in ipairs(conf.softwire) do + -- Add entry to binding table + key.ipv4 = entry.ipv4 + key.psid = entry.psid + value.b4_ipv6 = entry.b4_ipv6 + value.br_address = entry.br_address + softwires:add(key, value) + -- Check that the map either hasn't been added or that -- it's the same value as one which has. 
local psid_key, psid_value = pack_psid_map_entry(entry) if not inter_psid_map:exists(psid_key, psid_value) then inter_psid_map:add(psid_key, psid_value) - psid_builder:add(entry.key.ipv4, psid_value) + psid_builder:add(entry.ipv4, psid_value) end end local psid_map = psid_builder:build(psid_map_value_t(), true) - return BindingTable.new(psid_map, conf.softwire) + + return BindingTable.new(psid_map, softwires) end function selftest() diff --git a/src/apps/lwaftr/lwutil.lua b/src/apps/lwaftr/lwutil.lua index 5556f2f9f8..48a4816d92 100644 --- a/src/apps/lwaftr/lwutil.lua +++ b/src/apps/lwaftr/lwutil.lua @@ -6,7 +6,6 @@ local S = require("syscall") local bit = require("bit") local ffi = require("ffi") local lib = require("core.lib") -local cltable = require("lib.cltable") local binary = require("lib.yang.binary") local band = bit.band diff --git a/src/core/group_freelist.lua b/src/core/group_freelist.lua index 142a2fbc18..2d233a99b3 100644 --- a/src/core/group_freelist.lua +++ b/src/core/group_freelist.lua @@ -17,12 +17,11 @@ local band = bit.band -- -- NB: assumes 32-bit wide loads/stores are atomic (as is the fact on x86_64)! 
--- Group freelist holds up to SIZE chunks of chunksize packets each +-- Group freelist holds up to n chunks of chunksize packets each chunksize = 2048 --- (SIZE=1024)*(chunksize=2048) == roughly two million packets -local SIZE = 1024 -- must be a power of two -local MAX = SIZE - 1 +-- (default_size=1024)*(chunksize=2048) == roughly two million packets +local default_size = 1024 -- must be a power of two local CACHELINE = 64 -- XXX - make dynamic local INT = ffi.sizeof("uint32_t") @@ -35,47 +34,53 @@ struct group_freelist_chunk { ffi.cdef([[ struct group_freelist { - uint32_t enqueue_pos[1]; - uint8_t pad_enqueue_pos[]]..CACHELINE-1*INT..[[]; + uint32_t enqueue_pos[1], enqueue_mask; + uint8_t pad_enqueue_pos[]]..CACHELINE-2*INT..[[]; - uint32_t dequeue_pos[1]; - uint8_t pad_dequeue_pos[]]..CACHELINE-1*INT..[[]; + uint32_t dequeue_pos[1], dequeue_mask; + uint8_t pad_dequeue_pos[]]..CACHELINE-2*INT..[[]; - struct group_freelist_chunk chunk[]]..SIZE..[[]; + uint32_t size, state[1]; - uint32_t state[1]; + struct group_freelist_chunk chunk[?]; } __attribute__((packed, aligned(]]..CACHELINE..[[)))]]) -- Group freelists states local CREATE, INIT, READY = 0, 1, 2 -function freelist_create (name) - local fl = shm.create(name, "struct group_freelist") +function freelist_create (name, size) + size = size or default_size + assert(band(size, size-1) == 0, "size is not a power of two") + + local fl = shm.create(name, "struct group_freelist", size) if sync.cas(fl.state, CREATE, INIT) then - for i = 0, MAX do + fl.size = size + local mask = size - 1 + fl.enqueue_mask, fl.dequeue_mask = mask, mask + for i = 0, fl.size-1 do fl.chunk[i].sequence[0] = i end - fl.state[0] = READY + assert(sync.cas(fl.state, INIT, READY)) + return fl else - waitfor(function () return fl.state[0] == READY end) + shm.unmap(fl) + return freelist_open(name) end - return fl end function freelist_open (name, readonly) - local fl = shm.open(name, "struct group_freelist", readonly) + local fl = shm.open(name, 
"struct group_freelist", 'read-only', 1) waitfor(function () return fl.state[0] == READY end) - return fl -end - -local function mask (i) - return band(i, MAX) + local size = fl.size + shm.unmap(fl) + return shm.open(name, "struct group_freelist", readonly, size) end function start_add (fl) local pos = fl.enqueue_pos[0] + local mask = fl.enqueue_mask while true do - local chunk = fl.chunk[mask(pos)] + local chunk = fl.chunk[band(pos, mask)] local seq = chunk.sequence[0] local dif = seq - pos if dif == 0 then @@ -93,13 +98,14 @@ end function start_remove (fl) local pos = fl.dequeue_pos[0] + local mask = fl.dequeue_mask while true do - local chunk = fl.chunk[mask(pos)] + local chunk = fl.chunk[band(pos, mask)] local seq = chunk.sequence[0] local dif = seq - (pos+1) if dif == 0 then if sync.cas(fl.dequeue_pos, pos, pos+1) then - return chunk, pos+MAX+1 + return chunk, pos+mask+1 end elseif dif < 0 then return @@ -114,8 +120,25 @@ function finish (chunk, seq) chunk.sequence[0] = seq end +local function occupied_chunks (fl) + local enqueue, dequeue = fl.enqueue_pos[0], fl.dequeue_pos[0] + if dequeue > enqueue then + return enqueue + fl.size - dequeue + else + return enqueue - dequeue + end +end + +-- Register struct group_freelist as an abstract SHM object type so that +-- the group freelist can be recognized by shm.open_frame and described +-- with tostring(). 
+shm.register('group_freelist', {open=freelist_open}) +ffi.metatype("struct group_freelist", {__tostring = function (fl) + return ("%d/%d"):format(occupied_chunks(fl)*chunksize, fl.size*chunksize) +end}) + function selftest () - local fl = freelist_create("test_freelist") + local fl = freelist_create("test.group_freelist") assert(not start_remove(fl)) -- empty local w1, sw1 = start_add(fl) @@ -133,13 +156,13 @@ function selftest () finish(r2, sr2) assert(not start_remove(fl)) -- empty - for i=1,SIZE do + for i=1,fl.size do local w, sw = start_add(fl) assert(w) finish(w, sw) end assert(not start_add(fl)) -- full - for i=1,SIZE do + for i=1,fl.size do local r, sr = start_remove(fl) assert(r) finish(r, sr) @@ -148,7 +171,7 @@ function selftest () local w = {} for _=1,10000 do - for _=1,math.random(SIZE) do + for _=1,math.random(fl.size) do local w1, sw = start_add(fl) if not w1 then break end finish(w1, sw) @@ -160,4 +183,9 @@ function selftest () finish(r, sr) end end + + local flro = freelist_open("test.group_freelist", 'read-only') + assert(flro.size == fl.size) + local objsize = ffi.sizeof("struct group_freelist", fl.size) + assert(ffi.C.memcmp(fl, flro, objsize) == 0) end \ No newline at end of file diff --git a/src/core/lib.lua b/src/core/lib.lua index afb1129679..52e135d122 100644 --- a/src/core/lib.lua +++ b/src/core/lib.lua @@ -20,6 +20,8 @@ function equal (x, y) elseif x == y then return true elseif type(x) == 'table' then + if getmetatable(x) then return false end + if getmetatable(y) then return false end for k, v in pairs(x) do if not equal(v, y[k]) then return false end end diff --git a/src/core/main.lua b/src/core/main.lua index 433fa3c09a..c6ca9f00e8 100644 --- a/src/core/main.lua +++ b/src/core/main.lua @@ -168,8 +168,16 @@ function initialize () end function handler (reason) - print(reason) - print(STP.stacktrace()) + local ok, bt = pcall(STP.stacktrace) + if ok and bt then + io.stderr:write(reason) + io.stderr:write("\n") + io.stderr:write(bt) + 
io.stderr:write("\n") + else + io.stderr:write(debug.traceback(reason)) + io.stderr:write("\n") + end if debug_on_error then debug.debug() end os.exit(1) end diff --git a/src/core/packet.lua b/src/core/packet.lua index f282357f91..7c9a7e34ca 100644 --- a/src/core/packet.lua +++ b/src/core/packet.lua @@ -52,24 +52,28 @@ end -- Freelist containing empty packets ready for use. -local max_packets = 1e6 +local default_max_packets = 1e6 ffi.cdef([[ struct freelist { int nfree; int max; - struct packet *list[]]..max_packets..[[]; + struct packet *list[?]; }; ]]) -local function freelist_create(name) - local fl = shm.create(name, "struct freelist") +local function freelist_create(name, max_packets) + max_packets = max_packets or default_max_packets + local fl = shm.create(name, "struct freelist", max_packets) fl.max = max_packets return fl end local function freelist_open(name, readonly) - return shm.open(name, "struct freelist", readonly) + local fl = shm.open(name, "struct freelist", 'read-only', 1) + local max = fl.max + shm.unmap(fl) + return shm.open(name, "struct freelist", readonly, max) end local function freelist_full(freelist) @@ -104,14 +108,21 @@ local packets_allocated = 0 local packets_fl, group_fl -- Call to ensure packet freelist is enabled. -function initialize () - packets_fl = freelist_create("engine/packets.freelist") +function initialize (max_packets) + if packets_fl then + assert(packets_fl.nfree == 0, "freelist is already in use") + shm.unmap(packets_fl) + shm.unlink("engine/packets.freelist") + end + packets_fl = freelist_create("engine/packets.freelist", max_packets) end -- Call to ensure group freelist is enabled. 
-function enable_group_freelist () +function enable_group_freelist (nchunks) if not group_fl then - group_fl = group_freelist.freelist_create("group/packets.freelist") + group_fl = group_freelist.freelist_create( + "group/packets.group_freelist", nchunks + ) end end @@ -147,12 +158,9 @@ function reclaim_step () end end --- Register struct freelist as an abstract SHM object type so that the group +-- Register struct freelist as an abstract SHM object type so that the -- freelist can be recognized by shm.open_frame and described with tostring(). -shm.register( - 'freelist', - {open = function (name) return shm.open(name, "struct freelist") end} -) +shm.register('freelist', {open=freelist_open}) ffi.metatype("struct freelist", {__tostring = function (freelist) return ("%d/%d"):format(freelist.nfree, freelist.max) end}) @@ -176,7 +184,7 @@ end -- process termination. function shutdown (pid) local in_group, group_fl = pcall( - group_freelist.freelist_open, "/"..pid.."/group/packets.freelist" + group_freelist.freelist_open, "/"..pid.."/group/packets.group_freelist" ) if in_group then local packets_fl = freelist_open("/"..pid.."/engine/packets.freelist") @@ -308,7 +316,7 @@ end function preallocate_step() assert(packets_allocated + packet_allocation_step - <= max_packets - group_fl_chunksize, + <= packets_fl.max - group_fl_chunksize, "packet allocation overflow") for i=1, packet_allocation_step do @@ -319,6 +327,12 @@ function preallocate_step() end function selftest () + initialize(10000) + assert(packets_fl.max == 10000) + allocate() + local ok, err = pcall(initialize) + assert(not ok and err:match("freelist is already in use")) + assert(is_aligned(0, 1)) assert(is_aligned(1, 1)) assert(is_aligned(2, 1)) diff --git a/src/core/shm.lua b/src/core/shm.lua index 37f65af4ac..989fe1750f 100644 --- a/src/core/shm.lua +++ b/src/core/shm.lua @@ -16,13 +16,13 @@ root = os.getenv("SNABB_SHM_ROOT") or "/var/run/snabb" mappings = {} -- Map an object into memory. 
-local function map (name, type, readonly, create) +local function map (name, type, readonly, create, ...) local path = resolve(name) local mapmode = readonly and 'read' or 'read, write' local ctype = ffi.typeof(type) - local size = ffi.sizeof(ctype) + local size = ffi.sizeof(ctype, ...) local stat = S.stat(root..'/'..path) - if stat and stat.size ~= size then + if stat and stat.size < size then print(("shm warning: resizing %s from %d to %d bytes") :format(path, stat.size, size)) end @@ -38,7 +38,7 @@ local function map (name, type, readonly, create) if create then assert(fd:ftruncate(size), "shm: ftruncate failed") else - assert(fd:fstat().size == size, "shm: unexpected size") + assert(fd:fstat().size >= size, "shm: unexpected size") end local mem, err = S.mmap(nil, size, mapmode, "shared", fd, 0) fd:close() @@ -47,12 +47,12 @@ local function map (name, type, readonly, create) return ffi.cast(ffi.typeof("$&", ctype), mem) end -function create (name, type) - return map(name, type, false, true) +function create (name, type, ...) + return map(name, type, false, true, ...) end -function open (name, type, readonly) - return map(name, type, readonly, false) +function open (name, type, readonly, ...) + return map(name, type, readonly, false, ...) 
end function exists (name) @@ -215,6 +215,17 @@ function selftest () unmap(p1) assert(not exists(name)) + -- Checking parameterized types + print("checking parameterized types..") + local name = "shm/selftest/parameterized" + local p1 = create(name, "struct { int x; int xs[?]; }", 10) + local p2 = open(name, "struct { int x; int xs[?]; }", 'read-only', 10) + p1.xs[9] = 42 + assert(p2.xs[9] == 42) + unmap(p2) + unmap(p1) + assert(unlink(name)) + -- Test that we can open and cleanup many objects print("checking many objects..") local path = 'shm/selftest/manyobj' diff --git a/src/lib/cpuset.lua b/src/lib/cpuset.lua index 0a7e164fd0..ce5f475784 100644 --- a/src/lib/cpuset.lua +++ b/src/lib/cpuset.lua @@ -88,11 +88,11 @@ function CPUSet:list () return list end -function CPUSet:acquire_for_pci_addresses(addrs) - return self:acquire(numa.choose_numa_node_for_pci_addresses(addrs)) +function CPUSet:acquire_for_pci_addresses(addrs, worker) + return self:acquire(numa.choose_numa_node_for_pci_addresses(addrs), worker) end -function CPUSet:acquire(on_node) +function CPUSet:acquire(on_node, worker) for node, cpus in pairs(self.by_node) do if on_node == nil or on_node == node then for cpu, avail in pairs(cpus) do @@ -117,11 +117,11 @@ function CPUSet:acquire(on_node) end for node, cpus in pairs(self.by_node) do print(("Warning: All assignable CPUs in use; ".. - "leaving data-plane PID %d without assigned CPU."):format(S.getpid())) + "leaving data-plane worker '%s' without assigned CPU."):format(worker)) return end print(("Warning: No assignable CPUs declared; ".. 
- "leaving data-plane PID %d without assigned CPU."):format(S.getpid())) + "leaving data-plane worker '%s' without assigned CPU."):format(worker)) end function CPUSet:release(cpu) diff --git a/src/lib/interlink.lua b/src/lib/interlink.lua index d3f0b066ff..1dc26fb0db 100644 --- a/src/lib/interlink.lua +++ b/src/lib/interlink.lua @@ -63,24 +63,23 @@ local band = require("bit").band local waitfor = require("core.lib").waitfor local sync = require("core.sync") -local SIZE = 1024 local CACHELINE = 64 -- XXX - make dynamic -local INT = ffi.sizeof("int") - -assert(band(SIZE, SIZE-1) == 0, "SIZE is not a power of two") +local INT = ffi.sizeof("uint32_t") -- Based on MCRingBuffer, see -- http://www.cse.cuhk.edu.hk/%7Epclee/www/pubs/ipdps10.pdf -ffi.cdef([[ struct interlink { - int read, write, state[1]; - char pad1[]]..CACHELINE-3*INT..[[]; - int lwrite, nread; - char pad2[]]..CACHELINE-2*INT..[[]; - int lread, nwrite; - char pad3[]]..CACHELINE-2*INT..[[]; - struct packet *packets[]]..SIZE..[[]; -} __attribute__((packed, aligned(]]..CACHELINE..[[)))]]) +ffi.cdef([[ + struct interlink { + uint32_t read, write, size, state[1]; + char pad1[]]..CACHELINE-4*INT..[[]; + uint32_t lwrite, nread, rmask; + char pad2[]]..CACHELINE-3*INT..[[]; + uint32_t lread, nwrite, wmask; + char pad3[]]..CACHELINE-3*INT..[[]; + struct packet *packets[?]; + } __attribute__((packed, aligned(]]..CACHELINE..[[))) +]]) -- The life cycle of an interlink is managed using a state machine. This is -- necessary because we allow receiving and transmitting processes to attach @@ -92,13 +91,15 @@ ffi.cdef([[ struct interlink { -- once the former receiver has detached while the transmitter stays attached -- throughout, and vice-versa. -- --- Interlinks can be in one of five states: +-- Interlinks can be in one of six states: -local FREE = 0 -- Implicit initial state due to 0 value. -local RXUP = 1 -- Receiver has attached. -local TXUP = 2 -- Transmitter has attached. -local DXUP = 3 -- Both ends have attached. 
-local DOWN = 4 -- Both ends have detached; must be re-allocated. +local INIT = 0 -- Implicit initial state due to 0 value. +local CONF = 1 -- Queue size is being configured. +local FREE = 2 -- Queue is in free state, ready to attach. +local RXUP = 3 -- Receiver has attached. +local TXUP = 4 -- Transmitter has attached. +local DXUP = 5 -- Both ends have attached. +local DOWN = 6 -- Both ends have detached; must be re-allocated. -- If at any point both ends have detached from an interlink it stays in the -- DOWN state until it is deallocated. @@ -107,7 +108,9 @@ local DOWN = 4 -- Both ends have detached; must be re-allocated. -- -- Who Change Why -- ------ ------------- --------------------------------------------------- --- (any) none -> FREE A process creates the queue (initial state). +-- (any) none -> INIT A process creates the queue (initial state). +-- (any) INIT -> CONF A process has started configuring the queue. +-- (any) CONF -> FREE A process has initialized and configured the queue. -- recv. FREE -> RXUP Receiver attaches to free queue. -- recv. TXUP -> DXUP Receiver attaches to queue with ready transmitter. -- recv. DXUP -> TXUP Receiver detaches from queue. @@ -121,6 +124,10 @@ local DOWN = 4 -- Both ends have detached; must be re-allocated. -- -- Who Change Why *PROHIBITED* -- ------ ----------- -------------------------------------------------------- +-- recv. INIT->RXUP Can not attach to uninitialized queue. +-- trans. INIT->TXUP Can not attach to uninitialized queue. +-- recv. CONF->RXUP Can not attach to unconfigured queue. +-- trans. CONF->TXUP Can not attach to unconfigured queue. -- (any) FREE->DEAD Cannot shutdown before having attached. -- (any) *->FREE Cannot transition to FREE except by reallocating. -- recv. TXUP->DEAD Receiver cannot mutate queue after it has detached. @@ -130,15 +137,24 @@ local DOWN = 4 -- Both ends have detached; must be re-allocated. -- (any) DXUP->DOWN Cannot shutdown queue while it is in use. 
-- (any) DOWN->* Cannot transition from DOWN (must create new queue.) -local function attach (name, initialize) +local function attach (name, size, transitions) + assert(band(size, size-1) == 0, "size is not a power of two") local r local first_try = true waitfor( function () -- Create/open the queue. - r = shm.create(name, "struct interlink") - -- Return if we succeed to initialize it. - if initialize(r) then return true end + r = shm.create(name, "struct interlink", size) + -- Initialize queue and configure its size + -- (only one process can set size). + if sync.cas(r.state, INIT, CONF) then + r.size = size + local mask = size - 1 + r.rmask, r.wmask = mask, mask + assert(sync.cas(r.state, CONF, FREE)) + end + -- Return if we succeed to attach. + if transitions(r) then return true end -- We failed; handle error and try again. shm.unmap(r) if first_try then @@ -147,20 +163,22 @@ local function attach (name, initialize) end end ) + -- Make sure we agree on the queue size. + assert(r.size == size, "interlink: queue size mismatch on: "..name) -- Ready for action :) return r end -function attach_receiver (name) - return attach(name, +function attach_receiver (name, size) + return attach(name, size, -- Attach to free queue as receiver (FREE -> RXUP) -- or queue with ready transmitter (TXUP -> DXUP.) function (r) return sync.cas(r.state, FREE, RXUP) or sync.cas(r.state, TXUP, DXUP) end) end -function attach_transmitter (name) - return attach(name, +function attach_transmitter (name, size) + return attach(name, size, -- Attach to free queue as transmitter (FREE -> TXUP) -- or queue with ready receiver (RXUP -> DXUP.) function (r) return sync.cas(r.state, FREE, TXUP) @@ -206,12 +224,12 @@ end -- Queue operations follow below. 
-local function NEXT (i) - return band(i + 1, SIZE - 1) +local function NEXT (mask, i) + return band(i + 1, mask) end function full (r) - local after_nwrite = NEXT(r.nwrite) + local after_nwrite = NEXT(r.wmask, r.nwrite) if after_nwrite == r.lread then if after_nwrite == r.read then return true @@ -222,7 +240,7 @@ end function insert (r, p) r.packets[r.nwrite] = p - r.nwrite = NEXT(r.nwrite) + r.nwrite = NEXT(r.wmask, r.nwrite) end function push (r) @@ -241,7 +259,7 @@ end function extract (r) local p = r.packets[r.nread] - r.nread = NEXT(r.nread) + r.nread = NEXT(r.rmask, r.nread) return p end @@ -258,24 +276,29 @@ end shm.register('interlink', getfenv()) function open (name, readonly) - return shm.open(name, "struct interlink", readonly) + local r = shm.open(name, "struct interlink", 'read-only', 1) + local size = r.size + shm.unmap(r) + return shm.open(name, "struct interlink", readonly, size) end local function describe (r) local function queue_fill (r) - local read, write = r.read, r.write - return read > write and write + SIZE - read or write - read + local read, write, size = r.read, r.write, r.size + return read > write and write + size - read or write - read end local function status (r) return ({ - [FREE] = "initializing", + [INIT] = "being initialized", + [CONF] = "being configuring", + [FREE] = "free to attach", [RXUP] = "waiting for transmitter", [TXUP] = "waiting for receiver", [DXUP] = "in active use", [DOWN] = "deallocating" })[r.state[0]] end - return ("%d/%d (%s)"):format(queue_fill(r), SIZE - 1, status(r)) + return ("%d/%d (%s)"):format(queue_fill(r), size - 1, status(r)) end -ffi.metatype(ffi.typeof("struct interlink"), {__tostring=describe}) +ffi.metatype("struct interlink", {__tostring=describe}) diff --git a/src/lib/lua/StackTracePlus.lua b/src/lib/lua/StackTracePlus.lua index 0ea1344a05..135129d10e 100644 --- a/src/lib/lua/StackTracePlus.lua +++ b/src/lib/lua/StackTracePlus.lua @@ -42,6 +42,10 @@ add_known_module("bit32", "bit32 module") -- 
luajit add_known_module("bit", "bit module") add_known_module("jit", "jit module") +-- lua5.3 +if _VERSION >= "Lua 5.3" then + add_known_module("utf8", "utf8 module") +end local m_user_known_tables = {} @@ -72,7 +76,7 @@ for _, name in ipairs{ "tostring", "type", "xpcall", - + -- Lua 5.1 "gcinfo", "getfenv", @@ -98,7 +102,7 @@ local function safe_tostring (value) end -- Private: --- Parses a line, looking for possible function definitions (in a very na�ve way) +-- Parses a line, looking for possible function definitions (in a very na�ve way) -- Returns '(anonymous)' if no function name was found in the line local function ParseLine(line) assert(type(line) == "string") @@ -139,7 +143,7 @@ local function GuessFunctionName(info) return "?" end local line - for i = 1, info.linedefined do + for _ = 1, info.linedefined do line = file:read("*l") end if not line then @@ -224,7 +228,7 @@ function Dumper:DumpLocals (level) if self.dumping_same_thread then level = level + 1 end - + local name, value = self.getlocal(level, i) if not name then return @@ -266,7 +270,7 @@ function Dumper:DumpLocals (level) else local source = info.short_src if source:sub(2,7) == "string" then - source = source:sub(9) -- uno m�s, por el espacio que viene (string "Baragent.Main", por ejemplo) + source = source:sub(9) -- uno m�s, por el espacio que viene (string "Baragent.Main", por ejemplo) end --for k,v in pairs(info) do print(k,v) end fun_name = fun_name or GuessFunctionName(info) @@ -305,7 +309,7 @@ function _M.stacktrace(thread, message, level) local dumper = Dumper.new(thread) local original_error - + if type(message) == "table" then dumper:add("an error object {\r\n") local first = true @@ -326,14 +330,14 @@ function _M.stacktrace(thread, message, level) dumper:add(message) original_error = message end - + dumper:add("\r\n") dumper:add[[ Stack Traceback =============== ]] --print(error_message) - + local level_to_show = level if dumper.dumping_same_thread then level = level + 1 end @@ -381,12 
+385,12 @@ Stack Traceback else dumper:add_f("(%d) unknown frame %s\r\n", level_to_show, info.what) end - + level = level + 1 level_to_show = level_to_show + 1 info = dumper.getinfo(level, "nSlf") end - + return dumper:concat_lines(), original_error end diff --git a/src/lib/ptree/ptree.lua b/src/lib/ptree/ptree.lua index f1add44a15..0f17b3a20b 100644 --- a/src/lib/ptree/ptree.lua +++ b/src/lib/ptree/ptree.lua @@ -10,7 +10,6 @@ local lib = require("core.lib") local shm = require("core.shm") local timer = require("core.timer") local worker = require("core.worker") -local cltable = require("lib.cltable") local cpuset = require("lib.cpuset") local rrd = require("lib.rrd") local scheduling = require("lib.scheduling") @@ -186,6 +185,7 @@ end function Manager:start () if self.name then engine.claim_name(self.name) end + self:info(("Manager has started (PID %d)"):format(S.getpid())) self.cpuset:bind_to_numa_node() require('lib.fibers.file').install_poll_io_handler() self.sched = fiber.current_scheduler @@ -369,7 +369,7 @@ function Manager:acquire_cpu_for_worker(id, app_graph) end end end - return self.cpuset:acquire_for_pci_addresses(pci_addresses) + return self.cpuset:acquire_for_pci_addresses(pci_addresses, id) end function Manager:compute_scheduling_for_worker(id, app_graph) diff --git a/src/lib/ptree/support.lua b/src/lib/ptree/support.lua index aa14fc3f62..5488deec49 100644 --- a/src/lib/ptree/support.lua +++ b/src/lib/ptree/support.lua @@ -8,7 +8,6 @@ local path_mod = require("lib.yang.path") local path_data = require("lib.yang.path_data") local yang = require("lib.yang.yang") local data = require("lib.yang.data") -local cltable = require("lib.cltable") function compute_parent_paths(path) local function sorted_keys(t) @@ -43,19 +42,16 @@ local function add_child_objects(accum, grammar, config) table.insert(accum, config) return visit(grammar, config) end - function visitor.table(grammar, config) + function visitor.list(grammar, config) + if grammar.value_ctype then + 
-- List entries are raw data and do not contain children with + -- distinct identities. + return + end local child_grammar = {type="struct", members=grammar.values, ctype=grammar.value_ctype} - if not grammar.key_ctype or grammar.native_key then - for k, v in pairs(config) do visit_child(child_grammar, v) end - elseif grammar.key_ctype and grammar.value_ctype then - -- Ctables are raw data, and raw data doesn't contain children - -- with distinct identity. - return - elseif grammar.key_ctype then - for k, v in cltable.pairs(config) do visit_child(child_grammar, v) end - else - error("unreachable") + for _, entry in ipairs(config) do + visit_child(child_grammar, entry) end end function visitor.array(grammar, config) @@ -88,7 +84,7 @@ local function compute_objects_maybe_updated_in_place (schema, config, if subgrammar.type == 'scalar' then return objs end table.insert(objs, getter(config)) -- Members of raw data can't be updated in place either. - if subgrammar.type == 'table' then + if subgrammar.type == 'list' then if subgrammar.key_ctype and subgrammar.value_ctype then return objs end elseif subgrammar.type == 'struct' then if subgrammar.ctype then return objs end diff --git a/src/lib/ptree/support/snabb-snabbflow-v1.lua b/src/lib/ptree/support/snabb-snabbflow-v1.lua index 134a936606..4e04da2ad4 100644 --- a/src/lib/ptree/support/snabb-snabbflow-v1.lua +++ b/src/lib/ptree/support/snabb-snabbflow-v1.lua @@ -1,6 +1,7 @@ -- Use of this source code is governed by the Apache 2.0 license; see COPYING. 
module(..., package.seeall) +local path_data = require("lib.yang.path_data") local support = require("lib.ptree.support") local shm = require("core.shm") local counter = require("core.counter") @@ -94,16 +95,13 @@ end function collect_rss_states (pid, rss_links) local states = {} local id - for _, app in ipairs(shm.children("/"..pid.."/apps")) do - local rss_group = tonumber(app:match("^rss(%d+)$")) + for _, link in ipairs(shm.children("/"..pid.."/links")) do + local rss_group = tonumber(link:match("^rss(%d+)%.")) if rss_group then id = tonumber(rss_group) - break end - end - for _, link in ipairs(shm.children("/"..pid.."/links")) do for rss_link, _ in pairs(rss_links) do - if (link:match("^"..rss_link) and link:match("^rss%d+.")) -- embedded link + if (link:match("^"..rss_link) and link:match("^rss%d+%.")) -- embedded link or (link:match("-> *"..rss_link:gsub("%.output$", ".input").."$")) -- interlink then local stats = shm.open_frame("/"..pid.."/links/"..link) @@ -132,6 +130,12 @@ local function compute_pid_reader () return function (pid) return pid end end +local function get_state_grammar (path) + return path_data.grammar_for_schema_by_name( + 'snabb-snabbflow-v1', '/snabbflow-state/'..path, false + ) +end + local function process_states (pids) local state = { interface = {}, @@ -216,6 +220,49 @@ local function process_states (pids) instances[ipfix_state.id] = ipfix_state end end + -- Convert tables to lists as defined in schema + local interfaces = state.interface + state.interface = get_state_grammar('interface').list.new() + for device, interface in pairs(interfaces) do + state.interface[device] = interface + end + local exporters = state.exporter + state.exporter = get_state_grammar('exporter').list.new() + for name, exporter in pairs(exporters) do + local templates = exporter.template + exporter.template = get_state_grammar('exporter/template').list.new() + for id, template in pairs(templates) do + exporter.template[id] = template + end + 
state.exporter[name] = exporter + end + local rss_groups = state.rss_group + state.rss_group = get_state_grammar('rss-group').list.new() + for id, rss_group in pairs(rss_groups) do + local queues = rss_group.queue + rss_group.queue = get_state_grammar('rss-group/queue').list.new() + for device, queue in pairs(queues) do + rss_group.queue[device] = queue + end + local exporters = rss_group.exporter + rss_group.exporter = get_state_grammar('rss-group/exporter').list.new() + for name, exporter in pairs(exporters) do + local instances = exporter.instance + exporter.instance = + get_state_grammar('rss-group/exporter/instance').list.new() + for id, instance in pairs(instances) do + local templates = instance.template + instance.template = + get_state_grammar('rss-group/exporter/instance/template').list.new() + for id, template in pairs(templates) do + instance.template[id] = template + end + exporter.instance[id] = instance + end + rss_group.exporter[name] = exporter + end + state.rss_group[id] = rss_group + end return {snabbflow_state=state} end diff --git a/src/lib/ptree/support/snabb-softwire-v3.lua b/src/lib/ptree/support/snabb-softwire-v3.lua index 8b3c9c6b81..3a2de16c77 100644 --- a/src/lib/ptree/support/snabb-softwire-v3.lua +++ b/src/lib/ptree/support/snabb-softwire-v3.lua @@ -7,8 +7,7 @@ local equal = require('core.lib').equal local dirname = require('core.lib').dirname local mem = require('lib.stream.mem') local ipv6 = require('lib.protocol.ipv6') -local ctable = require('lib.ctable') -local cltable = require('lib.cltable') +local list = require('lib.yang.list') local data = require('lib.yang.data') local state = require('lib.yang.state') local yang_util = require('lib.yang.util') @@ -19,6 +18,40 @@ local path_data = require('lib.yang.path_data') local generic = require('lib.ptree.support').generic_schema_config_support local binding_table = require("apps.lwaftr.binding_table") +local function snabb_softwire_getter(path, is_config) + local grammar = 
path_data.grammar_for_schema_by_name( + 'snabb-softwire-v3', '/', is_config + ) + return path_data.resolver(grammar, path) +end + +local function ietf_softwire_br_getter(path, is_config) + local grammar = path_data.grammar_for_schema_by_name( + 'ietf-softwire-br', '/', is_config + ) + return path_data.resolver(grammar, path) +end + +local function get_softwire_grammar(is_config) + return path_data.grammar_for_schema_by_name( + 'snabb-softwire-v3', '/', is_config + ) +end + +local function get_ietf_bind_instance_grammar(is_config) + return path_data.grammar_for_schema_by_name( + 'ietf-softwire-br', '/br-instances/binding/bind-instance', is_config + ) +end + +local function get_ietf_softwire_grammar(is_config) + return path_data.grammar_for_schema_by_name( + 'ietf-softwire-br', + '/br-instances/binding/bind-instance/binding-table/binding-entry', + is_config + ) +end + -- Packs snabb-softwire-v3 softwire entry into softwire and PSID blob -- -- The data plane stores a separate table of psid maps and softwires. It @@ -28,22 +61,19 @@ local binding_table = require("apps.lwaftr.binding_table") local function pack_softwire(app_graph, bt, entry) assert(app_graph.apps['lwaftr']) assert(entry.value.port_set, "Softwire lacks port-set definition") - local key, value = entry.key, entry.value - - local softwire_t = bt.softwires.entry_type() - psid_map_t = bt.psid_map.entry_type() -- Now lets pack the stuff! 
- local packed_softwire = ffi.new(softwire_t) - packed_softwire.key.ipv4 = key.ipv4 - packed_softwire.key.psid = key.psid - packed_softwire.value.b4_ipv6 = value.b4_ipv6 - packed_softwire.value.br_address = value.br_address - - local packed_psid_map = ffi.new(psid_map_t) - packed_psid_map.key.addr = key.ipv4 - if value.port_set.psid_length then - packed_psid_map.value.psid_length = value.port_set.psid_length + local packed_softwire = bt.softwires.entry_type() + packed_softwire.key.ipv4 = entry.ipv4 + packed_softwire.key.psid = entry.psid + packed_softwire.value.b4_ipv6 = entry.b4_ipv6 + packed_softwire.value.br_address = entry.br_address + + local packed_psid_map = bt.psid_map.entry_type() + packed_psid_map.key.addr = entry.ipv4 + if entry.port_set then + packed_psid_map.value.psid_length = entry.port_set.psid_length + packed_psid_map.value.shift = entry.port_set.shift end return packed_softwire, packed_psid_map @@ -51,7 +81,7 @@ end local function add_softwire_entry_actions(app_graph, bt, entries) local ret = {} - for entry in entries:iterate() do + for _, entry in ipairs(entries) do local psoftwire, ppsid = pack_softwire(app_graph, bt, entry) assert(bt:is_managed_ipv4_address(psoftwire.key.ipv4)) @@ -62,24 +92,10 @@ local function add_softwire_entry_actions(app_graph, bt, entries) return ret end -local softwire_grammar -local function get_softwire_grammar() - if not softwire_grammar then - local schema = yang.load_schema_by_name('snabb-softwire-v3') - local grammar = data.config_grammar_from_schema(schema) - softwire_grammar = - assert(grammar.members['softwire-config']. 
- members['binding-table'].members['softwire']) - end - return softwire_grammar -end - local function remove_softwire_entry_actions(app_graph, path) assert(app_graph.apps['lwaftr']) - path = path_mod.parse_path(path) - local grammar = get_softwire_grammar() - local key = path_data.prepare_table_lookup( - grammar.keys, grammar.key_ctype, path[#path].query) + path = path_mod.parse_path(path, get_softwire_grammar()) + local key = binding_table.softwire_key_t(path[#path].key) local args = {'lwaftr', 'remove_softwire_entry', key} -- If it's the last softwire for the corresponding psid entry, remove it. -- TODO: check if last psid entry and then remove. @@ -89,17 +105,12 @@ end local function compute_config_actions(get_binding_table_instance, old_graph, new_graph, to_restart, verb, path, arg) - -- If the binding cable changes, remove our cached version. - if path ~= nil and path:match("^/softwire%-config/binding%-table") then - binding_table_instance = nil - end - if verb == 'add' and path == '/softwire-config/binding-table/softwire' then if to_restart == false then assert(new_graph.apps['lwaftr']) local bt_conf = app_graph.apps.lwaftr.arg.softwire_config.binding_table local bt = get_binding_table_instance(bt_conf) - return add_softwire_entry_actions(new_graph, bt, arg) + return add_softwire_entry_actions(new_graph, bt, arg) end elseif (verb == 'remove' and path:match('^/softwire%-config/binding%-table/softwire')) then @@ -132,10 +143,10 @@ local function compute_apps_to_restart_after_configuration_update( -- restart unfortunately. If not we can just add the softwire. 
local bt = get_binding_table_instance(configuration.softwire_config.binding_table) local to_restart = false - for entry in arg:iterate() do - to_restart = (bt:is_managed_ipv4_address(entry.key.ipv4) == false) or false + for _, entry in ipairs(arg) do + to_restart = not bt:is_managed_ipv4_address(entry.ipv4) end - if to_restart == false then return {} end + if to_restart then return {} end elseif (verb == 'remove' and path:match('^/softwire%-config/binding%-table/softwire')) then return {} @@ -146,73 +157,27 @@ local function compute_apps_to_restart_after_configuration_update( schema_name, configuration, verb, path, in_place_dependencies, arg) end -local function memoize1(f) - local memoized_arg, memoized_result - return function(arg) - if arg == memoized_arg then return memoized_result end - memoized_result = f(arg) - memoized_arg = arg - return memoized_result - end -end - -local function table_for_grammar(grammar) - if grammar.native_key then - return {}, function (key) return key[grammar.native_key] end - elseif grammar.key_ctype and not grammar.value_ctype then - local key_t = data.typeof(grammar.key_ctype) - return cltable.new({key_type=key_t}), key_t - else - error("Unsupported table type") - end -end - -local ietf_bind_instance_grammar -local function get_ietf_bind_instance_grammar() - if not ietf_bind_instance_grammar then - local schema = yang.load_schema_by_name('ietf-softwire-br') - local grammar = data.config_grammar_from_schema(schema) - grammar = assert(grammar.members['br-instances']) - grammar = assert(grammar.members['br-type']) - grammar = assert(grammar.choices['binding'].binding) - grammar = assert(grammar.members['bind-instance']) - ietf_bind_instance_grammar = grammar - end - return ietf_bind_instance_grammar -end - -local ietf_softwire_grammar -local function get_ietf_softwire_grammar() - if not ietf_softwire_grammar then - local grammar = get_ietf_bind_instance_grammar() - grammar = assert(grammar.values['binding-table']) - grammar = 
assert(grammar.members['binding-entry']) - ietf_softwire_grammar = grammar - end - return ietf_softwire_grammar -end - local function ietf_binding_table_from_native(bt) - local ret, key_t = table_for_grammar(get_ietf_softwire_grammar()) + local ret = get_ietf_softwire_grammar().list.new() local warn_lossy = false - for softwire in bt.softwire:iterate() do - local k = key_t({ binding_ipv6info = softwire.value.b4_ipv6 }) - if ret[k] ~= nil then + for i, softwire in ipairs(bt.softwire) do + if ret[softwire.b4_ipv6] then -- If two entries in the native softwire table have the same key in -- the ietf-softwire-br schema, we omit the duplicate entry and print -- a load warning to inform the user of this issue. - warn_lossy = warn_lossy or ret[k] + warn_lossy = warn_lossy or i else - local v = { - binding_ipv4_addr = softwire.key.ipv4, + local entry = { + binding_ipv6info = softwire.b4_ipv6, + binding_ipv4_addr = softwire.ipv4, port_set = { - psid_offset = softwire.value.port_set.reserved_ports_bit_count, - psid_len = softwire.value.port_set.psid_length, - psid = softwire.key.psid + psid_offset = softwire.port_set.reserved_ports_bit_count, + psid_len = softwire.port_set.psid_length, + psid = softwire.psid }, - br_ipv6_addr = softwire.value.br_address, + br_ipv6_addr = softwire.br_address, } - ret[k] = v + ret[softwire.b4_ipv6] = entry end end if warn_lossy then @@ -227,39 +192,23 @@ local function ietf_binding_table_from_native(bt) return ret end -local function schema_getter(schema_name, path) - local schema = yang.load_schema_by_name(schema_name) - local grammar = data.config_grammar_from_schema(schema) - return path_data.resolver(grammar, path) -end - -local function snabb_softwire_getter(path) - return schema_getter('snabb-softwire-v3', path) -end - -local function ietf_softwire_br_getter(path) - return schema_getter('ietf-softwire-br', path) -end - local function native_binding_table_from_ietf(ietf) local _, softwire_grammar = 
snabb_softwire_getter('/softwire-config/binding-table/softwire') - local softwire_key_t = data.typeof(softwire_grammar.key_ctype) - local softwire_value_t = data.typeof(softwire_grammar.value_ctype) - local softwire = ctable.new({key_type=softwire_key_t, - value_type=softwire_value_t}) - for k,v in cltable.pairs(ietf) do - local softwire_key = - softwire_key_t({ipv4=v.binding_ipv4_addr, psid=v.port_set.psid}) - local softwire_value = softwire_value_t({ - br_address=v.br_ipv6_addr, - b4_ipv6=k.binding_ipv6info, + local softwire = softwire_grammar.list.new() + local l = list.object(softwire) + for _, entry in ipairs(ietf) do + l:add_entry{ + ipv4=assert(entry.binding_ipv4_addr), + psid=entry.port_set.psid, + br_address=entry.br_ipv6_addr, + b4_ipv6=entry.binding_ipv6info, port_set={ - psid_length=v.port_set.psid_len, - reserved_ports_bit_count=v.port_set.psid_offset - } - }) - softwire:add(softwire_key, softwire_value) + psid_length=entry.port_set.psid_len, + reserved_ports_bit_count=entry.port_set.psid_offset + }, + padding = 0 + } end return {softwire=softwire} end @@ -270,12 +219,6 @@ local function serialize_binding_table(bt) return mem.call_with_output_string(printer, bt) end -local uint64_ptr_t = ffi.typeof('uint64_t*') -function ipv6_equals(a, b) - local x, y = ffi.cast(uint64_ptr_t, a), ffi.cast(uint64_ptr_t, b) - return x[0] == y[0] and x[1] == y[1] -end - local function instance_name (config) return config.softwire_config.name or 'unnamed' end @@ -319,7 +262,8 @@ local function ietf_softwire_br_translator () local int_err = int.error_rate_limiting local ext = native_config.softwire_config.external_interface local ext_err = ext.error_rate_limiting - local instance = { + local bind_instance = get_ietf_bind_instance_grammar().list.new() + bind_instance[instance_name(native_config)] = { softwire_payload_mtu = int.mtu, softwire_path_mru = ext.mtu, -- FIXME: There's no equivalent of softwire-num-max in @@ -345,9 +289,7 @@ local function 
ietf_softwire_br_translator () cached_config = { br_instances = { binding = { - bind_instance = { - [instance_name(native_config)] = instance - } + bind_instance = bind_instance } } } @@ -383,14 +325,14 @@ local function ietf_softwire_br_translator () hairpin_ipv4_packets = c.hairpin_ipv4_packets, active_softwire_num = 0, -- FIXME } + local bind_instance = get_ietf_bind_instance_grammar(false).list.new() + bind_instance[instance_name(native_config)] = { + traffic_stat = traffic_stat + } return { br_instances = { binding = { - bind_instance = { - [instance_name(native_config)] = { - traffic_stat = traffic_stat - } - } + bind_instance = bind_instance } } } @@ -589,11 +531,10 @@ local function ietf_softwire_br_translator () local psid_map_path = '/softwire-config/binding-table/psid-map' -- Add softwires. local additions = {} - for entry in new_bt.softwire:iterate() do - local key, value = entry.key, entry.value - if old_bt.softwire:lookup_ptr(key) ~= nil then + for _, entry in ipairs(new_bt.softwire) do + if old_bt.softwire[entry] then error('softwire already present in table: '.. - ipv4_ntop(key.ipv4)..'/'..key.psid) + ipv4_ntop(entry.ipv4)..'/'..entry.psid) end local config_str = string.format([[{ ipv4 %s; @@ -604,11 +545,11 @@ local function ietf_softwire_br_translator () psid-length %s; reserved-ports-bit-count %s; } - }]], ipv4_ntop(key.ipv4), key.psid, - ipv6:ntop(value.br_address), - ipv6:ntop(value.b4_ipv6), - value.port_set.psid_length, - value.port_set.reserved_ports_bit_count + }]], ipv4_ntop(entry.ipv4), entry.psid, + ipv6:ntop(entry.br_address), + ipv6:ntop(entry.b4_ipv6), + entry.port_set.psid_length, + entry.port_set.reserved_ports_bit_count ) table.insert(additions, config_str) end @@ -662,11 +603,15 @@ end local function compute_state_reader(schema_name) -- The schema has two lists which we want to look in. 
- local schema = yang.load_schema_by_name(schema_name) - local grammar = data.data_grammar_from_schema(schema, false) - - local instance_list_gmr = grammar.members["softwire-config"].members.instance - local instance_state_gmr = instance_list_gmr.values["softwire-state"] + local grammar = path_data.grammar_for_schema_by_name( + schema_name, '/', false + ) + local instance_list_gmr = path_data.grammar_for_schema_by_name( + schema_name, '/softwire-config/instance', false + ) + local instance_state_gmr = path_data.grammar_for_schema_by_name( + schema_name, '/softwire-config/instance/softwire-state', false + ) local base_reader = state.state_reader_from_grammar(grammar) local instance_state_reader = state.state_reader_from_grammar(instance_state_gmr) @@ -674,12 +619,13 @@ local function compute_state_reader(schema_name) return function(pid, data) local counters = state.counters_for_pid(pid) local ret = base_reader(counters) - ret.softwire_config.instance = {} + ret.softwire_config.instance = instance_list_gmr.list.new() for device, instance in pairs(data.softwire_config.instance) do local instance_state = instance_state_reader(counters) - ret.softwire_config.instance[device] = {} - ret.softwire_config.instance[device].softwire_state = instance_state + ret.softwire_config.instance[device] = { + softwire_state = instance_state + } -- TODO: Copy queue[id].external_interface.next_hop.ip.resolved_mac. -- TODO: Copy queue[id].internal_interface.next_hop.ip.resolved_mac. end @@ -692,9 +638,16 @@ local function process_states(discontinuity_time, states) -- We need to create a summation of all the states as well as adding all the -- instance specific state data to create a total in software-state. 
+ local instance_list_gmr = path_data.grammar_for_schema_by_name( + 'snabb-softwire-v3', '/softwire-config/instance', false + ) + local unified = { - softwire_config = {instance = {}}, - softwire_state = {} + softwire_config = {instance = instance_list_gmr.list.new()}, + softwire_state = { + discontinuity_time = + yang_util.format_date_as_iso_8601(discontinuity_time) + } } local function total_counter(name, softwire_stats, value) @@ -706,15 +659,13 @@ local function process_states(discontinuity_time, states) end for _, inst_config in ipairs(states) do - local name, instance = next(inst_config.softwire_config.instance) - unified.softwire_config.instance[name] = instance - - unified.softwire_state.discontinuity_time = - yang_util.format_date_as_iso_8601(discontinuity_time) - - for name, value in pairs(instance.softwire_state) do - unified.softwire_state[name] = total_counter( - name, unified.softwire_state, value) + for name, instance in pairs(inst_config.softwire_config.instance) do + unified.softwire_config.instance[name] = instance + for name, value in pairs(instance.softwire_state) do + unified.softwire_state[name] = total_counter( + name, unified.softwire_state, value) + end + break end end diff --git a/src/lib/scheduling.lua b/src/lib/scheduling.lua index ef4bf81b28..e3a4769e01 100644 --- a/src/lib/scheduling.lua +++ b/src/lib/scheduling.lua @@ -21,6 +21,7 @@ local scheduling_opts = { jit_opt = {default=default_jit_opt}, -- JIT options. cpu = {}, -- CPU index (integer). real_time = {}, -- Boolean. + max_packets = {}, -- Positive integer. ingress_drop_monitor = {}, -- Action string: one of 'flush' or 'warn'. profile = {default=true}, -- Boolean. busywait = {default=true}, -- Boolean. 
@@ -54,6 +55,10 @@ function sched_apply.real_time (real_time) end end +function sched_apply.max_packets (max_packets) + packet.initialize(max_packets) +end + function sched_apply.busywait (busywait) engine.busywait = busywait end diff --git a/src/lib/yang/README.md b/src/lib/yang/README.md index 0df8788d47..c3b13aea0c 100644 --- a/src/lib/yang/README.md +++ b/src/lib/yang/README.md @@ -315,28 +315,23 @@ type, but on the Lua side is given the normal 1-based indexing and support for the `#len` length operator via a wrapper. A non-fixed `leaf-list` is just a Lua array (a table with indexes starting from 1). -Instances of `list` nodes can have one of several representations. -(Recall that in YANG, `list` is not a list in the sense that we normally -think of it in programming languages, but rather is a kind of hash map.) - -If there is only one key leaf, and that leaf has a string type, then a -configuration list is represented as a normal Lua table whose keys are -the key strings, and whose values are Lua structures holding the leaf -values, as in containers. (In fact, it could be that the value of a -string-key struct is represented as a C struct, as in raw containers.) - -If all key and value types are fixed, then a `list` configuration -compiles to an efficient [`ctable`](../README.ctable.md). - -If all keys are fixed but values are not, then a `list` configuration -compiles to a [`cltable`](../README.cltable.md). - -Otherwise, a `list` configuration compiles to a Lua table whose keys are -Lua tables containing the keys. This sounds good on the surface but -really it's a pain, because you can't simply look up a value in the -table like `foo[{key1=42,key2=50}]`, because lookup in such a table is -by identity and not be value. Oh well. You can still do `for k,v in -pairs(foo)`, which is often good enough in this case. +Instances of `list` nodes are compiled to `lib.yang.list` objects. 
These behave mostly like regular Lua tables but are really hash tries underneath and support multi-key lookup and are ordered. A list with a single key *foo* behaves just like a Lua table: + +```lua +list[x] -- entry with foo==x +list[x] = y -- add or update entry with foo==x +``` + +A List with two keys *foo* and *bar* can be used like so: + +```lua +list[{foo=x, bar=y}] -- entry with key {foo=x, bar=y} +list[{foo=x, bar=y}] = z -- add or update entry +``` + +In both cases the lists can be iterated in order with `ipairs` and `pairs`. +For single-key lists `pairs` works as expected. +For multi-key lists `pairs` is identical to `ipairs` (keys are included in the entry table.) Note that there are a number of value types that are not implemented, including some important ones like `union`. diff --git a/src/lib/yang/binary.lua b/src/lib/yang/binary.lua index 91b9a37ddb..bab08f7a14 100644 --- a/src/lib/yang/binary.lua +++ b/src/lib/yang/binary.lua @@ -11,11 +11,11 @@ local schema = require("lib.yang.schema") local util = require("lib.yang.util") local value = require("lib.yang.value") local data = require('lib.yang.data') -local ctable = require('lib.ctable') -local cltable = require('lib.cltable') +local cdata = require('lib.yang.ctype') +local list = require("lib.yang.list") local MAGIC = "yangconf" -local VERSION = 0x0000f000 +local VERSION = 0x0000f300 local header_t = ffi.typeof([[ struct { @@ -85,7 +85,7 @@ end local value_emitters = {} local function value_emitter(ctype) if value_emitters[ctype] then return value_emitters[ctype] end - local type = data.typeof(ctype) + local type = cdata.typeof(ctype) local align = ffi.alignof(type) local size = ffi.sizeof(type) local buf = ffi.typeof('$[1]', type)() @@ -153,7 +153,7 @@ local function data_emitter(production) end table.sort(member_keys, order_predicate) if production.ctype then - local data_t = data.typeof(production.ctype) + local data_t = cdata.typeof(production.ctype) return function(data, stream) 
stream:write_stringref('cstruct') stream:write_stringref(production.ctype) @@ -203,7 +203,7 @@ local function data_emitter(production) end function handlers.array(production) if production.ctype then - local data_t = data.typeof(production.ctype) + local data_t = cdata.typeof(production.ctype) return function(data, stream) stream:write_stringref('carray') stream:write_stringref(production.ctype) @@ -220,75 +220,58 @@ local function data_emitter(production) end end end - function handlers.table(production) - if production.native_key then - local emit_value = visit1({type='struct', members=production.values, - ctype=production.value_ctype}) - -- FIXME: sctable if production.value_ctype? - return function(data, stream) - -- A string-keyed table is the same as a tagged struct. - stream:write_stringref('lstruct') - local number_keyed_members = {} - for k, v in pairs(data) do - if type(k) == 'number' then - assert(ffi.cast("uint32_t", k) == k) - number_keyed_members[k] = v - end - end - stream:write_scalar(uint32_t, table_size(number_keyed_members)) - for k,v in pairs(number_keyed_members) do - stream:write_scalar(uint32_t, k) - emit_value(v, stream) - end - local string_keyed_members = {} - for k, v in pairs(data) do - if type(k) == 'string' then - string_keyed_members[k] = v - end - end - stream:write_scalar(uint32_t, table_size(string_keyed_members)) - for k,v in pairs(string_keyed_members) do - stream:write_stringref(k) - emit_value(v, stream) - end - end - elseif production.key_ctype and production.value_ctype then - return function(data, stream) - stream:write_stringref('ctable') - stream:write_stringref(production.key_ctype) - stream:write_stringref(production.value_ctype) - data:save(stream) + function handlers.list(production) + local fieldspec_production = { + type = 'struct', + members = { + type = { + type = 'scalar', + argument_type = { primitive_type = 'string' } + }, + ctype = { + type = 'scalar', + argument_type = { primitive_type = 'string' } + }, + 
optional = { + type = 'scalar', + argument_type = { primitive_type = 'boolean' }, + ctype = value.types.boolean.ctype + } + } + } + local function spec_production (spec) + local p = {type='struct', members={}} + for name in pairs(spec) do + p.members[name] = fieldspec_production end - elseif production.key_ctype then - local emit_keys = visit1({type='table', key_ctype=production.key_ctype, - value_ctype='uint32_t'}) - local emit_value = visit1({type='struct', members=production.values}) - return function(data, stream) - stream:write_stringref('cltable') - emit_keys(data.keys, stream) - for i, value in pairs(data.values) do + return p + end + local emit_list_keys = + handlers.struct(spec_production(production.list.keys)) + local emit_list_members = + handlers.struct(spec_production(production.list.members)) + local emit_member = visitn(production.values) + for k, emit in pairs(emit_member) do + emit_member[data.normalize_id(k)] = emit + end + return function(data, stream) + stream:write_stringref('list') + local l = assert(list.object(data)) + emit_list_keys(l.keys, stream) + emit_list_members(l.members, stream) + for k, values in pairs(l.lvalues) do + stream:write_stringref(k) + for i,v in pairs(values) do + assert(i < SPARSE_ARRAY_END) stream:write_scalar(uint32_t, i) - emit_value(value, stream) + emit_member[k](v, stream) end stream:write_scalar(uint32_t, SPARSE_ARRAY_END) end - else - local emit_key = visit1({type='struct', members=production.keys, - ctype=production.key_ctype}) - local emit_value = visit1({type='struct', members=production.values, - ctype=production.value_ctype}) - -- FIXME: lctable if production.value_ctype? 
- return function(data, stream) - stream:write_stringref('lltable') - stream:write_scalar(uint32_t, table_size(data)) - for k,v in pairs(data) do - emit_key(k, stream) - emit_value(v, stream) - end - end + l:save(stream) end end - local native_types = lib.set('enumeration', 'identityref', 'string') + local native_types = lib.set('enumeration', 'identityref', 'leafref', 'string') function handlers.scalar(production) local primitive_type = production.argument_type.primitive_type local type = assert(value.types[primitive_type], "unsupported type: "..primitive_type) @@ -442,7 +425,7 @@ local function read_compiled_data(stream, strtab) return ret end function readers.carray() - local ctype = data.typeof(read_string()) + local ctype = cdata.typeof(read_string()) local count = stream:read_scalar(nil, uint32_t) return util.ffi_array(stream:read_array(nil, ctype, count), ctype, count) end @@ -451,39 +434,32 @@ local function read_compiled_data(stream, strtab) for i=1,stream:read_scalar(nil, uint32_t) do table.insert(ret, read1()) end return ret end - function readers.ctable() - local key_ctype = read_string() - local value_ctype = read_string() - local key_t, value_t = data.typeof(key_ctype), data.typeof(value_ctype) - return ctable.load(stream, {key_type=key_t, value_type=value_t}) - end - function readers.cltable() + function readers.list() local keys = read1() - local values = {} - while true do - local i = stream:read_scalar(nil, uint32_t) - if i == SPARSE_ARRAY_END then break end - values[i] = read1() - end - return cltable.build(keys, values) - end - function readers.lltable() - local ret = {} - for i=1,stream:read_scalar(nil, uint32_t) do - local k = read1() - ret[k] = read1() + local members = read1() + local lvalues = {} + for _, spec in pairs(members) do + if spec.type == 'lvalue' then + local name = read_string() + lvalues[name] = {} + while true do + local i = stream:read_scalar(nil, uint32_t) + if i == SPARSE_ARRAY_END then break end + lvalues[name][i] = 
read1() + end + end end - return ret + return list.load(stream, keys, members, lvalues) end function readers.stringref() return read_string() end function readers.cstruct() - local ctype = data.typeof(read_string()) + local ctype = cdata.typeof(read_string()) return stream:read_struct(nil, ctype) end function readers.cscalar() - local ctype = data.typeof(read_string()) + local ctype = cdata.typeof(read_string()) return stream:read_scalar(nil, ctype) end function readers.flag() @@ -601,6 +577,9 @@ function selftest() key addr; leaf addr { type inet:ipv4-address; mandatory true; } leaf port { type uint8 { range 0..11; } mandatory true; } + container metadata { + leaf info { type string; } + } } leaf severity { type severity; @@ -638,8 +617,8 @@ function selftest() addrs 5.4.3.2; routes { route { addr 1.2.3.4; port 1; } - route { addr 2.3.4.5; port 10; } - route { addr 3.4.5.6; port 2; } + route { addr 2.3.4.5; port 10; metadata { info "bar"; } } + route { addr 3.4.5.6; port 2; metadata { info "foo"; } } severity minor; } next-hop { @@ -662,13 +641,9 @@ function selftest() assert(data.addrs[1]==util.ipv4_pton('4.3.2.1')) assert(data.addrs[2]==util.ipv4_pton('5.4.3.2')) local routing_table = data.routes.route - local key = ffi.new('struct { uint32_t addr; }') - key.addr = util.ipv4_pton('1.2.3.4') - assert(routing_table:lookup_ptr(key).value.port == 1) - key.addr = util.ipv4_pton('2.3.4.5') - assert(routing_table:lookup_ptr(key).value.port == 10) - key.addr = util.ipv4_pton('3.4.5.6') - assert(routing_table:lookup_ptr(key).value.port == 2) + assert(routing_table[util.ipv4_pton('1.2.3.4')].port == 1) + assert(routing_table[util.ipv4_pton('2.3.4.5')].port == 10) + assert(routing_table[util.ipv4_pton('3.4.5.6')].port == 2) assert( data.next_hop.ipv4 == util.ipv4_pton('5.6.7.8'), "Choice type test failed (round: "..i..")" diff --git a/src/lib/yang/ctype.lua b/src/lib/yang/ctype.lua new file mode 100644 index 0000000000..0b1b526f46 --- /dev/null +++ b/src/lib/yang/ctype.lua 
@@ -0,0 +1,99 @@ +-- Use of this source code is governed by the Apache 2.0 license; see +-- COPYING. +module(..., package.seeall) + +local ffi = require("ffi") + +-- Helper for parsing C type declarations. +local function parse_type(str, start, is_member) + local function err(msg, pos) + io.stderr:write('ERROR: While parsing type:\n') + io.stderr:write('ERROR: '..str..'\n') + io.stderr:write('ERROR: '..string.rep(' ', pos - 1)..'^\n') + io.stderr:write('ERROR: '..msg..'\n') + error(msg, 2) + end + local function assert_match(str, pat, pos, what) + local ret = { str:match(pat, pos) } + if not ret[1] then err('bad '..what, pos) end + return unpack(ret) + end + local t, array, member, pos + -- See if it's a struct. + t, pos = str:match('^%s*(struct%s*%b{})%s*()', start) + -- Otherwise it might be a scalar. + if not t then t, pos = str:match('^%s*([%a_][%w_]*)%s*()', start) end + -- We don't do unions currently. + if not t then err('invalid type', start) end + -- If we're parsing a struct or union member, get the name. + if is_member then + member, pos = assert_match(str, '^([%a_][%w_]*)%s*()', pos, 'member name') + end + -- Parse off the array suffix, if any. + if str:match('^%[', pos) then + array, pos = assert_match(str, '^(%b[])%s*()', pos, 'array component') + end + if is_member then + -- Members should have a trailing semicolon. + pos = assert_match(str, '^;%s*()', pos, 'semicolon') + else + -- Nonmembers should parse to the end of the string. + assert_match(str, '^()$', pos, 'suffix') + end + return t, array, member, pos +end + +-- We want structural typing, not nominal typing, for Yang data. The +-- "foo" member in "struct { struct { uint16 a; } foo; }" should not +-- have a unique type; we want to be able to instantiate a "struct { +-- uint16 a; }" and get a compatible value. To do this, we parse out +-- nested "struct" types and only ever make one FFI type for each +-- compatible struct kind. 
The user-facing interface is the "typeof" +-- function below; the "compile_type" helper handles nesting. +-- +-- It would be possible to avoid this complexity by having the grammar +-- generate something other than a string "ctype" representation, but +-- then we don't have a type name to serialize into binary data. We +-- might as well embrace the type strings. +local function compile_type(name) + local function maybe_array_type(t, array) + -- If ARRAY is something like "[10]", make a corresponding type. + -- Otherwise just return T. + if array then return ffi.typeof('$'..array, t) end + return t + end + local parsed, array = parse_type(name, 1, false) + local ret + if parsed:match('^struct[%s{]') then + -- It's a struct type; parse out the members and rebuild. + local struct_type = 'struct { ' + local struct_type_args = {} + local function add_member(member_type, member_name) + struct_type = struct_type..'$ '..member_name..'; ' + table.insert(struct_type_args, member_type) + end + -- Loop from initial "struct {" to final "}". + local pos = assert(parsed:match('^struct%s*{%s*()')) + while not parsed:match('^}$', pos) do + local mtype, mname, marray + mtype, marray, mname, pos = parse_type(parsed, pos, true) + -- Recurse on mtype by calling the caching "typeof" defined + -- below. + add_member(maybe_array_type(typeof(mtype), marray), mname) + end + struct_type = struct_type..'}' + ret = ffi.typeof(struct_type, unpack(struct_type_args)) + else + -- Otherwise the type is already structural and we can just use + -- ffi.typeof. 
+ ret = ffi.typeof(parsed) + end + return maybe_array_type(ret, array) +end + +local type_cache = {} +function typeof(name) + assert(type(name) == 'string') + if not type_cache[name] then type_cache[name] = compile_type(name) end + return type_cache[name] +end diff --git a/src/lib/yang/data.lua b/src/lib/yang/data.lua index 6b6c9ba363..c1e9318a24 100644 --- a/src/lib/yang/data.lua +++ b/src/lib/yang/data.lua @@ -7,9 +7,9 @@ local parser_mod = require("lib.yang.parser") local schema = require("lib.yang.schema") local util = require("lib.yang.util") local value = require("lib.yang.value") +local list = require("lib.yang.list") + typeof = require("lib.yang.ctype").typeof local ffi = require("ffi") -local ctable = require('lib.ctable') -local cltable = require('lib.cltable') local lib = require('core.lib') local regexp = require("lib.xsd_regexp") local lib = require("core.lib") @@ -18,126 +18,6 @@ function normalize_id(id) return (id:gsub('[^%w_]', '_')) end --- Helper for parsing C type declarations. -local function parse_type(str, start, is_member) - local function err(msg, pos) - io.stderr:write('ERROR: While parsing type:\n') - io.stderr:write('ERROR: '..str..'\n') - io.stderr:write('ERROR: '..string.rep(' ', pos - 1)..'^\n') - io.stderr:write('ERROR: '..msg..'\n') - error(msg, 2) - end - local function assert_match(str, pat, pos, what) - local ret = { str:match(pat, pos) } - if not ret[1] then err('bad '..what, pos) end - return unpack(ret) - end - local t, array, member, pos - -- See if it's a struct. - t, pos = str:match('^%s*(struct%s*%b{})%s*()', start) - -- Otherwise it might be a scalar. - if not t then t, pos = str:match('^%s*([%a_][%w_]*)%s*()', start) end - -- We don't do unions currently. - if not t then err('invalid type', start) end - -- If we're parsing a struct or union member, get the name. - if is_member then - member, pos = assert_match(str, '^([%a_][%w_]*)%s*()', pos, 'member name') - end - -- Parse off the array suffix, if any. 
- if str:match('^%[', pos) then - array, pos = assert_match(str, '^(%b[])%s*()', pos, 'array component') - end - if is_member then - -- Members should have a trailing semicolon. - pos = assert_match(str, '^;%s*()', pos, 'semicolon') - else - -- Nonmembers should parse to the end of the string. - assert_match(str, '^()$', pos, 'suffix') - end - return t, array, member, pos -end - --- We want structural typing, not nominal typing, for Yang data. The --- "foo" member in "struct { struct { uint16 a; } foo; }" should not --- have a unique type; we want to be able to instantiate a "struct { --- uint16 a; }" and get a compatible value. To do this, we parse out --- nested "struct" types and only ever make one FFI type for each --- compatible struct kind. The user-facing interface is the "typeof" --- function below; the "compile_type" helper handles nesting. --- --- It would be possible to avoid this complexity by having the grammar --- generate something other than a string "ctype" representation, but --- then we don't have a type name to serialize into binary data. We --- might as well embrace the type strings. -local function compile_type(name) - local function maybe_array_type(t, array) - -- If ARRAY is something like "[10]", make a corresponding type. - -- Otherwise just return T. - if array then return ffi.typeof('$'..array, t) end - return t - end - local parsed, array = parse_type(name, 1, false) - local ret - if parsed:match('^struct[%s{]') then - -- It's a struct type; parse out the members and rebuild. - local struct_type = 'struct { ' - local struct_type_args = {} - local function add_member(member_type, member_name) - struct_type = struct_type..'$ '..member_name..'; ' - table.insert(struct_type_args, member_type) - end - -- Loop from initial "struct {" to final "}". 
- local pos = assert(parsed:match('^struct%s*{%s*()')) - while not parsed:match('^}$', pos) do - local mtype, mname, marray - mtype, marray, mname, pos = parse_type(parsed, pos, true) - -- Recurse on mtype by calling the caching "typeof" defined - -- below. - add_member(maybe_array_type(typeof(mtype), marray), mname) - end - struct_type = struct_type..'}' - ret = ffi.typeof(struct_type, unpack(struct_type_args)) - else - -- Otherwise the type is already structural and we can just use - -- ffi.typeof. - ret = ffi.typeof(parsed) - end - return maybe_array_type(ret, array) -end - -local type_cache = {} -function typeof(name) - assert(type(name) == 'string') - if not type_cache[name] then type_cache[name] = compile_type(name) end - return type_cache[name] -end - --- If a "list" node has a single key that is string-valued or representable as --- a Lua number, we will represent instances of that node as normal Lua tables --- where the key is the table key and the value does not contain the key. -local function table_native_key(keys) - local native_number_types = - lib.set('int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32') - local function representable_as_native(v) - return v.type == 'scalar' and - (v.argument_type.primitive_type == 'string' or - native_number_types[v.argument_type.primitive_type]) - end - local native_key = nil - for k,v in pairs(keys) do - if native_key ~= nil then - -- Bail out if the list has multiple keys, native or otherwise. - return nil - elseif representable_as_native(v) then - -- Select the first native key, if any. - native_key = k - else - return nil - end - end - return native_key -end - -- We need to properly support unions. It's a big FIXME! As an -- intermediate step, we pick the first type in the union. Terrible. 
local function elide_unions(t) @@ -238,10 +118,66 @@ function data_grammar_from_schema(schema, is_config) end end if is_empty(values) and node.config ~= is_config then return end - return {type='table', keys=keys, values=values, - native_key=table_native_key(keys), - key_ctype=struct_ctype(keys), - value_ctype=struct_ctype(values), + local function list_spec(nodes, builder, validate) + local spec = {} + for name, node in pairs(nodes) do + builder(spec, normalize_id(name), node) + end + if validate then validate(spec) end + return spec + end + local function list_key(keys, name, node) + assert(node.type =='scalar') + if node.ctype then + keys[name] = {ctype=node.ctype} + elseif value_ctype(node.argument_type) then + keys[name] = {ctype=value_ctype(node.argument_type)} + else + keys[name] = {type=node.argument_type.primitive_type} + end + end + local function list_member(members, name, node) + if node.ctype then + members[name] = { + ctype = node.ctype + } + elseif node.type == 'scalar' then + if value_ctype(node.argument_type) then + members[name] = { + ctype = value_ctype(node.argument_type), + optional = not (node.default or node.mandatory) + } + else + members[name] = { + type = node.argument_type.primitive_type, + optional = not (node.default or node.mandatory) + } + end + elseif node.type == 'choice' then + for _, choices in pairs(node.choices) do + local choice_members = list_spec(choices, list_member) + for name, member in pairs(choice_members) do + assert(not members[name]) + members[name] = member + end + end + else + members[name] = { + type = 'lvalue', + optional = not node.mandatory + } + end + end + local l = {} + if node.key then + l.keys = list_spec(keys, list_key, list.validate_keys) + else + l.keys = {__ikey={ctype='uint64_t'}} + end + l.members = list_spec(values, list_member, list.validate_members) + l.has_key = node.key and true + function l.new() return list.new(l.keys, l.members) end + return {type='list', keys=keys, values=values, list=l, 
unique = node.unique, min_elements=node.min_elements, max_elements=node.max_elements} end @@ -546,89 +482,35 @@ function choice_parser(keyword, choices, members, default, mandatory) return {represents=represents, stateful_parser=stateful_parser} end -local function ctable_builder(key_t, value_t) - local res = ctable.new({ key_type=key_t, value_type=value_t, - max_occupancy_rate = 0.4 }) - local builder = {} - -- Uncomment for progress counters. - -- local counter = 0 - function builder:add(key, value) - -- counter = counter + 1 - -- if counter % 1000 == 0 then print('ctable add', counter) end - res:add(key, value) - end - function builder:finish() return res end - return builder -end - -local function native_keyed_table_builder(native_key) - local res = {} - local builder = {} - function builder:add(key, value) - local k = assert(key[native_key]) - assert(res[k] == nil, 'duplicate key: '..k) - res[k] = value - end - function builder:finish() return res end - return builder -end - -local function cltable_builder(key_t) - local res = cltable.new({ key_type=key_t }) - local builder = {} - function builder:add(key, value) - assert(res[key] == nil, 'duplicate key') - res[key] = value - end - function builder:finish() return res end - return builder -end - -local function ltable_builder() - local res = {} - local builder = {} - function builder:add(key, value) res[key] = value end - function builder:finish() return res end - return builder -end - -local function table_parser(keyword, keys, values, native_key, key_ctype, - value_ctype) +function list_parser(keyword, keys, values, spec) local members = {} for k,v in pairs(keys) do members[k] = v end for k,v in pairs(values) do members[k] = v end local parser = struct_parser(keyword, members) - local key_t = key_ctype and typeof(key_ctype) - local value_t = value_ctype and typeof(value_ctype) - local init - if native_key then - function init() return native_keyed_table_builder(native_key) end - elseif key_t and value_t then 
- function init() return ctable_builder(key_t, value_t) end - elseif key_t then - function init() return cltable_builder(key_t) end - else - function init() return ltable_builder() end + local function init() + local res = spec.new() + local l = list.object(res) + local assoc = {} + function assoc:add(entry) + if not spec.has_key then + entry.__ikey = #res+1 + end + l:add_entry(entry) + end + function assoc:finish() return res end + return assoc end local function parse1(P) return parser.finish(parser.parse(P, parser.init())) end local function parse(P, assoc) local struct = parse1(P) - local key, value = {}, {} - if key_t then key = key_t() end - if value_t then value = value_t() end - for k,_ in pairs(keys) do + local entry = {} + for k,_ in pairs(struct) do local id = normalize_id(k) - key[id] = struct[id] - end - for k, v in pairs(struct) do - local id = normalize_id(k) - if keys[k] == nil then - value[id] = struct[id] - end + entry[id] = struct[id] end - assoc:add(key, value) + assoc:add(entry) return assoc end local function finish(assoc) @@ -658,10 +540,9 @@ function data_parser_from_grammar(production) function handlers.array(keyword, production) return array_parser(keyword, production.element_type, production.ctype) end - function handlers.table(keyword, production) + function handlers.list(keyword, production) local keys, values = visitn(production.keys), visitn(production.values) - return table_parser(keyword, keys, values, production.native_key, - production.key_ctype, production.value_ctype) + return list_parser(keyword, keys, values, production.list) end function handlers.scalar(keyword, production) return scalar_parser(keyword, production.argument_type, @@ -747,8 +628,8 @@ function data_parser_from_grammar(production) return parser.finish(out) end end - function top_parsers.table(production) - local parser = visit1('[bare table]', production) + function top_parsers.list(production) + local parser = visit1('[bare list]', production) return 
function(stream) local P = parser_mod.Parser.new(stream) local out = parser.init() @@ -954,47 +835,18 @@ function xpath_printer_from_grammar(production, print_default, root) end end end - -- As a special case, the table handler allows the keyword to be nil, - -- for printing tables at the top level without keywords. - function handlers.table(keyword, production) + -- As a special case, the list handler allows the keyword to be nil, + -- for printing lists at the top level without keywords. + function handlers.list(keyword, production) local compose_key = key_composer(production.keys) local print_value = body_printer(production.values) - if production.native_key then - local id = normalize_id(production.native_key) - return function(data, file, path) - path = path or '' - for key, value in pairs(data) do - local key = compose_key({[id]=key}) - local path = path..(keyword or '')..key..'/' - print_value(value, file, path) - end - end - elseif production.key_ctype and production.value_ctype then - return function(data, file, path) - path = path or '' - for entry in data:iterate() do - local key = compose_key(entry.key) - local path = path..(keyword or '')..key..'/' - print_value(entry.value, file, path) - end - end - elseif production.key_ctype then - return function(data, file, path) - path = path or '' - for key, value in cltable.pairs(data) do - local key = compose_key(key) - local path = path..(keyword or '')..key..'/' - print_value(value, file, path) - end - end - else - return function(data, file, path) - path = path or '' - for key, value in pairs(data) do - local key = compose_key(key) - local path = path..(keyword or '')..key..'/' - print_value(value, file, path) - end + return function(data, file, path) + assert(list.object(data)) + path = path or '' + for _, entry in ipairs(data) do + local key = compose_key(entry) + local path = path..(keyword or '')..key..'/' + print_value(entry, file, path) end end end @@ -1031,8 +883,8 @@ function 
xpath_printer_from_grammar(production, print_default, root) return file:flush() end end - function top_printers.table(production) - local printer = handlers.table(nil, production) + function top_printers.list(production) + local printer = handlers.list(nil, production) return function(data, file) printer(data, file, '') return file:flush() @@ -1213,52 +1065,20 @@ function influxdb_printer_from_grammar(production, print_default, root) end return true end - -- As a special case, the table handler allows the keyword to be nil, - -- for printing tables at the top level without keywords. - function handlers.table(keyword, production) + -- As a special case, the list handler allows the keyword to be nil, + -- for printing lists at the top level without keywords. + function handlers.list(keyword, production) local is_key_unique = is_key_unique(production) local compose_key = key_composer(production.keys) local print_value = body_printer(production.values) - if production.native_key then - local id = normalize_id(production.native_key) - return function(data, file, path) - path = path or '' - for key, value in pairs(data) do - local key = compose_key({[id]=key}) - local path = path..(keyword or '')..'/' - if not is_key_unique then key = path..key end - print_value(value, file, path, key) - end - end - elseif production.key_ctype and production.value_ctype then - return function(data, file, path) - path = path or '' - for entry in data:iterate() do - local key = compose_key(entry.key) - local path = path..(keyword or '')..'/' - if not is_key_unique then key = path..key end - print_value(entry.value, file, path, key) - end - end - elseif production.key_ctype then - return function(data, file, path) - path = path or '' - for key, value in cltable.pairs(data) do - local key = compose_key(key) - local path = path..(keyword or '')..'/' - if not is_key_unique then key = path..key end - print_value(value, file, path, key) - end - end - else - return function(data, file, path) - 
path = path or '' - for key, value in pairs(data) do - local key = compose_key(key) - local path = path..(keyword or '')..'/' - if not is_key_unique then key = path..key end - print_value(value, file, path, key) - end + return function(data, file, path) + assert(list.object(data)) + path = path or '' + for _, entry in ipairs(data) do + local key = compose_key(entry) + local path = path..(keyword or '')..'/' + if not is_key_unique then key = path..key end + print_value(entry, file, path, key) end end end @@ -1296,8 +1116,8 @@ function influxdb_printer_from_grammar(production, print_default, root) return file:flush() end end - function top_printers.table(production) - local printer = handlers.table(nil, production) + function top_printers.list(production) + local printer = handlers.list(nil, production) return function(data, file) printer(data, file, '') return file:flush() @@ -1408,51 +1228,19 @@ function data_printer_from_grammar(production, print_default) end end end - -- As a special case, the table handler allows the keyword to be nil, - -- for printing tables at the top level without keywords. - function handlers.table(keyword, production) + -- As a special case, the list handler allows the keyword to be nil, + -- for printing lists at the top level without keywords. + function handlers.list(keyword, production) local print_key = body_printer(production.keys) local print_value = body_printer(production.values) - if production.native_key then - local id = normalize_id(production.native_key) - return function(data, file, indent) - for key, value in pairs(data) do - if keyword then print_keyword(keyword, file, indent) end - file:write('{\n') - print_key({[id]=key}, file, indent..' ') - print_value(value, file, indent..' 
') - file:write(indent..'}\n') - end - end - elseif production.key_ctype and production.value_ctype then - return function(data, file, indent) - for entry in data:iterate() do - if keyword then print_keyword(keyword, file, indent) end - file:write('{\n') - print_key(entry.key, file, indent..' ') - print_value(entry.value, file, indent..' ') - file:write(indent..'}\n') - end - end - elseif production.key_ctype then - return function(data, file, indent) - for key, value in cltable.pairs(data) do - if keyword then print_keyword(keyword, file, indent) end - file:write('{\n') - print_key(key, file, indent..' ') - print_value(value, file, indent..' ') - file:write(indent..'}\n') - end - end - else - return function(data, file, indent) - for key, value in pairs(data) do - if keyword then print_keyword(keyword, file, indent) end - file:write('{\n') - print_key(key, file, indent..' ') - print_value(value, file, indent..' ') - file:write(indent..'}\n') - end + return function(data, file, indent) + assert(list.object(data)) + for _, entry in ipairs(data) do + if keyword then print_keyword(keyword, file, indent) end + file:write('{\n') + print_key(entry, file, indent..' ') + print_value(entry, file, indent..' 
') + file:write(indent..'}\n') end end end @@ -1489,8 +1277,8 @@ function data_printer_from_grammar(production, print_default) return file:flush() end end - function top_printers.table(production) - local printer = handlers.table(nil, production) + function top_printers.list(production) + local printer = handlers.list(nil, production) return function(data, file) printer(data, file, '') return file:flush() @@ -1729,7 +1517,7 @@ function selftest() enum plastic; } } - list contents { uses fruit; key name; } + list contents { uses fruit; key name; ordered-by user; } } leaf addr { description "internet of fruit"; @@ -1781,6 +1569,22 @@ function selftest() assert(data.choices.one.blue == "hey") assert(data.choices.two.red == "bye") + -- Check list order + local score, total = 0, 0 + for i, content in ipairs(contents) do + assert(score < content.score, "ipairs out of order: "..i) + score = content.score + total = total + 1 + end + assert(total == #contents) + local score, total = 0, 0 + for i, content in pairs(contents) do + assert(score < content.score, "pairs out of order: "..i) + score = content.score + total = total + 1 + end + assert(total == #contents) + local stream = mem.tmpfile() print_config_for_schema(test_schema, data, stream) stream:seek('set', 0) diff --git a/src/lib/yang/list.lua b/src/lib/yang/list.lua new file mode 100644 index 0000000000..b949ad4846 --- /dev/null +++ b/src/lib/yang/list.lua @@ -0,0 +1,1393 @@ +-- Use of this source code is governed by the Apache 2.0 license; see +-- COPYING. 
+module(..., package.seeall) + +local typeof = require("lib.yang.ctype").typeof +local murmur = require("lib.hash.murmur") +local lib = require("core.lib") +local ffi = require("ffi") +local C = ffi.C +local band, bor, bnot, lshift, rshift = + bit.band, bit.bor, bit.bnot, bit.lshift, bit.rshift +local min, max = + math.min, math.max + +local Heap = { + line_size = 128, + block_lines = 64, + block_size = 64*128, -- 8KB +} + +-- NB: `a' must be a power of two +local function pad (a, l) return band(-l, a-1) end +local function padded (a, l) return l + pad(a, l) end + +Heap.block_t = ffi.typeof(([[ + struct { + uint8_t ref[%d]; + uint8_t mem[%d]; + } __attribute__((packed)) +]]):format(Heap.block_lines, Heap.block_size)) + +function Heap:new () + local heap = { + _blocks = { + [0] = self.block_t() + }, + _free = 0, _maxfree = Heap.block_size, + _recycle = nil, _maxrecycle = nil + } + return setmetatable(heap, {__index=Heap}) +end + +local _block_pow = 13 +assert(Heap.block_size == lshift(1,_block_pow)) + +function Heap:_block (o) + local block = rshift(o, _block_pow) + local offset = band(o, lshift(1, _block_pow)-1) + return block, offset +end + +function Heap:_bump_alloc (bytes) + local o, new_free = self._free, self._free + bytes + if new_free <= self._maxfree then + self._free = new_free + return o + end +end + +local _line_pow = 7 +assert(Heap.line_size == lshift(1, _line_pow)) + +function Heap:_ref (o, bytes, c) + local block, offset = self:_block(o) + local b = self._blocks[block] + while bytes > 0 do + local ref = rshift(offset, _line_pow) + b.ref[ref] = b.ref[ref] + c + local next_offset = lshift(ref+1, _line_pow) + bytes = bytes - (next_offset - offset) + offset = next_offset + end +end + +function Heap:_has_ref (l) + local block, offset = self:_block(l) + local b = self._blocks[block] + local ref = rshift(offset, _line_pow) + return b.ref[ref] > 0 +end + +function Heap:_find_hole (recycle) + local block = self:_block(recycle) + while recycle < lshift(block+1, 
_block_pow) do + if not self:_has_ref(recycle) then + return recycle + end + recycle = recycle + Heap.line_size + end +end + +function Heap:_find_recycle (recycle) + local hole + local block = self:_block(recycle) + local free_block = self:_block(self._free) + -- NB: scan only blocks before current free block + while not hole and block < free_block do + hole = self:_find_hole(recycle) + block = block + 1 + recycle = lshift(block, _block_pow) + end + if hole then + return hole, hole + Heap.line_size + end +end + +function Heap:_recycle_alloc (bytes) + assert(bytes <= Heap.line_size) + local o, new_recycle = self._recycle, self._recycle + bytes + if new_recycle <= self._maxrecycle then + self._recycle = new_recycle + return o + else + local next_line = padded(Heap.line_size, self._recycle) + self._recycle, self._maxrecycle = self:_find_recycle(next_line) + if self._recycle then + return self:_recycle_alloc(bytes) + end + end +end + +function Heap:_new_block () + local block = #self._blocks+1 + self._blocks[block] = self.block_t() + local o = lshift(block, _block_pow) + return o, o + Heap.block_size +end + +function Heap:_collect () + self._free, self._maxfree = self:_new_block() + self._recycle, self._maxrecycle = self:_find_recycle(0) +end + +function Heap:allocate (bytes) + assert(bytes <= Heap.block_size) + local o + if self._recycle and bytes <= Heap.line_size then + o = self:_recycle_alloc(bytes) + end + if not o then + o = self:_bump_alloc(bytes) + end + if o then + self:_ref(o, bytes, 1) + -- Allocated space is zeroed. We are civilized, after all. 
+ ffi.fill(self:ptr(o), bytes, 0) + return o + else + self:_collect() + return self:allocate(bytes) + end +end + +function Heap:free (o, bytes) + assert(bytes <= Heap.block_size) + self:_ref(o, bytes, -1) +end + +function Heap:ptr (o) + local block, offset = self:_block(o) + return self._blocks[block].mem + offset +end + +Heap.header_t = ffi.typeof[[ + struct { + uint32_t maxblock; + double free, maxfree; + double recycle, maxrecycle; + } __attribute__((packed)) +]] + +function Heap:save (stream) + stream:write_struct(self.header_t, self.header_t( + #self._blocks, + self._free, self._maxfree, + self._recycle or -1, self._maxrecycle or -1 + )) + for block=0, #self._blocks do + stream:write_struct(self.block_t, self._blocks[block]) + end +end + +function Heap:load (stream) + local header = stream:read_struct(nil, self.header_t) + local blocks = {} + for block=0, header.maxblock do + blocks[block] = stream:read_struct(nil, self.block_t) + end + local heap = { + _blocks = blocks, + _free = header.free, _maxfree = header.maxfree, + _recycle = (header.recycle >= 0 and header.recycle) or nil, + _maxrecycle = (header.maxrecycle >= 0 and header.maxrecycle) or nil + } + return setmetatable(heap, {__index=Heap}) +end + +local function selftest_heap () + local h = Heap:new() + local o1 = h:allocate(Heap.line_size/2) + assert(h:_has_ref(0*Heap.line_size)) + local o2 = h:allocate(Heap.line_size*1) + assert(h:_has_ref(0*Heap.line_size)) + assert(h:_has_ref(1*Heap.line_size)) + h:free(o2, Heap.line_size*1) + assert(h:_has_ref(0*Heap.line_size)) + assert(not h:_has_ref(1*Heap.line_size)) + h:free(o1, Heap.line_size/2) + assert(not h:_has_ref(0*Heap.line_size)) + local o1 = h:allocate(Heap.block_size) + local o1_b, o1_o = h:_block(o1) + assert(o1_b == 1 and o1_o == 0) + assert(#h._blocks == 1) + assert(h._recycle == 0) + assert(h._maxrecycle == Heap.line_size) + assert(h._free == Heap.block_size*2) + assert(h._maxfree == Heap.block_size*2) + local o2 = h:allocate(Heap.line_size/2) + 
assert(h._recycle == Heap.line_size/2) + local o3 = h:allocate(Heap.line_size) + assert(h._recycle == h._maxrecycle) + + -- Stress + local h = Heap:new() + local obj = {} + math.randomseed(0) + local function alloc_obj () + local size = math.random(10*Heap.line_size) + local s = ffi.new("char[?]", size) + for i=0, size-1 do + s[i] = math.random(127) + end + local o = h:allocate(size) + assert(not obj[o]) + ffi.copy(h:ptr(o), s, size) + obj[o] = s + return o + end + local function free_obj () + for o, s in pairs(obj) do + if math.random(10) == 1 then + h:free(o, ffi.sizeof(s)) + obj[o] = nil + end + end + end + local function check_obj () + for o, s in pairs(obj) do + if C.memcmp(h:ptr(o), s, ffi.sizeof(s)) ~= 0 then + return o + end + end + end + for i=1, 100000 do + local o = alloc_obj() + local err = check_obj() + if err then + error("error after allocation "..i.." ("..o..") in object "..err) + end + free_obj() + end + + -- Test save + local memstream = require("lib.stream.mem") + local tmp = memstream.tmpfile() + h:save(tmp) + tmp:seek('set', 0) + h = Heap:load(tmp) + check_obj() + tmp:seek('set', 0) + Heap:new():save(tmp) + tmp:seek('set', 0) + local h = Heap:load(tmp) + assert(h._free == 0) + assert(h._recycle == nil) +end + + +local List = { + trie_width = 4, + hash_width = 32, + node_children = 16 +} + +List.type_map = { + binary = {ctype='uint32_t', kind='string'}, -- same as string + empty = {ctype='bool', kind='empty'}, -- no representation (always true) + enumeration = {ctype='uint32_t', kind='string'}, -- same as string + identityref = {ctype='uint32_t', kind='string'}, -- same as string + leafref = {ctype='uint32_t', kind='string'}, -- same as string + string = {ctype='uint32_t', kind='string'}, -- pointer into heap + lvalue = {ctype='uint32_t', kind='lvalue'} +} + +function List:type_info (type) + return assert(self.type_map[type], "Unsupported type: "..type) +end + +function List:type_kind (spec) + if spec.ctype then return 'ctype' + else return 
self:type_info(spec.type).kind end +end + +function List:type_ctype (spec) + return spec.ctype or self:type_info(spec.type).ctype +end + +function validate_keys (keys) + validate_members(keys) + for name, key in pairs(keys) do + assert(not key.optional, "Keys can not be optional") + end +end + +function validate_members (members) + for name, member in pairs(members) do + assert(type(member) == 'table' and + (type(member.type) == 'string' or + type(member.ctype) == 'string'), + "Invalid field spec for "..name) + assert(List:type_kind(member)) + end +end + +List.node_t = ffi.typeof [[ + struct { + uint16_t occupied; + uint16_t leaf; + uint32_t parent; + uint32_t children[16]; + } +]] + +List.list_ts = [[ + struct { + uint32_t prev; + uint32_t next; + } +]] + +List.string_t = ffi.typeof [[ + struct { + uint16_t len; + uint8_t str[1]; + } __attribute__((packed)) +]] + +List.optional_ts = [[ + struct { + struct { %s } value; + bool present; + } %s; +]] + +List.leaf_ts = [[ + struct { + %s list; + %s keys; + %s members; + } +]] + +function List:_new (keys, members) + local self = setmetatable({}, {__index=List}) + validate_keys(keys) + validate_members(members) + local keys_ts = self:build_type(keys) + local members_ts = self:build_type(members) + self.keys = keys + self.members = members + self.keys_t = typeof(keys_ts) + self.leaf_t = typeof(self:build_leaf_type(keys_ts, members_ts)) + self.hashin = self.keys_t() + return self +end + +function List:new (keys, members) + local self = self:_new(keys, members) + self.heap = Heap:new() + self.first, self.last = 0, 0 -- empty + self.root = self:alloc_node() -- heap obj=0 reserved for root node + self.length = 0 + self.lvalues = {} + for name, member in pairs(members) do + if member.type == 'lvalue' then + self.lvalues[name] = {} + end + end + return self +end + +function List:field_order (fields) + local order = {} + for name in pairs(fields) do + table.insert(order, name) + end + local function order_fields (x, y) + -- 1. 
mandatory fields (< name) + -- 2. optional fields (< name) + if (not fields[x].optional) and fields[y].optional then + return true + elseif fields[x].optional and (not fields[y].optional) then + return false + else + return x < y + end + end + table.sort(order, order_fields) + return order +end + +function List:build_type (fields) + local t = "struct { " + for _, name in ipairs(self:field_order(fields)) do + local spec = fields[name] + local ct = self:type_ctype(spec) + local et, array = ct:match("(.+)(%[%d+%])") + local f + if array then + f = ("%s %s%s;"):format(et, name, array) + else + f = ("%s %s;"):format(ct, name) + end + if spec.optional then + f = self.optional_ts:format(f, name) + end + t = t..f.." " + end + t = t.."}" + return t +end + +function List:build_leaf_type (keys_ts, members_ts) + return self.leaf_ts:format(self.list_ts, keys_ts, members_ts) +end + +function List:heap_cast (t, o) + return ffi.cast(ffi.typeof('$*', t), self.heap:ptr(o)) +end + +function List:alloc_node () + local o = self.heap:allocate(ffi.sizeof(self.node_t)) + return o +end + +function List:free_node (o) + self.heap:free(o, ffi.sizeof(self.node_t)) +end + +function List:node (o) + return self:heap_cast(self.node_t, o) +end + +function List:alloc_leaf () + local o = self.heap:allocate(ffi.sizeof(self.leaf_t)) + return o +end + +function List:free_leaf (o) + self.heap:free(o, ffi.sizeof(self.leaf_t)) +end + +function List:leaf (o) + return self:heap_cast(self.leaf_t, o) +end + +function List:alloc_str (s) + local o = self.heap:allocate(ffi.sizeof(self.string_t)+#s-1) + local str = self:str(o) + ffi.copy(str.str, s, #s) + str.len = #s + return o +end + +function List:free_str (o) + local str = self:str(o) + self.heap:free(o, ffi.sizeof(self.string_t)+str.len-1) +end + +function List:str (o) + return self:heap_cast(self.string_t, o) +end + +function List:tostring(o) + local str = self:str(o) + return ffi.string(str.str, str.len) +end + +function List:str_equal_string (o, s) + local 
+   str = self:str(o)
+   -- NB: lengths must match exactly before memcmp; a stored string that is
+   -- a prefix of `s` must not compare equal (and memcmp must not over-read).
+   -- (The previous `not str.len == #s` parsed as `(not str.len) == #s`,
+   -- which is always false in Lua, making this guard dead code.)
+   if str.len ~= #s then
+      return false
+   end
+   return C.memcmp(str.str, s, str.len) == 0
+end
+
+function List:pack_mandatory (dst, name, kind, value)
+   assert(value ~= nil, "Missing value: "..name)
+   if kind == 'string' then
+      dst[name] = self:alloc_str(value)
+   elseif kind == 'empty' then
+      dst[name] = true
+   elseif kind == 'ctype' then
+      dst[name] = value
+   elseif kind == 'lvalue' then
+      local idx = #self.lvalues[name] + 1
+      self.lvalues[name][idx] = assert(value)
+      dst[name] = idx
+   else
+      error("NYI: kind "..kind)
+   end
+end
+
+function List:unpack_mandatory (dst, name, kind, value)
+   if kind == 'string' then
+      dst[name] = self:tostring(value)
+   elseif kind == 'empty' then
+      dst[name] = true
+   elseif kind == 'ctype' then
+      dst[name] = value
+   elseif kind == 'lvalue' then
+      dst[name] = assert(self.lvalues[name][value])
+   else
+      error("NYI: kind "..kind)
+   end
+end
+
+function List:free_mandatory (name, kind, value)
+   if kind == 'string' then
+      self:free_str(value)
+   elseif kind == 'empty' then
+      -- nop
+   elseif kind == 'ctype' then
+      -- nop
+   elseif kind == 'lvalue' then
+      self.lvalues[name][value] = nil
+   else
+      error("NYI: kind "..kind)
+   end
+end
+
+function List:pack_optional (dst, name, kind, value)
+   if value ~= nil then
+      self:pack_mandatory(dst[name].value, name, kind, value)
+      dst[name].present = true
+   else
+      dst[name].present = false
+   end
+end
+
+function List:unpack_optional (dst, name, kind, value)
+   if value.present then
+      self:unpack_mandatory(dst, name, kind, value.value[name])
+   end
+end
+
+function List:free_optional (name, kind, value)
+   if value.present then
+      self:free_mandatory(name, kind, value.value[name])
+   end
+end
+
+function List:pack_field (dst, name, spec, value)
+   local kind = self:type_kind(spec)
+   if spec.optional then
+      self:pack_optional(dst, name, kind, value)
+   else
+      self:pack_mandatory(dst, name, kind, value)
+   end
+end
+
+function List:unpack_field (dst, name, spec, value)
+   local kind =
self:type_kind(spec) + if spec.optional then + self:unpack_optional(dst, name, kind, value) + else + self:unpack_mandatory(dst, name, kind, value) + end +end + +function List:free_field (name, spec, value) + local kind = self:type_kind(spec) + if spec.optional then + self:free_optional(name, kind, value) + else + self:free_mandatory(name, kind, value) + end +end + +function List:pack_fields (s, t, fields) + for name, spec in pairs(fields) do + self:pack_field(s, name, spec, t[name]) + end +end + +function List:unpack_fields (t, s, fields) + for name, spec in pairs(fields) do + self:unpack_field(t, name, spec, s[name]) + end +end + +function List:free_fields (s, fields) + for name, spec in pairs(fields) do + self:free_field(name, spec, s[name]) + end +end + +local murmur32 = murmur.MurmurHash3_x86_32:new() +local function hash32 (ptr, len, seed) + return murmur32:hash(ptr, len, seed).u32[0] +end + +function List:entry_hash (e, seed) + for name, spec in pairs(self.keys) do + local kind = self:type_kind(spec) + if kind == 'ctype' then + self:pack_field(self.hashin, name, spec, e[name]) + elseif kind == 'string' then + self.hashin[name] = hash32(e[name], #e[name], seed) + elseif kind == 'empty' then + self:pack_field(self.hashin, name, spec, e[name]) + else + error("NYI: kind "..kind) + end + end + return hash32(self.hashin, ffi.sizeof(self.keys_t), seed) +end + +-- Same as entry hash but for keys_t +function List:leaf_hash (keys, seed) + for name, spec in pairs(self.keys) do + local kind = self:type_kind(spec) + if kind == 'ctype' then + self:pack_field(self.hashin, name, spec, keys[name]) + elseif kind == 'string' then + local str = self:str(keys[name]) + self.hashin[name] = hash32(str.str, str.len, seed) + elseif kind == 'empty' then + self:pack_field(self.hashin, name, spec, keys[name]) + else + error("NYI: kind "..kind) + end + end + return hash32(self.hashin, ffi.sizeof(self.keys_t), seed) +end + +function List:new_leaf (e, members, prev, next) + local o = 
self:alloc_leaf() + local leaf = self:leaf(o) + leaf.list.prev = prev or 0 -- NB: obj=0 is root node, can not be a leaf! + leaf.list.next = next or 0 + self:pack_fields(leaf.keys, e, self.keys) + self:pack_fields(leaf.members, members or e, self.members) + return o +end + +function List:update_leaf (o, members) + local leaf = self:leaf(o) + self:free_fields(leaf.members, self.members) + self:pack_fields(leaf.members, members, self.members) +end + +function List:destroy_leaf (o) + local leaf = self:leaf(o) + self:free_fields(leaf.keys, self.keys) + self:free_fields(leaf.members, self.members) + self:free_leaf(o) +end + +local node_index_mask = List.node_children - 1 +function List:node_index (node, d, h) + return band(node_index_mask, rshift(h, d)) +end + +function List:node_occupied (node, index, newval) + if newval == true then + node.occupied = bor(node.occupied, lshift(1, index)) + elseif newval == false then + node.occupied = band(node.occupied, bnot(lshift(1, index))) + end + return band(node.occupied, lshift(1, index)) > 0 +end + +function List:node_leaf (node, index, newval) + if newval == true then + node.leaf = bor(node.leaf, lshift(1, index)) + elseif newval == false then + node.leaf = band(node.leaf, bnot(lshift(1, index))) + end + return band(node.leaf, lshift(1, index)) > 0 +end + +function List:next_hash_parameters (d, s, h) + if d + self.trie_width < self.hash_width then + return d + self.trie_width, s, h + else + return 0, s + 1, nil + end +end + +function List:prev_hash_parameters (d, s, h) + if d >= self.trie_width then + return d - self.trie_width, s, h + else + return self.hash_width - self.trie_width, s - 1, nil + end +end + +function List:entry_keys_equal (e, o) + local keys = self:leaf(o).keys + local cmp = self.hashin + ffi.fill(cmp, ffi.sizeof(cmp)) + for name, spec in pairs(self.keys) do + local kind = self:type_kind(spec) + if kind == 'string' then + if self:str_equal_string(keys[name], e[name]) then + cmp[name] = keys[name] + end + else 
+ self:pack_mandatory(cmp, name, kind, e[name]) + end + end + return C.memcmp(keys, cmp, ffi.sizeof(keys)) == 0 +end + +-- NB: finds any node matching the keys hash! +function List:find_node (k, r, d, s, h) + r = r or self.root + d = d or 0 + s = s or 0 + h = h or self:entry_hash(k, s) + local node = self:node(r) + local index = self:node_index(node, d, h) + if self:node_occupied(node, index) and + not self:node_leaf(node, index) + then + -- Continue searching in child node. + d, s, h = self:next_hash_parameters(d, s, h) + return self:find_node(k, node.children[index], d, s, h) + else + -- Found! + return r, d, s, h + end +end + +-- NB: finds leaf with matching keys in node. +function List:find_leaf (k, n, d, s, h) + local node = self:node(n) + local index = self:node_index(node, d, h) + if self:node_occupied(node, index) then + assert(self:node_leaf(node, index)) + local o = node.children[index] + if self:entry_keys_equal(k, o) then + return o + end + end +end + +-- NB: does not handle already existing identical keys! +function List:insert_leaf (o, r, d, s, h) + h = h or self:leaf_hash(self:leaf(o).keys, s) + local node = self:node(r) + local index = self:node_index(node, d, h) + if self:node_occupied(node, index) then + assert(self:node_leaf(node, index)) + -- Occupied by leaf, replace with node and insert + -- both existing and new leaves into new node. + local l = node.children[index] + local n = self:alloc_node() + self:node(n).parent = r + node.children[index] = n + self:node_leaf(node, index, false) + d, s, h = self:next_hash_parameters(d, s, h) + self:insert_leaf(l, n, d, s, nil) + self:insert_leaf(o, n, d, s, h) + else + -- Not occupied, insert leaf. + self:node_occupied(node, index, true) + self:node_leaf(node, index, true) + node.children[index] = o + end +end + +-- NB: does not handle non-existing keys! 
+function List:remove_child (k, r, d, s, h) + local node = self:node(r) + local index = self:node_index(node, d, h) + assert(self:node_occupied(node, index)) + assert(self:node_leaf(node, index)) + -- Remove + self:node_occupied(node, index, false) + self:node_leaf(node, index, false) + node.children[index] = 0 + self:remove_obsolete_nodes(k, r, d, s, h) +end + +assert(ffi.abi("le")) +local t = ffi.new("union { uint32_t u[2]; double d; }") +local function msb_set (v) + -- https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogIEEE64Float + -- "Finding integer log base 2 of an integer + -- (aka the position of the highest bit set)" + -- + -- We use this function to find the only bit set. :-) + t.u[1] = 0x43300000 + t.u[0] = v + t.d = t.d - 4503599627370496.0 + return rshift(t.u[1], 20) - 0x3FF +end + +function List:remove_obsolete_nodes (k, r, d, s, h) + if r == self.root then + -- Node is the root, and never obsolete. + return + end + local node = self:node(r) + local d, s, h = self:prev_hash_parameters(d, s, h) + h = h or self:entry_hash(k, s) + local parent = self:node(node.parent) + local parent_index = self:node_index(parent, d, h) + if node.occupied == 0 then + -- Node is now empty, remove from parent. + error("unreachable") + -- ^- This case never happens, because we only ever create + -- new nodes with at least two leaves (the new leaf, and + -- the displaced leaf). + parent.children[parent_index] = 0 + self:node_occupied(parent, parent_index, false) + self:free_node(r) + return self:remove_obsolete_nodes(k, node.parent, d, s, h) + elseif band(node.occupied, node.occupied-1) == 0 then + -- Node has only one child, move it to parent. 
+ local index = msb_set(node.occupied) + parent.children[parent_index] = node.children[index] + if self:node_leaf(node, index) then + self:node_leaf(parent, parent_index, true) + else + self:node(node.children[index]).parent = node.parent + end + self:free_node(r) + return self:remove_obsolete_nodes(k, node.parent, d, s, h) + end +end + +function List:append_leaf (o, prev) + prev = prev or self.last + if prev == 0 then -- empty (0 is reserved for root node) + self.first, self.last = o, o + else + local leaf = self:leaf(o) + local pleaf = self:leaf(prev) + leaf.list.prev = prev + leaf.list.next = pleaf.list.next + pleaf.list.next = o + if leaf.list.next == 0 then + -- print("new last") + self.last = o + end + end + self.length = self.length + 1 +end + +function List:unlink_leaf (o) + local leaf = self:leaf(o) + if self.first == o then + self.first = leaf.list.next + end + if self.last == o then + self.last = leaf.list.prev + end + if leaf.list.prev ~= 0 then + local prev = self:leaf(leaf.list.prev) + prev.list.next = leaf.list.next + end + if leaf.list.next ~= 0 then + local next = self:leaf(leaf.list.next) + next.list.prev = leaf.list.prev + end + self.length = self.length - 1 +end + +function List:leaf_entry (o) + local leaf = self:leaf(o) + local ret = {} + self:unpack_fields(ret, leaf.keys, self.keys) + self:unpack_fields(ret, leaf.members, self.members) + return ret +end + +function List:add_entry (e, update, members) + local n, d, s, h = self:find_node(e) + local o = self:find_leaf(e, n, d, s, h) + if o then + if update then + self:update_leaf(o, members or e) + else + error("Attempting to add duplicate entry to list") + end + else + local o = self:new_leaf(e, members) + self:insert_leaf(o, n, d, s, h) + self:append_leaf(o) + end +end + +function List:add_or_update_entry (e, members) + self:add_entry(e, true, members) +end + +function List:find_entry (k) + local o = self:find_leaf(k, self:find_node(k)) + if o then + return self:leaf_entry(o) + end +end + 
+function List:remove_entry (k) + local n, d, s, h = self:find_node(k) + local o = self:find_leaf(k, n, d, s, h) + if o then + self:remove_child(k, n, d, s, h) + self:unlink_leaf(o) + self:destroy_leaf(o) + return true + end +end + +function List:ipairs () + local n = 1 + local o = self.first + assert(type(o) == 'number') + return function () + if o == 0 then + return + end + local i = n + local e = self:leaf_entry(o) + n = n + 1 + o = self:leaf(o).list.next + return i, e + end +end + +List.header_t = ffi.typeof[[ + struct { + double first, last, length; + } __attribute__((packed)) +]] + +function List:save (stream) + local header = self.header_t(self.first, self.last, self.length) + stream:write_struct(self.header_t, header) + self.heap:save(stream) +end + +function List:load (stream, keys, members, lvalues) + local self = self:_new(keys, members) + local h = stream:read_struct(nil, self.header_t) + self.heap = Heap:load(stream) + self.first = h.first + self.last = h.last + self.root = 0 -- heap obj=0 reserved for root node + self.length = h.length + self.lvalues = lvalues + return self +end + +function selftest_list () + local l = List:new( + {id={ctype='uint32_t'}, name={type='string'}}, + {value={ctype='double'}, description={type='string'}} + ) + -- print("leaf_t", ffi.sizeof(l.leaf_t)) + -- print("node_t", ffi.sizeof(l.node_t)) + l:add_entry { + id=42, name="foobar", + value=3.14, description="PI" + } + local root = l:node(l.root) + assert(root.occupied == lshift(1, 14)) + assert(root.occupied == root.leaf) + -- print(l.root, root.occupied, root.leaf, root.children[14]) + local e1 = l:find_entry {id=42, name="foobar"} + assert(e1) + assert(e1.id == 42 and e1.name == "foobar") + assert(not l:find_entry {id=43, name="foobar"}) + assert(not l:find_entry {id=42, name="foo"}) + -- for k,v in pairs(e1) do print(k,v) end + l:add_entry { + id=127, name="hey", + value=1/0, description="inf" + } + for i, e in l:ipairs() do + if i == 1 then + assert(e.id == 42) + elseif 
i == 2 then + assert(e.id == 127) + else + error("unexpected index: "..i) + end + end + + -- Test empty interator + for _ in List:new({id={ctype='uint64_t'}}, {}):ipairs() do + error("list is empty") + end + + -- Test update + local ok = pcall(function () + l:add_entry { + id=127, name="hey", + value=1, description="one" + } + end) + assert(not ok) + l:add_or_update_entry { + id=127, name="hey", + value=1, description="one" + } + local e_updated = l:find_entry {id=127, name="hey"} + assert(e_updated) + assert(e_updated.value == 1) + assert(e_updated.description == "one") + + -- Test collisions + local lc = List:new({id={ctype='uint64_t'}}, {}) + -- print("leaf_t", ffi.sizeof(lc.leaf_t)) + -- print("node_t", ffi.sizeof(lc.node_t)) + lc:add_entry {id=0ULL} + lc:add_entry {id=4895842651ULL} + local root = lc:node(lc.root) + assert(root.leaf == 0) + assert(root.occupied == lshift(1, 12)) + -- print(lc.root, root.occupied, root.leaf, root.children[12]) + local e1 = lc:find_entry {id=0ULL} + local e2 = lc:find_entry {id=4895842651ULL} + assert(e1) + assert(e2) + assert(e1.id == 0ULL) + assert(e2.id == 4895842651ULL) + assert(lc:remove_entry {id=0ULL}) + assert(lc:remove_entry {id=4895842651ULL}) + assert(lc.length == 0) + assert(root.occupied == 0) + + -- Test optional + local l = List:new( + {id={type='string'}}, + {value={ctype='double', optional=true}, + description={type='string', optional=true}} + ) + l:add_entry{ + id="foo", + value=3.14, + description="PI" + } + l:add_entry{ + id="foo1", + value=42 + } + l:add_entry{ + id="foo2", + description="none" + } + l:add_entry{ + id="foo3" + } + assert(l:find_entry{id="foo"}.value == 3.14) + assert(l:find_entry{id="foo"}.description == "PI") + assert(l:find_entry{id="foo1"}.value == 42) + assert(l:find_entry{id="foo1"}.description == nil) + assert(l:find_entry{id="foo2"}.value == nil) + assert(l:find_entry{id="foo2"}.description == "none") + assert(l:find_entry{id="foo3"}.value == nil) + 
assert(l:find_entry{id="foo3"}.description == nil) + + -- Test empty type + local l = List:new( + {id={type='string'}, e={type='empty'}}, + {value={type='empty', optional=true}} + ) + l:add_entry {id="foo", e=true} + l:add_entry {id="foo1", e=true, value=true} + assert(l:find_entry{id="foo", e=true}.value == nil) + assert(l:find_entry{id="foo1", e=true}.value == true) + local ok, err = pcall(function () l:add_entry {id="foo2"} end) + assert(not ok) + assert(err:match("Missing value: e")) + + -- Test lvalues + local l = List:new( + {id={type='string'}}, + {value={type='lvalue'}} + ) + l:add_entry {id="foo", value={}} + l:add_entry {id="foo1", value={bar=true}} + assert(#l:find_entry{id="foo"}.value == 0) + assert(l:find_entry{id="foo1"}.value.bar == true) + l:add_or_update_entry {id="foo1", value={bar=false}} + l:remove_entry {id="foo"} + assert(l.lvalues.value[1] == nil) + assert(l.lvalues.value[2].bar == false) + + -- Test optional lvalue + local l = List:new( + {id={type='string'}}, + {o={type='lvalue', optional=true}} + ) + l:add_entry {id="foo"} + l:add_entry {id="foo1", o={bar=true}} + assert(l:find_entry{id="foo"}.o == nil) + assert(l:find_entry{id="foo1"}.o.bar == true) + l:add_or_update_entry {id="foo1", o={bar=false}} + l:remove_entry {id="foo"} + assert(l.lvalues.o[1].bar == false) + + -- Test struct + local ts = "struct { uint16_t x; uint16_t y; }" + local l = List:new( + {id={type='string'}}, + {value={ctype=ts}} + ) + l:add_entry {id="foo", value={x=1, y=2}} + l:add_entry {id="foo1", value={x=2, y=3}} + assert(l:find_entry{id="foo"}.value.x == 1) + assert(l:find_entry{id="foo"}.value.y == 2) + assert(l:find_entry{id="foo1"}.value.x == 2) + assert(l:find_entry{id="foo1"}.value.y == 3) + l:add_or_update_entry {id="foo1", value={x=3,y=4}} + assert(l:find_entry{id="foo1"}.value.x == 3) + assert(l:find_entry{id="foo1"}.value.y == 4) + l:add_or_update_entry {id="foo1", value=l:find_entry{id="foo"}.value} + assert(l:find_entry{id="foo1"}.value.x == 1) + 
assert(l:find_entry{id="foo1"}.value.y == 2) + l:add_entry {id="foo2", value=typeof(ts)({x=7, y=8})} + assert(l:find_entry{id="foo2"}.value.x == 7) + assert(l:find_entry{id="foo2"}.value.y == 8) + + -- Test optional struct + local l = List:new( + {id={type='string'}}, + {value={ctype=ts, optional=true}} + ) + l:add_entry {id="foo"} + l:add_entry {id="foo1", value={x=2, y=3}} + assert(l:find_entry{id="foo"}.value == nil) + assert(l:find_entry{id="foo1"}.value.x == 2) + assert(l:find_entry{id="foo1"}.value.y == 3) + l:add_or_update_entry {id="foo", value={x=1,y=2}} + assert(l:find_entry{id="foo"}.value.x == 1) + assert(l:find_entry{id="foo"}.value.y == 2) + l:add_or_update_entry {id="foo1", value=nil} + assert(l:find_entry{id="foo1"}.value == nil) + + -- Test list ordering + local l = List:new({n={ctype='uint64_t'}},{}) + l:add_entry({n=1}) + l:add_entry({n=2}) + l:add_entry({n=3}) + l:add_entry({n=4}) + l:add_entry({n=5}) + local n, count = 0, 0 + for i, e in l:ipairs() do + assert(e.n > n, "out of order: "..i) + n = e.n + count = count + 1 + end + assert(count == l.length) + + -- Test empty iterator + local l = List:new({n={ctype='uint64_t'}},{}) + l:add_entry({n=1}) + l:remove_entry({n=1}) + for i, e in l:ipairs() do + assert(false) + end + + -- Test load/save + local keys = {id={type='string'}} + local members = {value={type='lvalue'}} + local l = List:new(keys, members) + l:add_entry {id="foo", value={}} + l:add_entry {id="foo1", value={bar=true}} + local memstream = require("lib.stream.mem") + local tmp = memstream.tmpfile() + l:save(tmp) + tmp:seek('set', 0) + local l = List:load(tmp, keys, members, l.lvalues) + assert(#l:find_entry{id="foo"}.value == 0) + assert(l:find_entry{id="foo1"}.value.bar == true) +end + + +local ListMeta = {} + +function ListMeta:__len () + return self.list.length +end + +function ListMeta:__index (k) + return self.list:find_entry(k) +end + +function ListMeta:__newindex (k, members) + if members ~= nil then + 
self.list:add_or_update_entry(k, members) + else + self.list:remove_entry(k) + end +end + +function ListMeta:__ipairs () + return self.list:ipairs() +end + +ListMeta.__pairs = ListMeta.__ipairs + +local function pairs_for_key (iter, key) + return function () + local i, e = iter() + if i then + return e[key], e + end + end +end + +local function make_mt_for_single_key (key) + local mt = { + __len = ListMeta.__len, + __ipairs = ListMeta.__ipairs + } + function mt:__index (k) + return ListMeta.__index(self, {[key]=k}) + end + function mt:__newindex (k, members) + return ListMeta.__newindex(self, {[key]=k}, members) + end + function mt:__pairs () + return pairs_for_key(ListMeta.__ipairs(self), key) + end + return mt +end + +local list_mt_cache = {} +local function mt_for_single_key (key) + if not list_mt_cache[key] then + list_mt_cache[key] = make_mt_for_single_key(key) + end + return list_mt_cache[key] +end + +local function list_meta (keys, members) + local numkeys = 0 + local key1 + for key in pairs(keys) do + key1 = key1 or key + numkeys = numkeys + 1 + end + if numkeys == 1 then + return mt_for_single_key(key1) + elseif numkeys > 1 then + return ListMeta + else + error("List needs at least one key") + end +end + +function new (keys, members) + local mt = list_meta(keys, members) + return setmetatable({list=List:new(keys, members)}, mt) +end + +function object (list) + if type(list) == 'table' then + local o = rawget(list, 'list') + local mt = getmetatable(o) + if mt and mt.__index == List then + return o + end + end +end + +function load (stream, keys, members, lvalues) + local mt = list_meta(keys, members) + return setmetatable({list=List:load(stream, keys, members, lvalues)}, mt) +end + +local function selftest_listmeta () + local l1 = new( + {id={ctype='uint32_t'}, name={type='string'}}, + {value={ctype='double'}, description={type='string'}} + ) + l1[{id=0, name='foo'}] = {value=1.5, description="yepyep"} + l1[{id=1, name='bar'}] = {value=3.14, 
description="PI"} + local ok, err = pcall (function() + object(l1):add_entry { + id=0, name='foo', + value=0, description="should fail" + } + end) + assert(not ok and err:match("Attempting to add duplicate entry to list")) + assert(#l1 == 2) + assert(#l1 == object(l1).length) + assert(l1[{id=0, name='foo'}].value == 1.5) + for i, entry in ipairs(l1) do + if i == 1 then + assert(entry.name == 'foo') + elseif i == 2 then + assert(entry.name == 'bar') + else + error("unexpected entry: "..i) + end + end + l1[{id=0, name='foo'}] = nil + assert(l1[{id=0, name='foo'}] == nil) + assert(object(l1):find_entry({id=1, name='bar'}).value == 3.14) + -- Test single key meta + local l = new({id={type='string'}}, {value={type='string'}}) + l.foo = {value="bar"} + assert(l.foo) + assert(l.foo.value == "bar") + assert(#l == 1) + l.foo = nil + assert(l.foo == nil) + assert(#l == 0) + local l = new({id={ctype='double'}}, {value={type='string'}}) + l[3] = {value="bar"} + assert(l[3]) + assert(#l == 1) + for id, e in pairs(l) do + assert(id == 3) + assert(e.value == "bar") + end + for i, e in ipairs(l) do + assert(i == 1) + assert(e.id == 3) + assert(e.value == "bar") + end + l[3] = nil + assert(l[3] == nil) + assert(#l == 0) + -- Test object() + assert(object(l) == rawget(l, 'list')) + assert(object({}) == nil) + assert(object(42) == nil) +end + +function selftest_ip () + local yang_util = require("lib.yang.util") + local ipv6 = require("lib.protocol.ipv6") + local l = new( + {ip={ctype='uint32_t'}, port={ctype='uint16_t'}}, + {b4_address={ctype='uint8_t[16]'}} + ) + math.randomseed(0) + for i=1, 1e5 do + local b4 = ffi.new("uint8_t[16]") + for j=0,15 do b4[j] = i end + object(l):add_entry { + ip = math.random(0xffffffff), + port = bit.band(0xffff, i), + b4_address = b4 + } + end + print("added "..#l.." 
entries") + local middle = math.floor(#l/2) + local entry + for i, e in ipairs(l) do + if i == middle then + entry = e + print("Iterated to entry #"..middle) + assert(e.ip == l[e].ip) + print("Looked up middle entry with ip="..yang_util.ipv4_ntop(e.ip)) + print("B4 address is: "..ipv6:ntop(e.b4_address)) + break + end + end + l[entry] = nil + print("Removed middle entry") + assert(not l[entry]) + print("Asserted entry is no longer present") +end + +function selftest () + print("Selftest: Heap") + selftest_heap() + print("Selftest: List") + selftest_list() + print("Selftest: ListMeta") + selftest_listmeta() + print("Selftest: ip bench") + selftest_ip() +end \ No newline at end of file diff --git a/src/lib/yang/path.lua b/src/lib/yang/path.lua index 9427b0e278..839d79ed79 100644 --- a/src/lib/yang/path.lua +++ b/src/lib/yang/path.lua @@ -5,42 +5,18 @@ -- of representing a path. The path provided is a subset of XPath supporting -- named keys such as [addr=1.2.3.4] and also basic positional querying -- for arrays e.g [position()=1] for the first element. --- --- The structure of the path is dependent on the type the node is. The --- conversions are as follows: --- --- Scalar fields: --- A lua string of the member name --- Struct fields: --- A lua string of the member name --- Array fields: --- This is a table which has a "name" property specifying member --- name and a "key" field which is a 1 based integer to specify the --- position in the array. --- Table fields: --- This is a table which has a "name" property specifying member --- name and has a "keys" (not key) property which is either: --- - A string representing the key if the table is string keyed. --- - A lua table with corrisponding leaf names as the key and the --- value as the value. 
module(..., package.seeall) -local equal = require("core.lib").equal +local valuelib = require("lib.yang.value") local datalib = require("lib.yang.data") local normalize_id = datalib.normalize_id +local lib = require("core.lib") -local function table_keys(t) - local ret = {} - for k, v in pairs(t) do table.insert(ret, k) end - return ret -end -local syntax_error = function (str, pos) - local header = "Syntax error in " - io.stderr:write(header..str.."\n") - io.stderr:write(string.rep(" ", #header + pos-1)) - io.stderr:write("^\n") - os.exit(1) +local function syntax_error(str, pos) + error("Syntax error in:\n" + ..str.."\n" + ..string.rep(" ", pos-1).."^\n") end local function extract_parts (fragment) @@ -77,61 +53,47 @@ end -- Finds the grammar node for a fragment in a given grammar. local function extract_grammar_node(grammar, name) - local handlers = {} - function handlers.struct () return grammar.members[name] end - function handlers.table () - if grammar.keys[name] == nil then - return grammar.values[name] - else - return grammar.keys[name] - end - end - function handlers.choice () - for case_name, case in pairs(grammar.choices) do - if case[name] ~= nil then return case[name] end + local function expand_choices (members) + for _, member in pairs(members) do + if member.type == 'choice' then + local node = extract_grammar_node(member, name) + if node then return node end + end end end - return assert(assert(handlers[grammar.type], grammar.type)(), name) -end - --- Converts an XPath path to a lua array consisting of path componants. 
--- A path component can then be resolved on a yang data tree: -function convert_path(grammar, path) - local path = normalize_path(path) local handlers = {} - function handlers.scalar(grammar, fragment) - return {name=fragment.name, grammar=grammar} + function handlers.struct (node) + if node.members[name] then return node.members[name] end + return expand_choices(node.members) end - function handlers.struct(grammar, fragment) - return {name=fragment.name, grammar=grammar} + function handlers.list (node) + if node.keys[name] then return node.keys[name] end + if node.values[name] then return node.values[name] end + return expand_choices(node.values) end - function handlers.table(grammar, fragment) - return {name=fragment.name, keys=fragment.query, grammar=grammar} + function handlers.choice (node) + for _, case in pairs(node.choices) do + if case[name] then return case[name] end + local node = expand_choices(case) + if node then return node end + end end - function handlers.array(grammar, fragment) - local position = fragment.query["position()"] - return {name=fragment.name, key=tonumber(position), grammar=grammar} + function handlers.scalar () + error("Invalid path: trying to access '"..name.."' in scalar.") end - local function handle(grammar, fragment) - return assert(handlers[grammar.type], grammar.type)(grammar, fragment) + function handlers.array () + error("Invalid path: trying to access '"..name.."' in leaf-list.") end - - if path == "/" then return {} end - - local ret = {} - local node = grammar - if path:sub(1, 1) == "/" then path = path:sub(2) end -- remove leading / - if path:sub(-1) == "/" then path = path:sub(1, -2) end -- remove trailing / - for element in path:split("/") do - local parts = extract_parts(element) - node = extract_grammar_node(node, parts.name) - local luapath = handle(node, parts) - table.insert(ret, luapath) + -- rpc + function handlers.sequence (node) + if node.members[name] then return node.members[name] end end - return ret + 
local node = assert(handlers[grammar.type], grammar.type)(grammar) + return node or error("Invalid path: '"..name.."' is not in schema.") end -function parse_path (path) +-- Converts an XPath path to a lua array consisting of path components. +local function parse_path1 (path) local depth = 0 local t, token = {}, '' local function insert_token () @@ -157,27 +119,150 @@ function parse_path (path) end insert_token() - local ret = {} + local ret = {relative = not path:match("^/")} for _, element in ipairs(t) do if element ~= '' then table.insert(ret, extract_parts(element)) end end return ret end -function normalize_path(path) +local function parse_query(grammar, query) + if grammar.type == 'array' then + local idx + for key, value in pairs(query) do + if key == 'position()' then + idx = tonumber(value) + else + error("Invalid query: leaf-list can only be indexed by position.") + end + end + if (not idx) or idx < 1 or idx ~= math.floor(idx) then + error("Invalid query: leaf-list can only be indexed by positive integers.") + end + return idx + elseif grammar.type == 'list' then + if not grammar.list.has_key then + error("Invalid query: list has no key.") + end + local key = {} + for k,_ in pairs(query) do + if not grammar.keys[k] then + error("Invalid query:'"..k.."' is not a list key.") + end + end + for k,grammar in pairs(grammar.keys) do + local v = query[k] or grammar.default + if v == nil then + error("Invalid query: missing required key '"..k.."'") + end + local key_primitive_type = grammar.argument_type.primitive_type + local parser = valuelib.types[key_primitive_type].parse + key[normalize_id(k)] = parser(v, 'path query value') + end + return key + else + error("Invalid query: can only query list or leaf-list.") + end +end + +function parse_path(path, grammar) + if type(path) == 'string' then + path = parse_path1(path) + end + if grammar then + for _, part in ipairs(path) do + grammar = extract_grammar_node(grammar, part.name) + part.grammar = grammar + for _ 
in pairs(part.query) do + part.key = parse_query(grammar, part.query) + break + end + end + end + return path +end + +local function unparse_query(grammar, key) + if grammar.type == 'array' then + return {['position()']=tonumber(key)} + elseif grammar.type == 'list' then + if not grammar.list.has_key then + error("Invalid key: list has no key.") + end + local query = {} + for k,grammar in pairs(grammar.keys) do + local key_primitive_type = grammar.argument_type.primitive_type + local tostring = valuelib.types[key_primitive_type].tostring + local id = normalize_id(k) + if key[id] then + query[k] = tostring(key[id]) + elseif grammar.default then + query[k] = grammar.default + else + error("Invalid key: missing required key '"..k.."'") + end + end + return query + else + error("Invalid key: can only query list or leaf-list.") + end +end + +function unparse_path(path, grammar) + path = lib.deepcopy(path) + for _, part in ipairs(path) do + grammar = extract_grammar_node(grammar, part.name) + part.grammar = grammar + if part.key then + part.query = unparse_query(grammar, part.key) + end + end + return path +end + +function normalize_path(path, grammar) + path = parse_path(path, grammar) local ret = {} - for _,part in ipairs(parse_path(path)) do + for _,part in ipairs(path) do local str = part.name - local keys = table_keys(part.query) + local keys = {} + for key in pairs(part.query) do + table.insert(keys, key) + end table.sort(keys) for _,k in ipairs(keys) do str = str..'['..k..'='..part.query[k]..']' end table.insert(ret, str) end - return '/'..table.concat(ret, '/') + return ((path.relative and '') or '/')..table.concat(ret, '/') +end + +function parse_relative_path(path, node_path, grammar) + path = parse_path(path) + if not path.relative then + return parse_path(path, grammar) + end + node_path = parse_path(node_path, grammar) + assert(not node_path.relative, "node_path has to be absolute.") + local apath = {relative=false} + for _, part in ipairs(node_path) do + 
table.insert(apath, part) + end + for i, part in ipairs(path) do + if part.name == '.' or part.name == 'current()' then + assert(i==1, "Invalid path: '"..part.name.."' has to be first component.") + elseif part.name == '..' then + assert(#apath >= 1, "Invalid path: attempt to traverse up root (/..).") + table.remove(apath, #apath) + else + table.insert(apath, part) + end + end + return parse_path(apath, grammar) end  function selftest() print("selftest: lib.yang.path") + local util = require("lib.yang.util") local schemalib = require("lib.yang.schema") local schema_src = [[module snabb-simple-router { namespace snabb:simple-router; @@ -200,30 +285,34 @@ function selftest() local grammar = datalib.config_grammar_from_schema(scm)  -- Test path to lua path. - local path = convert_path(grammar,"/routes/route[addr=1.2.3.4]/port") + local path = parse_path("/routes/route[addr=1.2.3.4]/port", grammar) assert(path[1].name == "routes") assert(path[2].name == "route") - assert(path[2].keys) - assert(path[2].keys["addr"] == "1.2.3.4") + assert(path[2].query.addr == "1.2.3.4") + assert(path[2].key.addr == util.ipv4_pton("1.2.3.4")) assert(path[3].name == "port") - local path = convert_path(grammar, "/blocked-ips[position()=4]/") + local path = parse_path("/blocked-ips[position()=4]/", grammar) assert(path[1].name == "blocked-ips") + assert(path[1].query['position()'] == "4") assert(path[1].key == 4) - assert(normalize_path('') == '/') + assert(normalize_path('') == '') assert(normalize_path('//') == '/') assert(normalize_path('/') == '/') assert(normalize_path('//foo//bar//') == '/foo/bar') assert(normalize_path('//foo[b=1][c=2]//bar//') == '/foo[b=1][c=2]/bar') assert(normalize_path('//foo[c=1][b=2]//bar//') == '/foo[b=2][c=1]/bar') - assert(extract_parts('//foo[b=1]')) - - parse_path('/alarms/alarm-list/alarm'.. - '[resource=alarms/alarm-list/alarm/related-alarm/resource]'..
- '[alarm-type-id=/alarms/alarm-list/alarm/related-alarm/alarm-type-id]') + local path = + parse_path('/alarms/alarm-list/alarm'.. + '[resource=alarms/alarm-list/alarm/related-alarm/resource]'.. + '[alarm-type-id=/alarms/alarm-list/alarm/related-alarm/alarm-type-id]') + assert(#path == 3) + assert(path[3].name == 'alarm') + assert(path[3].query.resource == "alarms/alarm-list/alarm/related-alarm/resource") + assert(path[3].query['alarm-type-id'] == "/alarms/alarm-list/alarm/related-alarm/alarm-type-id") print("selftest: ok") end diff --git a/src/lib/yang/path_data.lua b/src/lib/yang/path_data.lua index 991d115b10..271e620a31 100644 --- a/src/lib/yang/path_data.lua +++ b/src/lib/yang/path_data.lua @@ -3,151 +3,107 @@ module(..., package.seeall) local ffi = require("ffi") -local lib = require("core.lib") local data = require("lib.yang.data") local value = require("lib.yang.value") local schema = require("lib.yang.schema") -local parse_path = require("lib.yang.path").parse_path +local path = require("lib.yang.path") +local parse_path = path.parse_path +local unparse_path = path.unparse_path +local parse_relative_path = path.parse_relative_path +local normalize_path = path.normalize_path local util = require("lib.yang.util") -local cltable = require("lib.cltable") +local list = require("lib.yang.list") local normalize_id = data.normalize_id +local lib = require("core.lib") -local function table_keys(t) - local ret = {} - for k, v in pairs(t) do table.insert(ret, k) end - return ret -end - -function prepare_array_lookup(query) - if not lib.equal(table_keys(query), {"position()"}) then - error("Arrays can only be indexed by position.") - end - local idx = tonumber(query["position()"]) - if idx < 1 or idx ~= math.floor(idx) then - error("Arrays can only be indexed by positive integers.") +local function compute_struct_getter(name, getter) + local id = normalize_id(name) + return function (data) + local struct = getter(data) + if struct[id] ~= nil then + return struct[id] + 
else + error("Container has no member '"..name.."'") + end end - return idx end -function prepare_table_lookup(keys, ctype, query) - local static_key = ctype and data.typeof(ctype)() or {} - for k,_ in pairs(query) do - if not keys[k] then error("'"..k.."' is not a table key") end - end - for k,grammar in pairs(keys) do - local v = query[k] or grammar.default - if v == nil then - error("Table query missing required key '"..k.."'") +local function compute_array_getter(idx, getter) + return function (data) + local array = getter(data) + if idx > #array then + error("Index "..idx.." is out of bounds") end - local key_primitive_type = grammar.argument_type.primitive_type - local parser = value.types[key_primitive_type].parse - static_key[normalize_id(k)] = parser(v, 'path query value') + return array[idx] end - return static_key end --- Returns a resolver for a particular schema and *lua* path. -function resolver(grammar, path_string) - local function ctable_getter(key, getter) - return function(data) - local data = getter(data):lookup_ptr(key) - if data == nil then error("Not found") end - return data.value - end - end - local function table_getter(key, getter) - return function(data) - local data = getter(data)[key] - if data == nil then error("Not found") end - return data - end - end - local function slow_table_getter(key, getter) - return function(data) - for k,v in pairs(getter(data)) do - if lib.equal(k, key) then return v end - end - error("Not found") - end - end - local function compute_table_getter(grammar, key, getter) - if grammar.native_key then - return table_getter(key[normalize_id(grammar.native_key)], getter) - elseif grammar.key_ctype and grammar.value_ctype then - return ctable_getter(key, getter) - elseif grammar.key_ctype then - return table_getter(key, getter) +local function compute_list_getter(key, getter) + return function (data) + local l = list.object(getter(data)) + local entry = l:find_entry(key) + if entry ~= nil then + return entry else 
- return slow_table_getter(key, getter) + error("List has no such entry") end end - local function handle_table_query(grammar, query, getter) - local key = prepare_table_lookup(grammar.keys, grammar.key_ctype, query) - local child_grammar = {type="struct", members=grammar.values, - ctype=grammar.value_ctype} - local child_getter = compute_table_getter(grammar, key, getter) - return child_getter, child_grammar - end - local function handle_array_query(grammar, query, getter) - local idx = prepare_array_lookup(query) - -- Pretend that array elements are scalars. - local child_grammar = {type="scalar", argument_type=grammar.element_type, - ctype=grammar.ctype} - local function child_getter(data) - local array = getter(data) - if idx > #array then error("Index out of bounds") end - return array[idx] - end - return child_getter, child_grammar +end + +local function compute_getter(grammar, part, getter) + if grammar.type == 'struct' or grammar.type == 'sequence' then + getter = compute_struct_getter(part.name, getter) + grammar = part.grammar + else + error("Invalid path: '"..part.name.."' is not a container") end - local function handle_query(grammar, query, getter) - if lib.equal(table_keys(query), {}) then return getter, grammar end + if part.key then if grammar.type == 'array' then - return handle_array_query(grammar, query, getter) - elseif grammar.type == 'table' then - return handle_table_query(grammar, query, getter) + getter = compute_array_getter(part.key, getter) + -- Pretend that array elements are scalars. + grammar = {type="scalar", argument_type=grammar.element_type, + ctype=grammar.ctype} + elseif grammar.type == 'list' then + getter = compute_list_getter(part.key, getter) + -- Pretend that list entries are structs. 
+ grammar = {type="struct", members=grammar.values, + ctype=grammar.value_ctype} else - error("Path query parameters only supported for structs and tables.") + error("Invalid path: '"..part.name.."' can not be queried") end end - local function compute_getter(grammar, name, query, getter) - local child_grammar - child_grammar = grammar.members[name] - if not child_grammar then - for member_name, member in pairs(grammar.members) do - if child_grammar then break end - if member.type == 'choice' then - for case_name, case in pairs(member.choices) do - if child_grammar then break end - if case[name] then child_grammar = case[name] end - end - end - end - end - if not child_grammar then - error("Struct has no field named '"..name.."'.") - end - local id = normalize_id(name) - local function child_getter(data) - local struct = getter(data) - local child = struct[id] - if child == nil then - error("Struct instance has no field named '"..name.."'.") - end - return child - end - return handle_query(child_grammar, query, child_getter) - end - local getter, grammar = function(data) return data end, grammar - for _, elt in ipairs(parse_path(path_string)) do - -- All non-leaves of the path tree must be structs. - if grammar.type ~= 'struct' then error("Invalid path.") end - getter, grammar = compute_getter(grammar, elt.name, elt.query, getter) + return getter, grammar +end + +-- Returns a resolver for a particular schema and *lua* path. 
+function resolver(grammar, path) + path = parse_path(path, grammar) + local getter = function(data) return data end + for _, part in ipairs(path) do + getter, grammar = compute_getter(grammar, part, getter) end return getter, grammar end + resolver = util.memoize(resolver) +local function grammar_for_schema(schema, path, is_config) + local grammar = data.data_grammar_from_schema(schema, is_config ~= false) + local path = parse_path(path or '/', grammar) + if #path > 0 then + return path[#path].grammar + else + return grammar + end +end + +function grammar_for_schema_by_name(schema_name, path, is_config) + local schema = schema.load_schema_by_name(schema_name) + return grammar_for_schema(schema, path, is_config) +end + +grammar_for_schema_by_name = util.memoize(grammar_for_schema_by_name) + local function printer_for_grammar(grammar, path, format, print_default) local getter, subgrammar = resolver(grammar, path) local printer @@ -195,85 +151,61 @@ function parser_for_schema_by_name(schema_name, path) end parser_for_schema_by_name = util.memoize(parser_for_schema_by_name) -local function parsed_path_to_string (path) - local ret = {} - for _,v in ipairs(path) do - local query = {} - for k,v in pairs(v.query or {}) do - table.insert(query, '['..k..'='..v..']') - end - query = table.concat(query, '') - table.insert(ret, v.name..query) - end - return '/'..table.concat(ret, '/') -end - local function setter_for_grammar(grammar, path) if path == "/" then return function(config, subconfig) return subconfig end end - local head = parse_path(path) + local head = parse_path(path, grammar) local tail = table.remove(head) - local tail_name, query = tail.name, tail.query - head = parsed_path_to_string(head) - if lib.equal(query, {}) then - -- No query; the simple case. 
+ local tail_name, tail_key = tail.name, tail.key + local target = head[#head] + local target_name, target_key = target.name, target.key + if tail_key then + -- The path ends in a query; it must denote an array or + -- list item. + table.insert(head, {name=tail_name, query={}}) local getter, grammar = resolver(grammar, head) - assert(grammar.type == 'struct') - local tail_id = data.normalize_id(tail_name) - return function(config, subconfig) - getter(config)[tail_id] = subconfig - return config - end - end - - -- Otherwise the path ends in a query; it must denote an array or - -- table item. - local getter, grammar = resolver(grammar, head..'/'..tail_name) - if grammar.type == 'array' then - local idx = prepare_array_lookup(query) - return function(config, subconfig) - local array = getter(config) - assert(idx <= #array) - array[idx] = subconfig - return config - end - elseif grammar.type == 'table' then - local key = prepare_table_lookup(grammar.keys, grammar.key_ctype, query) - if grammar.native_key then - key = key[data.normalize_id(grammar.native_key)] - return function(config, subconfig) - local tab = getter(config) - assert(tab[key] ~= nil) - tab[key] = subconfig - return config - end - elseif grammar.key_ctype and grammar.value_ctype then + if grammar.type == 'array' then + local idx = tail_key return function(config, subconfig) - getter(config):update(key, subconfig) + local array = getter(config) + array[idx] = subconfig return config end - elseif grammar.key_ctype then - return function(config, subconfig) - local tab = getter(config) - assert(tab[key] ~= nil) - tab[key] = subconfig + elseif grammar.type == 'list' then + return function (config, subconfig) + local l = list.object(getter(config)) + l:add_or_update_entry(tail_key, subconfig) return config end else - return function(config, subconfig) - local tab = getter(config) - for k,v in pairs(tab) do - if lib.equal(k, key) then - tab[k] = subconfig - return config - end - end - error("Not found") - end 
+ error("Invalid path: '"..tail_name.."' can not be queried") + end + elseif target_key then + -- The path updates an entry in a collection; it must denote + -- a list item. + head[#head] = {name=target_name, query={}} + local getter, grammar = resolver(grammar, head) + local tail_id = data.normalize_id(tail_name) + assert(grammar.type == 'list') + return function (config, subconfig) + local l = list.object(getter(config)) + local entry = l:find_entry(target_key) + entry[tail_id] = subconfig + l:add_or_update_entry(entry) + return config end else - error('Query parameters only allowed on arrays and tables') + -- No query; the simple case. + local getter, grammar = resolver(grammar, head) + if grammar.type ~= 'struct' then + error("Invalid path: missing query for '"..tail.name.."'") + end + local tail_id = data.normalize_id(tail_name) + return function(config, subconfig) + getter(config)[tail_id] = subconfig + return config + end end end @@ -288,86 +220,32 @@ end setter_for_schema_by_name = util.memoize(setter_for_schema_by_name) local function adder_for_grammar(grammar, path) - local top_grammar = grammar local getter, grammar = resolver(grammar, path) if grammar.type == 'array' then - if grammar.ctype then - -- It's an FFI array; have to create a fresh one, sadly. - local setter = setter_for_grammar(top_grammar, path) - local elt_t = data.typeof(grammar.ctype) - local array_t = ffi.typeof('$[?]', elt_t) - return function(config, subconfig) - local cur = getter(config) - local new = array_t(#cur + #subconfig) - local i = 1 - for _,elt in ipairs(cur) do new[i-1] = elt; i = i + 1 end - for _,elt in ipairs(subconfig) do new[i-1] = elt; i = i + 1 end - return setter(config, util.ffi_array(new, elt_t)) - end - end - -- Otherwise we can add entries in place. 
return function(config, subconfig) local cur = getter(config) - for _,elt in ipairs(subconfig) do table.insert(cur, elt) end + for _,elt in ipairs(subconfig) do + cur[#cur+1] = elt + end return config end - elseif grammar.type == 'table' then + elseif grammar.type == 'list' then -- Invariant: either all entries in the new subconfig are added, -- or none are. - if grammar.native_key - or (grammar.key_ctype and not grammar.value_ctype) then - -- cltable or string-keyed table. - local pairs = grammar.key_ctype and cltable.pairs or pairs - return function(config, subconfig) - local tab = getter(config) - for k,_ in pairs(subconfig) do - if tab[k] ~= nil then error('already-existing entry') end - end - for k,v in pairs(subconfig) do tab[k] = v end - return config - end - elseif grammar.key_ctype and grammar.value_ctype then - -- ctable. - return function(config, subconfig) - local ctab = getter(config) - for entry in subconfig:iterate() do - if ctab:lookup_ptr(entry.key) ~= nil then - error('already-existing entry') - end - end - for entry in subconfig:iterate() do - ctab:add(entry.key, entry.value) - end - return config - end - elseif grammar.native_key or grammar.key_ctype then - -- cltable or native-keyed table. - local pairs = grammar.native_key and pairs or cltable.pairs - return function(config, subconfig) - local tab = getter(config) - for k,_ in pairs(subconfig) do - if tab[k] ~= nil then error('already-existing entry') end + return function(config, subconfig) + local l = list.object(getter(config)) + for i, entry in ipairs(subconfig) do + if l:find_entry(entry) then + error("Can not add already-existing list entry #"..i) end - for k,v in pairs(subconfig) do tab[k] = v end - return config end - else - -- Sad quadratic loop. 
- return function(config, subconfig) - local tab = getter(config) - for key,val in pairs(tab) do - for k,_ in pairs(subconfig) do - if lib.equal(key, k) then - error('already-existing entry', key) - end - end - end - for k,v in pairs(subconfig) do tab[k] = v end - return config + for _, entry in ipairs(subconfig) do + l:add_entry(entry) end + return config end else - error('Add only allowed on arrays and tables') + error("Invalid path: '"..tostring(path).."' is not a list or a leaf-list") end end @@ -382,74 +260,32 @@ end adder_for_schema_by_name = util.memoize(adder_for_schema_by_name) local function remover_for_grammar(grammar, path) - local top_grammar = grammar - local head = parse_path(path) + local head = parse_path(path, grammar) local tail = table.remove(head) - local tail_name, query = tail.name, tail.query - head = parsed_path_to_string(head) - local head_and_tail_name = head..'/'..tail_name - local getter, grammar = resolver(grammar, head_and_tail_name) + if not tail.key then error("Invalid path: missing query") end + local tail_name, key = tail.name, tail.key + table.insert(head, {name=tail_name, query={}}) + local getter, grammar = resolver(grammar, head) if grammar.type == 'array' then - if grammar.ctype then - -- It's an FFI array; have to create a fresh one, sadly. - local idx = prepare_array_lookup(query) - local setter = setter_for_grammar(top_grammar, head_and_tail_name) - local elt_t = data.typeof(grammar.ctype) - local array_t = ffi.typeof('$[?]', elt_t) - return function(config) - local cur = getter(config) - assert(idx <= #cur) - local new = array_t(#cur - 1) - for i,elt in ipairs(cur) do - if i < idx then new[i-1] = elt end - if i > idx then new[i-2] = elt end - end - return setter(config, util.ffi_array(new, elt_t)) - end - end - -- Otherwise we can remove the entry in place. 
+ local idx = key return function(config) local cur = getter(config) - assert(i <= #cur) - table.remove(cur, i) + if idx > #cur then + error("Leaf-list '"..tail_name.."' has no element #"..idx) + end + cur[idx] = nil + return config end - elseif grammar.type == 'table' then - local key = prepare_table_lookup(grammar.keys, grammar.key_ctype, query) - if grammar.native_key then - key = key[data.normalize_id(grammar.native_key)] - return function(config) - local tab = getter(config) - assert(tab[key] ~= nil) - tab[key] = nil - return config - end - elseif grammar.key_ctype and grammar.value_ctype then - return function(config) - getter(config):remove(key) - return config - end - elseif grammar.key_ctype then - return function(config) - local tab = getter(config) - assert(tab[key] ~= nil) - tab[key] = nil - return config - end - else - return function(config) - local tab = getter(config) - for k,v in pairs(tab) do - if lib.equal(k, key) then - tab[k] = nil - return config - end - end - error("Not found") + elseif grammar.type == 'list' then + return function(config) + local l = list.object(getter(config)) + if not l:remove_entry(key) then + error("List '"..tail_name.."' has no entry matching the query") end + return config end else - error("Invalid path: '"..tail_name.."' is not a list or a leaf-list") end end @@ -463,105 +299,6 @@ function remover_for_schema_by_name (schema_name, path) end remover_for_schema_by_name = util.memoize(remover_for_schema_by_name) -function leafref_checker_from_grammar(grammar) - -- Converts a relative path to an absolute path. - -- TODO: Consider moving it to /lib/yang/path.lua. 
- local function to_absolute_path (path, node_path) - path = path:gsub("current%(%)", node_path) - if path:sub(1, 1) == '/' then return path end - if path:sub(1, 2) == './' then - path = path:sub(3) - return node_path..'/'..path - end - while path:sub(1, 3) == '../' do - path = path:sub(4) - node_path = lib.dirname(node_path) - end - return node_path..'/'..path - end - local function leafref (node) - return node.argument_type and node.argument_type.leafref - end - -- Leafref nodes iterator. Returns node as value and full data path as key. - local function visit_leafref_paths (root) - local function visit (path, node) - if node.type == 'struct' then - for k,v in pairs(node.members) do visit(path..'/'..k, v) end - elseif node.type == 'array' then - -- Pass. - elseif node.type == 'scalar' then - if leafref(node) then - coroutine.yield(path, node) - else - -- Pass. - end - elseif node.type == 'table' then - for k,v in pairs(node.keys) do visit(path..'/'..k, v) end - for k,v in pairs(node.values) do visit(path..'/'..k, v) end - elseif node.type == 'choice' then - for _,choice in pairs(node.choices) do - for k,v in pairs(choice) do visit(path..'/'..k, v) end - end - else - error('unexpected kind', node.kind) - end - end - return coroutine.wrap(function() visit('', root) end), true - end - -- Fetch value of path in data tree. - local function resolve (data, path) - local ret = data - for k in path:gmatch("[^/]+") do ret = ret[k] end - return ret - end - -- If not present, should be true. 
- local function require_instance (node) - if node.argument_type.require_instances == nil then return true end - return node.argument_type.require_instances - end - local leafrefs = {} - for path, node in visit_leafref_paths(grammar) do - if require_instance(node) then - local leafref = to_absolute_path(leafref(node), path) - local success, getter = pcall(resolver, grammar, lib.dirname(leafref)) - if success then - table.insert(leafrefs, {path=path, leafref=leafref, getter=getter}) - end - end - end - if #leafrefs == 0 then return function(data) end end - return function (data) - for _,v in ipairs(leafrefs) do - local path, leafref, getter = v.path, v.leafref, v.getter - local results = assert(getter(data), - 'Wrong XPath expression: '..leafref) - local val = resolve(data, path) - assert(type(results) == 'table' and results[val], - ("Broken leafref integrity in '%s' when referencing '%s'"):format( - path, leafref)) - end - end -end - -local function pairs_from_grammar(grammar) - if grammar.native_key then - return pairs - elseif grammar.key_ctype and grammar.value_ctype then - return function (ctable) - local ctable_next, ctable_max, ctable_entry = ctable:iterate() - return function() - ctable_entry = ctable_next(ctable_max, ctable_entry) - if not ctable_entry then return end - return ctable_entry.key, ctable_entry.value - end - end - elseif grammar.key_ctype then - return cltable.pairs - else - return pairs - end -end - local function expanded_pairs(values) -- Return an iterator for each non-choice pair in values and each pair of -- all choice bodies recursively. 
@@ -581,116 +318,203 @@ local function expanded_pairs(values) return pairs(expanded) end -function uniqueness_checker_from_grammar(grammar) - -- Generate checker for table - local function unique_assertion(leaves, grammar) - local unique_leaves = {} - for leaf in leaves:split(" +") do - table.insert(unique_leaves, normalize_id(leaf)) - end - local pairs = pairs_from_grammar(grammar) - return function (tab) - -- Sad quadratic loop, again - for k1, v1 in pairs(tab) do - for k2, v2 in pairs(tab) do - if k1 == k2 then break end - local collision = true - for _, leaf in ipairs(unique_leaves) do - if not lib.equal(v1[leaf], v2[leaf]) then - collision = false - break - end +function checker_from_grammar(grammar, checker) + local function path_add(path, name) + local p = {} + function p.unparse() return unparse_path(p, grammar) end + for i, part in ipairs(path) do p[i] = part end + p[#p+1] = {name=name, query={}} + return p + end + local function visitor(node, path) + local check = checker(node, path, grammar) + if node.type == 'scalar' then + return check + elseif node.type == 'struct' then + local visits = {} + for name, member in expanded_pairs(node.members) do + local id = normalize_id(name) + visits[id] = visitor(member, path_add(path, name)) + end + for _ in pairs(visits) do + return function (data, root) + root = root or data + if check then check(data, root) end + for id, visit in pairs(visits) do + if data[id] then visit(data[id], root) end end - assert(not collision, "Not unique: "..leaves) end end - end - end - -- Visit tables with unique constraints in grammar and apply checker - local function visit_unique_and_check(grammar, data) - if not data then return - elseif grammar.type == 'table' then - local pairs = pairs_from_grammar(grammar) - -- visit values - for name, value in expanded_pairs(grammar.values) do - for k, datum in pairs(data) do - visit_unique_and_check(value, datum[normalize_id(name)]) + return check + elseif node.type == 'array' then + -- Pretend 
that array elements are scalars. + local pseudo_node = {type="scalar", argument_type=node.element_type, + ctype=node.ctype} + local check_elt = checker(pseudo_node, path, grammar) + if check_elt then + return function (data, root) + root = root or data + if check then check(data, root) end + for idx, elt in ipairs(data) do + path[#path].key = idx + check_elt(elt, root) + end + path[#path].key = nil end end - -- check unique rescrictions - for _, leaves in ipairs(grammar.unique) do - unique_assertion(leaves, grammar)(data) + return check + elseif node.type == 'list' then + local checks_and_visits = {} + for name, member in pairs(node.keys) do + local id = normalize_id(name) + checks_and_visits[id] = + checker(member, path_add(path, name), grammar) + end + for name, member in expanded_pairs(node.values) do + local id = normalize_id(name) + checks_and_visits[id] = + visitor(member, path_add(path, name)) end - elseif grammar.type == 'struct' then - -- visit members - for name, member in expanded_pairs(grammar.members) do - visit_unique_and_check(member, data[normalize_id(name)]) + for _ in pairs(checks_and_visits) do + return function (data, root) + root = root or data + if check then check(data, root) end + for _, entry in ipairs(data) do + path[#path].key = entry + for id, visit in pairs(checks_and_visits) do + if entry[id] then visit(entry[id], root) end + end + end + path[#path].key = nil + end end + return check + else + error("BUG: unhandled node type: "..node.type) end end - return function (data) - visit_unique_and_check(grammar, data) - end + return visitor(grammar, {}) end -function minmax_elements_checker_from_grammar(grammar) - -- Generate checker for table (list, leaf-list) - local function minmax_assertion(grammar, name) - name = name or "" - if not (grammar.min_elements or grammar.max_elements) then - return function () end - end - local pairs = pairs_from_grammar(grammar) - return function (tab) - local n = 0 - for k1, v1 in pairs(tab) do - n = n + 1 - 
end - if grammar.min_elements then - assert(n >= grammar.min_elements, - name..": requires at least ".. - grammar.min_elements.." element(s)") +local function consistency_error(path, msg, ...) + if path.unparse then path = path.unparse() end + error(("Consistency error in '%s': %s") + :format(normalize_path(path), msg:format(...))) +end + +local function leafref_checker(node, path, grammar) + if node.type ~= 'scalar' then return end + if not (node.argument_type and node.argument_type.leafref) then return end + local ok, leafref = pcall(parse_path, node.argument_type.leafref) + if not ok then + consistency_error(path, + "invalid leafref '%s' (%s)", + node.argument_type.leafref, leafref) + end + for _, part in ipairs(leafref) do + -- NYI: queries in leafrefs are currently ignored. + part.query = {} + end + local ok, err = pcall(parse_relative_path, leafref, path, grammar) + if not ok then + consistency_error(path, + "invalid leafref '%s' (%s)", + node.argument_type.leafref, err) + end + if node.require_instances ~= false then + -- We only support one simple case: + -- leafrefs that are keys into lists with a single key. + local leaf = table.remove(leafref) + local list = leafref[#leafref] + if not (list and list.grammar.type == 'list') then return end + if not list.grammar.list.has_key then return end + for k in pairs(list.grammar.keys) do + if k ~= leaf.name then return end + end + return function (data, root) + local ok, err = pcall(function () + list.query = {[leaf.name]=assert(data, "missing leafref value")} + local p = parse_relative_path(leafref, unparse_path(path, grammar)) + return resolver(grammar, p)(root) + end) + if not ok then + consistency_error(path, + "broken leafref integrity for '%s' (%s)", + normalize_path(leafref), err) end - if grammar.max_elements then - assert(n <= grammar.max_elements, - name..": must not have more than ".. - grammar.max_elements.." 
element(s)") + end + end +end + +local function uniqueness_checker(node, path) + local function collision_checker(unique) + local leaves = {} + for leaf in unique:split(" +") do + table.insert(leaves, normalize_id(leaf)) + end + return function (x, y) + local collision = true + for _, leaf in ipairs(leaves) do + if not lib.equal(x[leaf], y[leaf]) then + collision = false + break + end end + return collision end end - -- Visit tables with unique constraints in grammar and apply checker - local function visit_minmax_and_check(grammar, data, name) - if not data then return - elseif grammar.type == 'array' then - -- check min/max elements restrictions - minmax_assertion(grammar, name)(data) - elseif grammar.type == 'table' then - -- visit values - local pairs = pairs_from_grammar(grammar) - for name, value in expanded_pairs(grammar.values) do - for k, datum in pairs(data) do - visit_minmax_and_check(value, datum[normalize_id(name)], name) + local function has_collision(list, collision) + -- Sad quadratic loop + for i, x in ipairs(list) do + for j, y in ipairs(list) do + if i == j then break end + if collision(x, y) then + return true end end - -- check min/max elements restrictions - minmax_assertion(grammar, name)(data) - elseif grammar.type == 'struct' then - -- visit members - for name, member in expanded_pairs(grammar.members) do - visit_minmax_and_check(member, data[normalize_id(name)], name) + end + end + if node.type ~= 'list' then return end + if not node.unique or #node.unique == 0 then return end + local invariants = {} + for _, unique in ipairs(node.unique) do + invariants[unique] = collision_checker(unique) + end + return function (data) + for unique, collision in pairs(invariants) do + if has_collision(data, collision) then + consistency_error(path, "not unique (%s)", unique) end end end +end + +local function minmax_checker(node, path) + if not (node.type == 'array' or node.type == 'list') then return end + if not (node.min_elements or node.max_elements) 
then return end return function (data) - visit_minmax_and_check(grammar, data) + local n = #data + if node.min_elements and n < node.min_elements then + consistency_error(path, + "requires at least %d element(s)", node.min_elements) + end + if node.max_elements and n > node.max_elements then + consistency_error(path, + "must not have more than %d element(s)", node.max_elements) + end end end function consistency_checker_from_grammar(grammar) + local checks = { + checker_from_grammar(grammar, leafref_checker), + checker_from_grammar(grammar, uniqueness_checker), + checker_from_grammar(grammar, minmax_checker) + } return function (data) - leafref_checker_from_grammar(grammar)(data) - uniqueness_checker_from_grammar(grammar)(data) - minmax_elements_checker_from_grammar(grammar)(data) + for _, check in pairs(checks) do + check(data) + end end end @@ -850,13 +674,29 @@ function selftest() local checker = consistency_checker_from_schema(my_schema, true) checker(loaded_data) + local invalid_data = data.load_config_for_schema(my_schema, mem.open_input_string([[ + test { + interface { + name "eth1"; + admin-status true; + address { + ip 192.168.0.1; + } + } + mgmt "eth0"; + } + ]])) + local ok, err = pcall(checker, invalid_data) + assert(not ok) + print(err) + local checker = consistency_checker_from_schema_by_name('ietf-alarms', false) assert(checker) local scm = schema.load_schema_by_name('snabb-softwire-v3') local grammar = data.config_grammar_from_schema(scm) setter_for_grammar(grammar, "/softwire-config/instance[device=test]/".. 
- "queue[id=0]/external-interface/ip 208.118.235.148") + "queue[id=0]/external-interface/ip") remover_for_grammar(grammar, "/softwire-config/instance[device=test]/") -- Test unique restrictions: @@ -890,6 +730,7 @@ function selftest() } ]])) assert(not success) + print(result) -- Test unique validation (should succeed) checker(data.load_config_for_schema(unique_schema, @@ -1089,5 +930,56 @@ function selftest() } ]])) + -- Test restrictions embedded in list entries: + local nested_schema = schema.load_schema([[module nested-schema { + namespace "urn:ietf:params:xml:ns:yang:nested-schema"; + prefix "test"; + + list entry { + key name; + leaf name { type string; } + leaf-list ll { type string; min-elements 1; } + } + + list ref { + key name; + leaf name { type string; } + leaf entry { + type leafref { + path "../../entry/name"; + } + } + } + }]]) + local checker = consistency_checker_from_schema(nested_schema, true) + + -- Test validation (should succeed) + checker(data.load_config_for_schema(nested_schema, + mem.open_input_string [[ + entry { name foo; ll "a"; } + ref { name bar; entry foo; } + ]])) + + -- Test minmax inconsistency in list entry (should fail) + local ok, err = pcall(checker, + data.load_config_for_schema(nested_schema, + mem.open_input_string [[ + entry { name foo; } + ref { name bar; entry foo; } + ]])) + assert(not ok) + print(err) + + -- Test leafref inconsistency in list entry (should fail) + local ok, err = pcall(checker, + data.load_config_for_schema(nested_schema, + mem.open_input_string [[ + entry { name foo; ll "a"; } + ref { name bar; entry foo1; } + ]])) + assert(not ok) + print(err) + + print("selftest: ok") end diff --git a/src/lib/yang/schema.lua b/src/lib/yang/schema.lua index c3b121cb65..3552da9dc7 100644 --- a/src/lib/yang/schema.lua +++ b/src/lib/yang/schema.lua @@ -619,9 +619,22 @@ local function inherit_config(schema) return visit(schema, true) end +local config_leader_rpc_output_grammar +local function 
get_describe_capability_grammar() + local data, path_data = require("lib.yang.data"), require("lib.yang.path_data") + if not config_leader_rpc_output_grammar then + local schema = load_schema_by_name('snabb-config-leader-v1') + config_leader_rpc_output_grammar = data.rpc_output_grammar_from_schema(schema) + end + local _, capability_grammar = path_data.resolver( + config_leader_rpc_output_grammar, '/describe/capability' + ) + return capability_grammar +end + local default_features = {} function get_default_capabilities() - local ret = {} + local ret = get_describe_capability_grammar().list.new() for mod,features in pairs(default_features) do local feature_names = {} for feature,_ in pairs(features) do diff --git a/src/lib/yang/snabb-snabbflow-v1.yang b/src/lib/yang/snabb-snabbflow-v1.yang index 6262b22ff8..2465475409 100644 --- a/src/lib/yang/snabb-snabbflow-v1.yang +++ b/src/lib/yang/snabb-snabbflow-v1.yang @@ -25,6 +25,7 @@ module snabb-snabbflow-v1 { list interface { key device; + unique "name vlan-tag"; description "Interaces serving as IPFIX Observation Points."; @@ -106,7 +107,10 @@ module snabb-snabbflow-v1 { "Sofware RSS configuration for the selected exporter."; leaf name { - type string-name; + type leafref { + path "/snabbflow-config/ipfix/exporter/name"; + require-instance true; + } description "The 'name' of an exporter defined in /snabbflow-config/ipfix/exporter."; @@ -168,7 +172,6 @@ module snabb-snabbflow-v1 { list class { key exporter; - unique order; description "Traffic classes match packets making up the sets of flows processed by indivdual exporters. 
@@ -182,20 +185,17 @@ module snabb-snabbflow-v1 { matches multiple classes, it is duplicated for each class and thereby flows can be processed by multiple exporters."; - ordered-by user; // XXX not yet implemented, hence the 'order' leaf + ordered-by user; leaf exporter { - type string-name; + type leafref { + path "/snabbflow-config/ipfix/exporter/name"; + require-instance true; + } description "An exporter defined in /snabbflow-config/ipfix/exporter. Packets matched by the class are processed by this exporter."; } - - leaf order { - type uint32 { range 1..max; } - description - "Match order of this class."; - } leaf filter { mandatory true; @@ -222,7 +222,10 @@ module snabb-snabbflow-v1 { any other traffic class."; leaf exporter { - type string-name; + type leafref { + path "/snabbflow-config/ipfix/exporter/name"; + require-instance true; + } description "An exporter defined in /snabbflow-config/ipfix/exporter. Flows matched by the default class are processed by this @@ -347,7 +350,7 @@ module snabb-snabbflow-v1 { } leaf mtu { - type uint32; + type uint32 { range 512..9000; } default 1500; description "MTU for exported UDP packets."; @@ -397,7 +400,7 @@ module snabb-snabbflow-v1 { type string-name; description "Identifier used to refer to the collector pool in - /snabbflow-config/ipic/exporter."; + /snabbflow-config/ipfix/exporter/collector-pool."; } list collector { @@ -443,7 +446,7 @@ module snabb-snabbflow-v1 { type string-name; description "The name used to refer to this exporter in - /snabbflow-config/rss/software-scaling."; + /snabbflow-config/rss/software-scaling/exporter/name."; } leaf-list template { @@ -454,7 +457,11 @@ module snabb-snabbflow-v1 { } leaf collector-pool { - type string; + type leafref { + path "/snabbflow-config/ipfix/collector-pool/name"; + require-instance true; + } + mandatory true; description "A collector pool defined in /snabbflow-config/ipfix/collector-pool."; } @@ -507,7 +514,9 @@ module snabb-snabbflow-v1 { "Exporter 
statistics."; leaf name { - type string-name; + type leafref { + path "/snabbflow-config/ipfix/exporter/name"; + } description "An exporter defined in /snabbflow-config/ipfix/exporter."; } @@ -562,7 +571,9 @@ module snabb-snabbflow-v1 { /snabbflow-config/rss/software-scaling."; leaf name { - type string-name; + type leafref { + path "/snabbflow-config/ipfix/exporter/name"; + } description "An exporter defined in /snabbflow-config/ipfix/exporter."; } @@ -596,7 +607,7 @@ module snabb-snabbflow-v1 { grouping interface-state { leaf device { - type string; + type pci-address; description "PCI address of the network device."; } diff --git a/src/lib/yang/snabb-softwire-v1.yang b/src/lib/yang/snabb-softwire-v1.yang deleted file mode 100644 index 44ebb2d337..0000000000 --- a/src/lib/yang/snabb-softwire-v1.yang +++ /dev/null @@ -1,750 +0,0 @@ -module snabb-softwire-v1 { - namespace snabb:lwaftr; - prefix softwire; - - import ietf-inet-types { prefix inet; } - import ietf-yang-types { prefix yang; } - - organization "Igalia, S.L."; - contact "Jessica Tallon "; - description - "Configuration for the Snabb Switch lwAFTR."; - - revision 2016-11-04 { - description - "Initial revision."; - } - - container softwire-config { - description - "Configuration for Snabb lwaftr."; - - grouping traffic-filters { - description - "Ingress and egress filters describing the set of packets - that should be allowed to pass, as pflang filters. pflang - is the language of tcpdump, libpcap and other tools. Note - that if VLAN tagging is enabled, the filters run on packets - after VLAN tags have been stripped off."; - leaf ingress-filter { - type string; - description - "Filter for incoming traffic. Packets that do not match - the filter will be silently dropped."; - } - leaf egress-filter { - type string; - description - "Filter for outgoing traffic. 
Packets that do not match - the filter will be silently dropped."; - } - } - - grouping icmp-policy { - description - "The lwAFTR can be configured to allow or drop incoming ICMP - messages, and to generate outgoing ICMP error messages or - not."; - - leaf allow-incoming-icmp { - type boolean; - default true; - description - "Whether to allow incoming ICMP packets."; - } - - leaf generate-icmp-errors { - type boolean; - default true; - description - "Whether to generate outgoing ICMP error messages."; - } - } - - grouping vlan-tagging { - description - "802.1Q Ethernet tagging."; - - leaf vlan-tag { - type uint16 { - range 0..4095; - } - description - "802.1Q Ethernet VLAN tag for this interface."; - } - } - - grouping error-rate-limiting { - description - "These settings limit the rate of ICMP error message - transmission."; - - container error-rate-limiting { - leaf packets { - type uint32; - description - "The number of ICMP error messages which can be sent within - the specified time period."; - } - - leaf period { - type uint32 { range 1..max; } - default 2; - description - "The time period given in seconds."; - } - } - } - - grouping reassembly { - description - "These settings limit the resources devoted to reassembling - fragmented packets."; - - container reassembly { - leaf max-fragments-per-packet { - type uint32 { range 1..max; } - default 20; - description - "The maximum number of fragments per reassembled packet. - Attempts to reassemble a packet using more fragments than - this threshold will fail and the reassembly data will be - discarded."; - } - - leaf max-packets { - type uint32; - default 20000; - description - "The maximum number of concurrent reassembly attempts. If - this limit is reached, an additional reassembly will cause - random eviction of an ongoing reassembly. 
Note that this - setting directly affects memory usage; the memory buffer - allocated to reassembly is this maximum number of - reassemblies times 25 kilobytes each."; - } - } - } - - leaf name { - type string; - description - "Name of lwAFTR instance. This must be unique amongst the Snabb - processes on the system. This may be specified either here, in the - YANG configuration or via the command line when the lwAFTR is started. - - The order of presidence for this leaf is as followers: - 1. The name set on an already running lwAFTR instance via snabb set. - 2. A command line option to specify the name upon starting the lwAFTR - instance (i.e. overriding this value). - 3. The value here in the configuration when starting a lwaftr instance. - - If no name is specified the lwaftr can be referred to using the PID of - the lwAFTR process on the system."; - } - - container external-interface { - description - "Configuration for the external, internet-facing IPv4 - interface."; - - leaf ip { - type inet:ipv4-address; - mandatory true; - description - "L3 Address of the internet-facing network interface. Used - when generating error messages and responding to ICMP echo - requests."; - } - leaf mac { - type yang:mac-address; - mandatory true; - description - "MAC address of the internet-facing NIC."; - } - leaf mtu { - type uint16; - default 1460; - description - "Maximum packet size to send on the IPv4 interface."; - } - - uses traffic-filters; - uses icmp-policy; - uses vlan-tagging; - uses error-rate-limiting; - uses reassembly; - - container next-hop { - leaf ip { - type inet:ipv4-address; - description - "IPv4 address of the next hop for the internet-facing NIC. 
- The lwAFTR will resolve this to a MAC address using ARP."; - } - leaf mac { - type yang:mac-address; - description - "Statically configured MAC address of the next hop for the - internet-facing NIC."; - } - } - } - - container internal-interface { - description - "Configuration for the internal IPv6 interface."; - - leaf ip { - type inet:ipv6-address; - mandatory true; - description - "L3 Address of the internal-facing network interface. Used - when generating error messages and responding to ICMP echo - requests."; - } - leaf mac { - type yang:mac-address; - mandatory true; - description - "MAC address of the internal-facing NIC."; - } - leaf mtu { - type uint16; - default 1500; - description - "Maximum packet size to sent on the IPv6 interface."; - } - - uses traffic-filters; - uses icmp-policy; - uses vlan-tagging; - uses error-rate-limiting; - uses reassembly; - - container next-hop { - leaf ip { - type inet:ipv6-address; - description - "IPv6 address of the next hop for the internal-facing NIC. - The lwAFTR will resolve this to a MAC address using NDP."; - } - leaf mac { - type yang:mac-address; - description - "Statically configured MAC address of the next hop for the - internal-facing NIC."; - } - } - - leaf hairpinning { - type boolean; - default true; - description - "Indicates whether to support hairpinning of traffic between - two B4s."; - } - } - - container binding-table { - description - "A collection of softwires (tunnels), along with a description - of the IPv4 and IPv6 addresses handled by the lwAFTR."; - - list psid-map { - description - "The set of IPv4 addresses managed by the lwAFTR, along with - the way in which those IPv4 addresses share ports. A PSID map - entry associates a PSID length, shift, and - reserved-ports-bit-count with each IPv4 address served by - the lwAFTR. - - The lightweight 4-over-6 architecture supports sharing of - IPv4 addresses by partitioning the space of TCP/UDP/ICMP - ports into disjoint \"port sets\". 
Each softwire associated - with an IPv4 address corresponds to a different set of ports - on that address. The way that the ports are partitioned is - specified in RFC 7597: each address has an associated set - of parameters that specifies how to compute a \"port set - identifier\" (PSID) from a given port. - - 0 1 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 - +-----------+-----------+-------+ - Ports in | A | PSID | j | - the CE port set | > 0 | | | - +-----------+-----------+-------+ - | a bits | k bits |m bits | - - Figure 2: Structure of a Port-Restricted Port Field - - Source: http://tools.ietf.org/html/rfc7597#section-5.1 - - We find the specification's names to be a bit obtuse, so we - refer to them using the following names: - - a bits = reserved-ports-bit-count. - k bits = psid-length. - m bits = shift."; - - key addr; - - leaf addr { - type inet:ipv4-address; - description - "Public IPv4 address managed by the lwAFTR."; - } - - leaf end-addr { - type inet:ipv4-address; - description - "If present, this PSID map entry applies to all addresses - between 'addr' and this address, inclusive."; - } - - leaf psid-length { - type uint8 { range 0..16; } - mandatory true; - description - "The number of bits devoted to the PSID in the port map. - If the psid-length is N, then the IPv4 address will be - shared 2^N ways. Note that psid-length, shift, and - reserved-ports-bit-count must add up to 16."; - } - - leaf shift { - type uint8 { range 0..16; } - description - "Given an incoming port, one can obtain the PSID by - shifting the port right by 'shift' bits and then masking - off the lowest 'psid-length' bits. Defaults to 16 - - psid-length. Note that psid-length, shift, and - reserved-ports-bit-count must add up to 16."; - } - - leaf reserved-ports-bit-count { - type uint8 { range 0..16; } - default 0; - description - "Reserve the lowest 2^N ports so that they map to no - softwire. 
This can be useful to prevent the low 1024 - ports (for example) from being mapped to customers. Note - that psid-length and shift must add up to less than or - equal to 16."; - } - } - - leaf-list br-address { - type inet:ipv6-address; - description - "B4-facing address of an lwAFTR."; - } - - list softwire { - key "ipv4 psid padding"; - - leaf ipv4 { - type inet:ipv4-address; - description - "Public IPv4 address of the softwire."; - } - - leaf psid { - type uint16; - description - "Port set ID."; - } - - leaf padding { - type uint16 { range 0..0; } - default 0; - description - "Reserved bytes."; - } - - leaf br { - type uint32; - default 1; - description - "The B4-facing address of the lwAFTR for this softwire, as - a one-based index into br-addresses."; - } - - leaf b4-ipv6 { - type inet:ipv6-address; - mandatory true; - description - "B4 address."; - } - } - } - } - - container softwire-state { - description "State data about lwaftr."; - config false; - - leaf drop-all-ipv4-iface-bytes { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv4 interfaces, - whether or not they actually IPv4 (they only include data about - packets that go in/out over the wires, excluding internally generated - ICMP packets)."; - } - leaf drop-all-ipv4-iface-packets { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv4 interfaces, - whether or not they actually IPv4 (they only include data about - packets that go in/out over the wires, excluding internally generated - ICMP packets)."; - } - leaf drop-all-ipv6-iface-bytes { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv6 interfaces, - whether or not they actually IPv6 (they only include data about packets - that go in/out over the wires, excluding internally generated ICMP - packets)."; - } - leaf drop-all-ipv6-iface-packets { - type yang:zero-based-counter64; - description - 
"All dropped packets and bytes that came in over IPv6 interfaces, - whether or not they actually IPv6 (they only include data about packets - that go in/out over the wires, excluding internally generated ICMP - packets)."; - } - leaf drop-bad-checksum-icmpv4-bytes { - type yang:zero-based-counter64; - description "ICMPv4 packets dropped because of a bad checksum."; - } - leaf drop-bad-checksum-icmpv4-packets { - type yang:zero-based-counter64; - description "ICMPv4 packets dropped because of a bad checksum."; - } - leaf drop-in-by-policy-icmpv4-bytes { - type yang:zero-based-counter64; - description "Incoming ICMPv4 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv4-packets { - type yang:zero-based-counter64; - description "Incoming ICMPv4 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv6-bytes { - type yang:zero-based-counter64; - description "Incoming ICMPv6 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv6-packets { - type yang:zero-based-counter64; - description "Incoming ICMPv6 packets dropped because of current policy."; - } - leaf drop-in-by-rfc7596-icmpv4-bytes { - type yang:zero-based-counter64; - description - "Incoming ICMPv4 packets with no destination (RFC 7596 section 8.1)."; - } - leaf drop-in-by-rfc7596-icmpv4-packets { - type yang:zero-based-counter64; - description - "Incoming ICMPv4 packets with no destination (RFC 7596 section 8.1)."; - } - leaf drop-ipv4-frag-disabled { - type yang:zero-based-counter64; - description - "If fragmentation is disabled, the only potentially non-zero IPv4 - fragmentation counter is drop-ipv4-frag-disabled. If fragmentation is - enabled, it will always be zero."; - } - leaf drop-ipv4-frag-invalid-reassembly { - type yang:zero-based-counter64; - description - "Two or more IPv4 fragments were received, and reassembly was started, - but was invalid and dropped. 
Causes include multiple fragments claiming - they are the last fragment, overlapping fragment offsets, or the packet - was being reassembled from too many fragments (the setting is - max_fragments_per_reassembly_packet, and the default is that no packet - should be reassembled from more than 40)."; - } - leaf drop-ipv4-frag-random-evicted { - type yang:zero-based-counter64; - description - "Reassembling an IPv4 packet from fragments was in progress, but the - configured amount of packets to reassemble at once was exceeded, so one - was dropped at random. Consider increasing the setting - max_ipv4_reassembly_packets."; - } - leaf drop-ipv6-frag-disabled { - type yang:zero-based-counter64; - description - "If fragmentation is disabled, the only potentially non-zero IPv6 - fragmentation counter is drop-ipv6-frag-disabled. If fragmentation is - enabled, it will always be zero."; - } - leaf drop-ipv6-frag-invalid-reassembly { - type yang:zero-based-counter64; - description - "Two or more IPv6 fragments were received, and reassembly was started, - but was invalid and dropped. Causes include multiple fragments claiming - they are the last fragment, overlapping fragment offsets, or the packet - was being reassembled from too many fragments (the setting is - max_fragments_per_reassembly_packet, and the default is that no packet - should be reassembled from more than 40)."; - } - leaf drop-ipv6-frag-random-evicted { - type yang:zero-based-counter64; - description - "Reassembling an IPv6 packet from fragments was in progress, but the - configured amount of packets to reassemble at once was exceeded, so one - was dropped at random. 
Consider increasing the setting - max_ipv6_reassembly_packets."; - } - leaf drop-misplaced-not-ipv4-bytes { - type yang:zero-based-counter64; - description "Non-IPv4 packets incoming on the IPv4 link."; - } - leaf drop-misplaced-not-ipv4-packets { - type yang:zero-based-counter64; - description "Non-IPv4 packets incoming on the IPv4 link."; - } - leaf drop-misplaced-not-ipv6-bytes { - type yang:zero-based-counter64; - description "Non-IPv6 packets incoming on IPv6 link."; - } - leaf drop-misplaced-not-ipv6-packets { - type yang:zero-based-counter64; - description "Non-IPv6 packets incoming on IPv6 link."; - } - leaf drop-no-dest-softwire-ipv4-bytes { - type yang:zero-based-counter64; - description - "No matching destination softwire in the binding table; incremented - whether or not the reason was RFC7596."; - } - leaf drop-no-dest-softwire-ipv4-packets { - type yang:zero-based-counter64; - description - "No matching destination softwire in the binding table; incremented - whether or not the reason was RFC7596."; - } - leaf drop-no-source-softwire-ipv6-bytes { - type yang:zero-based-counter64; - description - "No matching source softwire in the binding table; incremented whether - or not the reason was RFC7596."; - } - leaf drop-no-source-softwire-ipv6-packets { - type yang:zero-based-counter64; - description - "No matching source softwire in the binding table; incremented whether - or not the reason was RFC7596."; - } - leaf drop-out-by-policy-icmpv4-packets { - type yang:zero-based-counter64; - description - "Internally generated ICMPv4 error packets dropped because of current - policy."; - } - leaf drop-out-by-policy-icmpv6-packets { - type yang:zero-based-counter64; - description - "Internally generated ICMPv6 packets dropped because of current - policy."; - } - leaf drop-over-mtu-but-dont-fragment-ipv4-bytes { - type yang:zero-based-counter64; - description - "IPv4 packets whose size exceeded the MTU, but the DF (Don't Fragment) - flag was set."; - } - leaf 
drop-over-mtu-but-dont-fragment-ipv4-packets { - type yang:zero-based-counter64; - description - "IPv4 packets whose size exceeded the MTU, but the DF (Don't Fragment) - flag was set."; - } - leaf drop-over-rate-limit-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packets dropped because the outgoing ICMPv6 rate limit was reached."; - } - leaf drop-over-rate-limit-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packets dropped because the outgoing ICMPv6 rate limit was reached."; - } - leaf drop-over-time-but-not-hop-limit-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packet's time limit was exceeded, but the hop limit was not."; - } - leaf drop-over-time-but-not-hop-limit-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packet's time limit was exceeded, but the hop limit was not."; - } - leaf drop-too-big-type-but-not-code-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packet's ICMP type was 'Packet too big' but its ICMP code was not an - acceptable one for this type."; - } - leaf drop-too-big-type-but-not-code-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packet's ICMP type was 'Packet too big' but its ICMP code was not an - acceptable one for this type."; - } - leaf drop-ttl-zero-ipv4-bytes { - type yang:zero-based-counter64; - description "IPv4 packets dropped because their TTL was zero."; - } - leaf drop-ttl-zero-ipv4-packets { - type yang:zero-based-counter64; - description "IPv4 packets dropped because their TTL was zero."; - } - leaf drop-unknown-protocol-icmpv6-bytes { - type yang:zero-based-counter64; - description "Packets with an unknown ICMPv6 protocol."; - } - leaf drop-unknown-protocol-icmpv6-packets { - type yang:zero-based-counter64; - description "Packets with an unknown ICMPv6 protocol."; - } - leaf drop-unknown-protocol-ipv6-bytes { - type yang:zero-based-counter64; - description "Packets with an unknown IPv6 protocol."; - } - 
leaf drop-unknown-protocol-ipv6-packets { - type yang:zero-based-counter64; - description "Packets with an unknown IPv6 protocol."; - } - leaf hairpin-ipv4-bytes { - type yang:zero-based-counter64; - description "IPv4 packets going to a known b4 (hairpinned)."; - } - leaf hairpin-ipv4-packets { - type yang:zero-based-counter64; - description "IPv4 packets going to a known b4 (hairpinned)."; - } - leaf in-ipv4-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv4-frag-needs-reassembly { - type yang:zero-based-counter64; - description "An IPv4 fragment was received."; - } - leaf in-ipv4-frag-reassembled { - type yang:zero-based-counter64; - description "A packet was successfully reassembled from IPv4 fragments."; - } - leaf in-ipv4-frag-reassembly-unneeded { - type yang:zero-based-counter64; - description - "An IPv4 packet which was not a fragment was received - consequently, - it did not need to be reassembled. This should be the usual case."; - } - leaf in-ipv4-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv6-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv6-frag-needs-reassembly { - type yang:zero-based-counter64; - description "An IPv6 fragment was received."; - } - leaf in-ipv6-frag-reassembled { - type yang:zero-based-counter64; - description "A packet was successfully reassembled from IPv6 fragments."; - } - leaf in-ipv6-frag-reassembly-unneeded { - type yang:zero-based-counter64; - description - "An IPv6 packet which was not a fragment was received - consequently, it - did not need to be reassembled. 
This should be the usual case."; - } - leaf in-ipv6-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf ingress-packet-drops { - type yang:zero-based-counter64; - description "Packets dropped due to ingress filters."; - } - leaf memuse-ipv4-frag-reassembly-buffer { - type yang:zero-based-counter64; - description - "The amount of memory being used by the statically sized data structure - for reassembling IPv4 fragments. This is directly proportional to the - setting max_ipv4_reassembly_packets."; - } - leaf memuse-ipv6-frag-reassembly-buffer { - type yang:zero-based-counter64; - description - "The amount of memory being used by the statically sized data structure - for reassembling IPv6 fragments. This is directly proportional to the - setting max_ipv6_reassembly_packets."; - } - leaf out-icmpv4-bytes { - type yang:zero-based-counter64; - description "Internally generated ICMPv4 packets."; - } - leaf out-icmpv4-packets { - type yang:zero-based-counter64; - description "Internally generated ICMPv4 packets."; - } - leaf out-icmpv6-bytes { - type yang:zero-based-counter64; - description "Internally generted ICMPv6 error packets."; - } - leaf out-icmpv6-packets { - type yang:zero-based-counter64; - description "Internally generted ICMPv6 error packets."; - } - leaf out-ipv4-bytes { - type yang:zero-based-counter64; - description "Valid outgoing IPv4 packets."; - } - leaf out-ipv4-frag { - type yang:zero-based-counter64; - description - "An outgoing packet exceeded the configured IPv4 MTU, so needed to be - fragmented. 
This may happen, but should be unusual."; - } - leaf out-ipv4-frag-not { - type yang:zero-based-counter64; - description - "An outgoing packet was small enough to pass through unfragmented - this - should be the usual case."; - } - leaf out-ipv4-packets { - type yang:zero-based-counter64; - description "Valid outgoing IPv4 packets."; - } - leaf out-ipv6-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv6 packets."; - } - leaf out-ipv6-frag { - type yang:zero-based-counter64; - description - "An outgoing packet exceeded the configured IPv6 MTU, so needed to be - fragmented. This may happen, but should be unusual."; - } - leaf out-ipv6-frag-not { - type yang:zero-based-counter64; - description - "An outgoing packet was small enough to pass through unfragmented - this - should be the usual case."; - } - leaf out-ipv6-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv6 packets."; - } - } -} diff --git a/src/lib/yang/snabb-softwire-v2.yang b/src/lib/yang/snabb-softwire-v2.yang deleted file mode 100644 index bd14a9e760..0000000000 --- a/src/lib/yang/snabb-softwire-v2.yang +++ /dev/null @@ -1,889 +0,0 @@ -module snabb-softwire-v2 { - yang-version 1.1; - namespace snabb:softwire-v2; - prefix softwire; - - import ietf-inet-types { prefix inet; } - import ietf-yang-types { prefix yang; } - - organization "Igalia, S.L."; - contact "Jessica Tallon "; - description - "Configuration for the Snabb Switch lwAFTR."; - - revision 2019-09-17 { - description - "Add discontinuity time to softwire-state."; - } - - revision 2018-10-13 { - description - "Add flow-label setting."; - } - - revision 2017-04-17 { - description - "Removal of br-address leaf-list and br leaf. It adds the - addition of br-address binding_table.softwire. This is to - make the schema more yang-like. One now only need to specify - the br-address on the softwire rather than managing the index's - to a leaf-list of them. 
- - This also removes the psid-map list and adds a new port-set - container on the softwire container instead. This will help - adding the softwires as well as bring it more inline with the - ietf-softwire schema. - - The addition of /softwire-config/instance allows for configuring - multiple instances of the lwAFTR with a shared binding table and - other common configuration properties."; - } - - revision 2016-11-04 { - description - "Initial revision."; - } - - grouping state-counters { - container softwire-state { - - description "State data about interface."; - config false; - - leaf discontinuity-time { - type yang:date-and-time; - mandatory true; - description - "The time of the most recent occasion on which the lwaftr instance - suffered a discontinuity. This is set to the current time whenever - the lwaftr instance is started or configured."; - } - - leaf drop-all-ipv4-iface-bytes { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv4 interfaces, - whether or not they actually IPv4 (they only include data about - packets that go in/out over the wires, excluding internally generated - ICMP packets)."; - } - leaf drop-all-ipv4-iface-packets { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv4 interfaces, - whether or not they actually IPv4 (they only include data about - packets that go in/out over the wires, excluding internally generated - ICMP packets)."; - } - leaf drop-all-ipv6-iface-bytes { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv6 interfaces, - whether or not they actually IPv6 (they only include data about packets - that go in/out over the wires, excluding internally generated ICMP - packets)."; - } - leaf drop-all-ipv6-iface-packets { - type yang:zero-based-counter64; - description - "All dropped packets and bytes that came in over IPv6 interfaces, - whether or not they actually IPv6 
(they only include data about packets - that go in/out over the wires, excluding internally generated ICMP - packets)."; - } - leaf drop-bad-checksum-icmpv4-bytes { - type yang:zero-based-counter64; - description "ICMPv4 packets dropped because of a bad checksum."; - } - leaf drop-bad-checksum-icmpv4-packets { - type yang:zero-based-counter64; - description "ICMPv4 packets dropped because of a bad checksum."; - } - leaf drop-in-by-policy-icmpv4-bytes { - type yang:zero-based-counter64; - description "Incoming ICMPv4 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv4-packets { - type yang:zero-based-counter64; - description "Incoming ICMPv4 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv6-bytes { - type yang:zero-based-counter64; - description "Incoming ICMPv6 packets dropped because of current policy."; - } - leaf drop-in-by-policy-icmpv6-packets { - type yang:zero-based-counter64; - description "Incoming ICMPv6 packets dropped because of current policy."; - } - leaf drop-in-by-rfc7596-icmpv4-bytes { - type yang:zero-based-counter64; - description - "Incoming ICMPv4 packets with no destination (RFC 7596 section 8.1)."; - } - leaf drop-in-by-rfc7596-icmpv4-packets { - type yang:zero-based-counter64; - description - "Incoming ICMPv4 packets with no destination (RFC 7596 section 8.1)."; - } - leaf drop-ipv4-frag-disabled { - type yang:zero-based-counter64; - description - "If fragmentation is disabled, the only potentially non-zero IPv4 - fragmentation counter is drop-ipv4-frag-disabled. If fragmentation is - enabled, it will always be zero."; - } - leaf drop-ipv4-frag-invalid-reassembly { - type yang:zero-based-counter64; - description - "Two or more IPv4 fragments were received, and reassembly was started, - but was invalid and dropped. 
Causes include multiple fragments claiming - they are the last fragment, overlapping fragment offsets, or the packet - was being reassembled from too many fragments (the setting is - max_fragments_per_reassembly_packet, and the default is that no packet - should be reassembled from more than 40)."; - } - leaf drop-ipv4-frag-random-evicted { - type yang:zero-based-counter64; - description - "Reassembling an IPv4 packet from fragments was in progress, but the - configured amount of packets to reassemble at once was exceeded, so one - was dropped at random. Consider increasing the setting - max_ipv4_reassembly_packets."; - } - leaf drop-ipv6-frag-disabled { - type yang:zero-based-counter64; - description - "If fragmentation is disabled, the only potentially non-zero IPv6 - fragmentation counter is drop-ipv6-frag-disabled. If fragmentation is - enabled, it will always be zero."; - } - leaf drop-ipv6-frag-invalid-reassembly { - type yang:zero-based-counter64; - description - "Two or more IPv6 fragments were received, and reassembly was started, - but was invalid and dropped. Causes include multiple fragments claiming - they are the last fragment, overlapping fragment offsets, or the packet - was being reassembled from too many fragments (the setting is - max_fragments_per_reassembly_packet, and the default is that no packet - should be reassembled from more than 40)."; - } - leaf drop-ipv6-frag-random-evicted { - type yang:zero-based-counter64; - description - "Reassembling an IPv6 packet from fragments was in progress, but the - configured amount of packets to reassemble at once was exceeded, so one - was dropped at random. 
Consider increasing the setting - max_ipv6_reassembly_packets."; - } - leaf drop-misplaced-not-ipv4-bytes { - type yang:zero-based-counter64; - description "Non-IPv4 packets incoming on the IPv4 link."; - } - leaf drop-misplaced-not-ipv4-packets { - type yang:zero-based-counter64; - description "Non-IPv4 packets incoming on the IPv4 link."; - } - leaf drop-misplaced-not-ipv6-bytes { - type yang:zero-based-counter64; - description "Non-IPv6 packets incoming on IPv6 link."; - } - leaf drop-misplaced-not-ipv6-packets { - type yang:zero-based-counter64; - description "Non-IPv6 packets incoming on IPv6 link."; - } - leaf drop-no-dest-softwire-ipv4-bytes { - type yang:zero-based-counter64; - description - "No matching destination softwire in the binding table; incremented - whether or not the reason was RFC7596."; - } - leaf drop-no-dest-softwire-ipv4-packets { - type yang:zero-based-counter64; - description - "No matching destination softwire in the binding table; incremented - whether or not the reason was RFC7596."; - } - leaf drop-no-source-softwire-ipv6-bytes { - type yang:zero-based-counter64; - description - "No matching source softwire in the binding table; incremented whether - or not the reason was RFC7596."; - } - leaf drop-no-source-softwire-ipv6-packets { - type yang:zero-based-counter64; - description - "No matching source softwire in the binding table; incremented whether - or not the reason was RFC7596."; - } - leaf drop-out-by-policy-icmpv4-packets { - type yang:zero-based-counter64; - description - "Internally generated ICMPv4 error packets dropped because of current - policy."; - } - leaf drop-out-by-policy-icmpv6-packets { - type yang:zero-based-counter64; - description - "Internally generated ICMPv6 packets dropped because of current - policy."; - } - leaf drop-over-mtu-but-dont-fragment-ipv4-bytes { - type yang:zero-based-counter64; - description - "IPv4 packets whose size exceeded the MTU, but the DF (Don't Fragment) - flag was set."; - } - leaf 
drop-over-mtu-but-dont-fragment-ipv4-packets { - type yang:zero-based-counter64; - description - "IPv4 packets whose size exceeded the MTU, but the DF (Don't Fragment) - flag was set."; - } - leaf drop-over-rate-limit-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packets dropped because the outgoing ICMPv6 rate limit was reached."; - } - leaf drop-over-rate-limit-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packets dropped because the outgoing ICMPv6 rate limit was reached."; - } - leaf drop-over-time-but-not-hop-limit-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packet's time limit was exceeded, but the hop limit was not."; - } - leaf drop-over-time-but-not-hop-limit-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packet's time limit was exceeded, but the hop limit was not."; - } - leaf drop-too-big-type-but-not-code-icmpv6-bytes { - type yang:zero-based-counter64; - description - "Packet's ICMP type was 'Packet too big' but its ICMP code was not an - acceptable one for this type."; - } - leaf drop-too-big-type-but-not-code-icmpv6-packets { - type yang:zero-based-counter64; - description - "Packet's ICMP type was 'Packet too big' but its ICMP code was not an - acceptable one for this type."; - } - leaf drop-ttl-zero-ipv4-bytes { - type yang:zero-based-counter64; - description "IPv4 packets dropped because their TTL was zero."; - } - leaf drop-ttl-zero-ipv4-packets { - type yang:zero-based-counter64; - description "IPv4 packets dropped because their TTL was zero."; - } - leaf drop-unknown-protocol-icmpv6-bytes { - type yang:zero-based-counter64; - description "Packets with an unknown ICMPv6 protocol."; - } - leaf drop-unknown-protocol-icmpv6-packets { - type yang:zero-based-counter64; - description "Packets with an unknown ICMPv6 protocol."; - } - leaf drop-unknown-protocol-ipv6-bytes { - type yang:zero-based-counter64; - description "Packets with an unknown IPv6 protocol."; - } - 
leaf drop-unknown-protocol-ipv6-packets { - type yang:zero-based-counter64; - description "Packets with an unknown IPv6 protocol."; - } - leaf hairpin-ipv4-bytes { - type yang:zero-based-counter64; - description "IPv4 packets going to a known b4 (hairpinned)."; - } - leaf hairpin-ipv4-packets { - type yang:zero-based-counter64; - description "IPv4 packets going to a known b4 (hairpinned)."; - } - leaf in-ipv4-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv4-frag-needs-reassembly { - type yang:zero-based-counter64; - description "An IPv4 fragment was received."; - } - leaf in-ipv4-frag-reassembled { - type yang:zero-based-counter64; - description "A packet was successfully reassembled from IPv4 fragments."; - } - leaf in-ipv4-frag-reassembly-unneeded { - type yang:zero-based-counter64; - description - "An IPv4 packet which was not a fragment was received - consequently, - it did not need to be reassembled. This should be the usual case."; - } - leaf in-ipv4-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv6-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf in-ipv6-frag-needs-reassembly { - type yang:zero-based-counter64; - description "An IPv6 fragment was received."; - } - leaf in-ipv6-frag-reassembled { - type yang:zero-based-counter64; - description "A packet was successfully reassembled from IPv6 fragments."; - } - leaf in-ipv6-frag-reassembly-unneeded { - type yang:zero-based-counter64; - description - "An IPv6 packet which was not a fragment was received - consequently, it - did not need to be reassembled. 
This should be the usual case."; - } - leaf in-ipv6-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv4 packets."; - } - leaf ingress-packet-drops { - type yang:zero-based-counter64; - description "Packets dropped due to ingress filters."; - } - leaf memuse-ipv4-frag-reassembly-buffer { - type yang:zero-based-counter64; - description - "The amount of memory being used by the statically sized data structure - for reassembling IPv4 fragments. This is directly proportional to the - setting max_ipv4_reassembly_packets."; - } - leaf memuse-ipv6-frag-reassembly-buffer { - type yang:zero-based-counter64; - description - "The amount of memory being used by the statically sized data structure - for reassembling IPv6 fragments. This is directly proportional to the - setting max_ipv6_reassembly_packets."; - } - leaf out-icmpv4-bytes { - type yang:zero-based-counter64; - description "Internally generated ICMPv4 packets."; - } - leaf out-icmpv4-packets { - type yang:zero-based-counter64; - description "Internally generated ICMPv4 packets."; - } - leaf out-icmpv6-bytes { - type yang:zero-based-counter64; - description "Internally generted ICMPv6 error packets."; - } - leaf out-icmpv6-packets { - type yang:zero-based-counter64; - description "Internally generted ICMPv6 error packets."; - } - leaf out-ipv4-bytes { - type yang:zero-based-counter64; - description "Valid outgoing IPv4 packets."; - } - leaf out-ipv4-frag { - type yang:zero-based-counter64; - description - "An outgoing packet exceeded the configured IPv4 MTU, so needed to be - fragmented. 
This may happen, but should be unusual."; - } - leaf out-ipv4-frag-not { - type yang:zero-based-counter64; - description - "An outgoing packet was small enough to pass through unfragmented - this - should be the usual case."; - } - leaf out-ipv4-packets { - type yang:zero-based-counter64; - description "Valid outgoing IPv4 packets."; - } - leaf out-ipv6-bytes { - type yang:zero-based-counter64; - description "All valid outgoing IPv6 packets."; - } - leaf out-ipv6-frag { - type yang:zero-based-counter64; - description - "An outgoing packet exceeded the configured IPv6 MTU, so needed to be - fragmented. This may happen, but should be unusual."; - } - leaf out-ipv6-frag-not { - type yang:zero-based-counter64; - description - "An outgoing packet was small enough to pass through unfragmented - this - should be the usual case."; - } - leaf out-ipv6-packets { - type yang:zero-based-counter64; - description "All valid outgoing IPv6 packets."; - } - } - } - - container softwire-config { - description - "Configuration for Snabb lwaftr."; - - leaf name { - type string; - description - "Name of lwAFTR instance. This must be unique amongst the Snabb - processes on the system. This may be specified either here, in the - YANG configuration or via the command line when the lwAFTR is started. - - The order of presidence for this leaf is as followers: - 1. The name set on an already running lwAFTR instance via snabb set. - 2. A command line option to specify the name upon starting the lwAFTR - instance (i.e. overriding this value). - 3. The value here in the configuration when starting a lwaftr instance. - - If no name is specified the lwaftr can be referred to using the PID of - the lwAFTR process on the system."; - } - - grouping traffic-filters { - description - "Ingress and egress filters describing the set of packets - that should be allowed to pass, as pflang filters. pflang - is the language of tcpdump, libpcap and other tools. 
Note - that if VLAN tagging is enabled, the filters run on packets - after VLAN tags have been stripped off."; - leaf ingress-filter { - type string; - description - "Filter for incoming traffic. Packets that do not match - the filter will be silently dropped."; - } - leaf egress-filter { - type string; - description - "Filter for outgoing traffic. Packets that do not match - the filter will be silently dropped."; - } - } - - grouping icmp-policy { - description - "The lwAFTR can be configured to allow or drop incoming ICMP - messages, and to generate outgoing ICMP error messages or - not."; - - leaf allow-incoming-icmp { - type boolean; - default true; - description - "Whether to allow incoming ICMP packets."; - } - - leaf generate-icmp-errors { - type boolean; - default true; - description - "Whether to generate outgoing ICMP error messages."; - } - } - - grouping vlan-tagging { - description - "802.1Q Ethernet tagging."; - - leaf vlan-tag { - type uint16 { - range 0..4095; - } - description - "802.1Q Ethernet VLAN tag for this interface."; - } - } - - grouping error-rate-limiting { - description - "These settings limit the rate of ICMP error message - transmission."; - - container error-rate-limiting { - leaf packets { - type uint32; - description - "The number of ICMP error messages which can be sent within - the specified time period."; - } - - leaf period { - type uint32 { range 1..max; } - default 2; - description - "The time period given in seconds."; - } - } - } - - grouping reassembly { - description - "These settings limit the resources devoted to reassembling - fragmented packets."; - - container reassembly { - leaf max-fragments-per-packet { - type uint32 { range 1..max; } - default 20; - description - "The maximum number of fragments per reassembled packet. 
- Attempts to reassemble a packet using more fragments than - this threshold will fail and the reassembly data will be - discarded."; - } - - leaf max-packets { - type uint32; - default 20000; - description - "The maximum number of concurrent reassembly attempts. If - this limit is reached, an additional reassembly will cause - random eviction of an ongoing reassembly. Note that this - setting directly affects memory usage; the memory buffer - allocated to reassembly is this maximum number of - reassemblies times 25 kilobytes each."; - } - } - } - - - list instance { - description - "Provides configuration for specific instances of the lwAFTR. - These configuration options will only affect the specific lwaftr - with the given name specified in the name leaf. The other options - not present in this list are shared amongst all instances."; - - key "device"; - - leaf device { - type string; - description - "The PCI device the instance should use during lwAFTR operation. If - device is configured in on-a-stick mode, the 'external-interface' - device should not be configured. If the 'external-interface is - specified this option should specify the PCI device of the - 'internal-interface' (IPv6 traffic only)."; - } - - list queue { - description "List of Receive-Side Scaling (RSS) queues."; - key "id"; - - leaf id { - type uint8 { range 0..1; } - description - "RSS queue on which to attach. Traffic will be partitioned - evenly between instances servicing queues on the same - interface. The queue to which an incoming packet is assigned - is a function of the TCP or UDP source and destination ports - (if any) and the source and destination IPv4 or IPv6 - addresses. Fragmented packets will be delivered to the - lowest-numbered queue. - - Note that currently the lwAFTR is restricted to running at - most 2 RSS workers per device. This limitation may be lifted - to 4 soon. 
Raising it farther is possible but needs changes - to how the lwAFTR uses its PCI devices."; - } - - container external-interface { - leaf ip { - type inet:ipv4-address; - mandatory true; - description - "L3 Address of the internet-facing network interface. Used - when generating error messages and responding to ICMP echo - requests."; - } - leaf device { - description - "PCI device of the instance uses for external IPv6 traffic. If this - is left unspecified the lwAFTR configures itself in on-a-stick - mode."; - type string; - } - leaf mac { - type yang:mac-address; - mandatory true; - description - "MAC address of the internet-facing NIC."; - } - - uses vlan-tagging; - - container next-hop { - choice address { - mandatory true; - case ip { - leaf ip { - type inet:ipv4-address; - description - "IPv4 address of the next hop for the internet-facing NIC. - The lwAFTR will resolve this to a MAC address using ARP."; - } - leaf resolved-mac { - config false; - description "Resolved next-hop mac address found by ARP."; - type yang:mac-address; - } - } - case mac { - leaf mac { - type yang:mac-address; - description - "Statically configured MAC address of the next hop for the - internet-facing NIC."; - } - } - } - } - } - - container internal-interface { - leaf ip { - type inet:ipv6-address; - mandatory true; - description - "L3 Address of the internal-facing network interface. Used - when generating error messages and responding to ICMP echo - requests."; - } - leaf mac { - type yang:mac-address; - mandatory true; - description - "MAC address of the internal-facing NIC."; - } - - uses vlan-tagging; - - - container next-hop { - choice address { - mandatory true; - case ip { - leaf ip { - type inet:ipv6-address; - description - "IPv4 address of the next hop for the internet-facing NIC. 
- The lwAFTR will resolve this to a MAC address using ARP."; - } - leaf resolved-mac { - config false; - description "Resolved next-hop mac address found by ARP."; - type yang:mac-address; - } - } - case mac { - leaf mac { - type yang:mac-address; - description - "Statically configured MAC address of the next hop for the - internet-facing NIC."; - } - } - } - } - } - } - - uses state-counters; - } - - container external-interface { - description - "Configuration for the external, internet-facing IPv4 - interface."; - - leaf mtu { - type uint16; - default 1460; - description - "Maximum packet size to send on the IPv4 interface."; - } - - leaf mru { - type uint16; - default 1460; - description - "Maximum packet size to receive on the IPv4 interface."; - } - - uses traffic-filters; - uses icmp-policy; - uses error-rate-limiting; - uses reassembly; - - - } - - container internal-interface { - description - "Configuration for the internal IPv6 interface."; - - leaf mtu { - type uint16; - default 1500; - description - "Maximum packet size to sent on the IPv6 interface."; - } - - leaf mru { - type uint16; - default 1460; - description - "Maximum packet size to recieve on the IPv6 interface."; - } - - leaf flow-label { - type uint32; - default 0; - description - "IPv6 flow label"; - } - - uses traffic-filters; - uses icmp-policy; - uses vlan-tagging; - uses error-rate-limiting; - uses reassembly; - - leaf hairpinning { - type boolean; - default true; - description - "Indicates whether to support hairpinning of traffic between - two B4s."; - } - } - - container binding-table { - description - "A collection of softwires (tunnels), along with a description - of the IPv4 and IPv6 addresses handled by the lwAFTR."; - - list softwire { - key "ipv4 psid"; - - leaf ipv4 { - type inet:ipv4-address; - mandatory true; - description - "Public IPv4 address of the softwire."; - } - - leaf padding { - type uint16; - default 0; - } - - leaf br-address { - type inet:ipv6-address; - 
mandatory true; - description - "The B4-facing address of the lwAFTR for this softwire."; - } - - leaf b4-ipv6 { - type inet:ipv6-address; - mandatory true; - description - "B4 address."; - } - - leaf psid { - type uint16; - mandatory true; - description "Port set ID."; - } - - container port-set { - description - "The set of IPv4 addresses managed by the lwAFTR, along with - the way in which those IPv4 addresses share ports. A PSID map - entry associates a PSID length and reserved-ports-bit-count - with each IPv4 address served by the lwAFTR. - - The lightweight 4-over-6 architecture supports sharing of - IPv4 addresses by partitioning the space of TCP/UDP/ICMP - ports into disjoint \"port sets\". Each softwire associated - with an IPv4 address corresponds to a different set of ports - on that address. The way that the ports are partitioned is - specified in RFC 7597: each address has an associated set - of parameters that specifies how to compute a \"port set - identifier\" (PSID) from a given port. - - 0 1 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 - +-----------+-----------+-------+ - Ports in | A | PSID | j | - the CE port set | > 0 | | | - +-----------+-----------+-------+ - | a bits | k bits |m bits | - - Figure 2: Structure of a Port-Restricted Port Field - - Source: http://tools.ietf.org/html/rfc7597#section-5.1 - - We find the specification's names to be a bit obtuse, so we - refer to them using the following names: - - a bits = reserved-ports-bit-count. - k bits = psid-length. - m bits = shift. - - The shift parameter is calculated from psid-length and - reserved-ports-bit-count. The calculation performed to - get the value of shift is: - - shift = 16 - psid-length - reserved-ports-bit-count"; - - leaf psid-length { - type uint8 { range 0..16; } - mandatory true; - description - "The number of bits devoted to the PSID in the port map. - If the psid-length is N, then the IPv4 address will be - shared 2^N ways. 
Note that psid-length, shift, and - reserved-ports-bit-count must add up to 16."; - } - - leaf reserved-ports-bit-count { - type uint8 { range 0..16; } - default 0; - description - "Reserve the lowest 2^N ports so that they map to no - softwire. This can be useful to prevent the low 1024 - ports (for example) from being mapped to customers. Note - that psid-length and shift must add up to less than or - equal to 16."; - } - } - } - - container version { - description - "Optional versioning for binding table. The vesioning information - will change on every update or change to the binding table."; - - leaf number { - type uint64; - description "Incremental version number."; - } - leaf date { - type yang:date-and-time; - description "Timestamp of last change."; - } - } - } - } - - uses state-counters; -} diff --git a/src/lib/yang/state.lua b/src/lib/yang/state.lua index 45096a5336..cffbea38ad 100644 --- a/src/lib/yang/state.lua +++ b/src/lib/yang/state.lua @@ -53,18 +53,18 @@ function state_reader_from_grammar(production, maybe_keyword) end return ret end - function visitor.table(keyword, production) + function visitor.list(keyword, production) -- TODO: Right now we basically map leaves to counters; we have -- no structured way to know what keys we might use. To make -- tables here we'd need more of a design! - io.stderr:write( - 'WARNING: Reading state into tables not yet implemented\n') + -- io.stderr:write( + -- 'WARNING: Reading state into lists is not yet implemented\n') return function(counters) return nil end end function visitor.array(keyword, production) -- For similar reasons as tables, no idea what to do here! 
- io.stderr:write( - 'WARNING: Reading state into arrays not yet implemented\n') + -- io.stderr:write( + -- 'WARNING: Reading state into arrays not yet implemented\n') return function(counters) return nil end end function visitor.struct(keyword, production) @@ -109,6 +109,7 @@ state_reader_from_schema_by_name = util.memoize(state_reader_from_schema_by_name function selftest () print("selftest: lib.yang.state") + local ffi = require("ffi") local simple_router_schema_src = [[module snabb-simple-router { namespace snabb:simple-router; prefix simple-router; @@ -152,26 +153,18 @@ function selftest () uses "detailed-counters"; } }]] - local function table_length(tbl) - local rtn = 0 - for k,v in pairs(tbl) do rtn = rtn + 1 end - return rtn - end - local function in_array(needle, haystack) - for _, i in pairs(haystack) do if needle == i then return true end end - return false - end - local simple_router_schema = yang.load_schema(simple_router_schema_src, "state-test") local reader = state_reader_from_schema(simple_router_schema) - local state = reader({}) - assert(0 == state.state.total_packets) - assert(0 == state.state.dropped_packets) - assert(0 == state.detailed_state.dropped_wrong_route) - assert(0 == state.detailed_state.dropped_not_permitted) - -- Would like to assert "state.routes == nil" but state is actually - -- a cdata object, and trying to access the non-existent routes - -- property throws an error. 
+ local state = reader({ + ['total-packets'] = ffi.new("struct counter", {c=1}), + ['dropped-packets'] = ffi.new("struct counter", {c=2}), + ['dropped-wrong-route'] = ffi.new("struct counter", {c=3}), + ['dropped-not-permitted'] = ffi.new("struct counter", {c=4}) + }) + assert(1 == state.state.total_packets) + assert(2 == state.state.dropped_packets) + assert(3 == state.detailed_state.dropped_wrong_route) + assert(4 == state.detailed_state.dropped_not_permitted) print('selftest: ok') end diff --git a/src/lib/yang/util.lua b/src/lib/yang/util.lua index 4d7fe958bf..d96ba294e8 100644 --- a/src/lib/yang/util.lua +++ b/src/lib/yang/util.lua @@ -54,28 +54,56 @@ function tointeger(str, what, min, max) return res end +local array_mt = {} + function ffi_array(ptr, elt_t, count) - local mt = {} - local size = count or ffi.sizeof(ptr)/ffi.sizeof(elt_t) - function mt:__len() return size end - function mt:__index(idx) - assert(1 <= idx and idx <= size) - return ptr[idx-1] - end - function mt:__newindex(idx, val) - assert(1 <= idx and idx <= size) - ptr[idx-1] = val + local t = ffi.typeof(elt_t) + local initial_size = count or ffi.sizeof(ptr)/ffi.sizeof(t) + local self = { + t = t, + ptr = ptr, + size = initial_size, + fill = initial_size + } + return setmetatable(self, array_mt) +end + +function array_mt:__len() + return self.fill +end +function array_mt:__index(idx) + assert(1 <= idx and idx <= self.fill) + return self.ptr[idx-1] +end +function array_mt:__newindex(idx, val) + if val then + if idx > self.size then + -- grow + local old = self.ptr + self.size = self.size * 2 + self.ptr = ffi.new(ffi.typeof("$[?]", self.t), self.size) + ffi.copy(self.ptr, old, ffi.sizeof(old)) + end + assert(1 <= idx and idx <= self.fill+1) + self.fill = math.max(idx, self.fill) + self.ptr[idx-1] = val + else + assert(1 <= idx and idx <= self.fill) + ffi.copy(self.ptr+idx-1, self.ptr+idx, + (self.fill-idx)*ffi.sizeof(self.t)) + self.fill = self.fill-1 end - function mt:__ipairs() - local idx = -1 
- return function() - idx = idx + 1 - if idx >= size then return end - return idx+1, ptr[idx] +end +function array_mt:__pairs() + local idx = 0 + return function() + idx = idx + 1 + if idx <= self.fill then + return idx, self.ptr[idx-1] end end - return ffi.metatype(ffi.typeof('struct { $* ptr; }', elt_t), mt)(ptr) end +array_mt.__ipairs = array_mt.__pairs -- The yang modules represent IPv4 addresses as host-endian uint32 -- values in Lua. See /~https://github.com/snabbco/snabb/issues/1063. @@ -184,5 +212,18 @@ function selftest() assert(tointeger('-0x8000000000000000') == -0x8000000000000000LL) assert(ipv4_pton('255.0.0.1') == 255 * 2^24 + 1) assert(ipv4_ntop(ipv4_pton('255.0.0.1')) == '255.0.0.1') + -- ffi_array + local a = ffi_array(ffi.new("double[3]"), "double") + assert(#a == 3) + for i in ipairs(a) do + a[i] = i + end + assert(a[3] == 3) + a[4] = 4 + assert(a[4] == 4) + assert(#a == 4) + a[2] = nil + assert(#a == 3) + assert(a[3] == 4) print('selftest: ok') end diff --git a/src/program/ipfix/lib.lua b/src/program/ipfix/lib.lua index 10d5dc2c3d..4a19eefef3 100644 --- a/src/program/ipfix/lib.lua +++ b/src/program/ipfix/lib.lua @@ -1,312 +1,290 @@ module(..., package.seeall) -local now = require("core.app").now -local lib = require("core.lib") -local counter = require("core.counter") -local app_graph = require("core.config") -local link = require("core.link") -local pci = require("lib.hardware.pci") -local numa = require("lib.numa") -local ipv4 = require("lib.protocol.ipv4") -local ethernet = require("lib.protocol.ethernet") -local S = require("syscall") -local basic = require("apps.basic.basic_apps") -local arp = require("apps.ipv4.arp") -local ipfix = require("apps.ipfix.ipfix") -local template = require("apps.ipfix.template") -local rss = require("apps.rss.rss") -local iftable = require("apps.snmp.iftable") +local lib = require("core.lib") +local app_graph = require("core.config") +local pci = require("lib.hardware.pci") +local ipv4 = 
require("lib.protocol.ipv4") +local ethernet = require("lib.protocol.ethernet") +local basic = require("apps.basic.basic_apps") +local ipfix = require("apps.ipfix.ipfix") +local tap = require("apps.tap.tap") +local rss = require("apps.rss.rss") +local iftable = require("apps.snmp.iftable") +local Receiver = require("apps.interlink.receiver") local Transmitter = require("apps.interlink.transmitter") - - --- apps that can be used as an input or output for the exporter -local in_apps, out_apps = {}, {} - -local function parse_spec (spec, delimiter) - local t = {} - for s in spec:split(delimiter or ':') do - table.insert(t, s) - end - return t -end +local pcap = require("apps.pcap.pcap") local function normalize_pci_name (device) return pci.qualified(device):gsub("[:%.]", "_") end -function in_apps.pcap (path) - return { input = "input", - output = "output" }, - { require("apps.pcap.pcap").PcapReader, path } -end +local function configure_ipfix_instance (config, in_graph) + local graph = in_graph or app_graph.new() -function out_apps.pcap (path) - return { input = "input", - output = "output" }, - { require("apps.pcap.pcap").PcapWriter, path } -end + local ipfix_name = "ipfix_"..assert(config.instance) + + app_graph.app(graph, ipfix_name, ipfix.IPFIX, config) -function out_apps.tap_routed (device) - return { input = "input", - output = "output" }, - { require("apps.tap.tap").Tap, { name = device } } + return graph, {name=ipfix_name, output='output', input='input'} end -function in_apps.raw (device) - return { input = "rx", - output = "tx" }, - { require("apps.socket.raw").RawSocket, device } +local function configure_tap_output (config, in_graph) + config = lib.parse(config, { + instance={required=true}, + observation_domain={required=true}, + mtu={required=true}, + log_date={required=true} + }) + local graph = in_graph or app_graph.new() + + local device = "ipfixexport"..config.observation_domain + + local tap_config = { + name = device, + mtu = config.mtu, + 
overwrite_dst_mac = true, + forwarding = true + } + local tap_name = "out_"..config.instance + local sink_name = "sink_"..config.instance + + -- with UDP, ipfix doesn't need to handle packets from the collector + -- (hence, discard packets incoming from the tap interface to sink) + app_graph.app(graph, tap_name, tap.Tap, tap_config) + app_graph.app(graph, sink_name, basic.Sink) + app_graph.link(graph, tap_name..".output -> "..sink_name..".input") + app_graph.app(graph, "tap_ifmib_"..config.instance, iftable.MIB, { + target_app = tap_name, + ifname = device, + ifalias = "IPFIX Observation Domain "..config.observation_domain, + log_date = config.log_date + }) + + return graph, {name=tap_name, input='input'} end -out_apps.raw = in_apps.raw -function in_apps.tap (device) - return { input = "input", - output = "output" }, - { require("apps.tap.tap").Tap, device } +local function configure_interlink_input (config, in_graph) + config = lib.parse(config, { + name={required=true} + }) + local graph = in_graph or app_graph.new() + + local in_name = config.name + + app_graph.app(graph, in_name, Receiver) + + return graph, {name=in_name, output='output'} end -out_apps.tap = in_apps.tap -function in_apps.interlink (name) - return { input = nil, - output = "output" }, - { require("apps.interlink.receiver"), nil } +local function configure_interlink_output (config, in_graph) + config = lib.parse(config, { + name={required=true} + }) + local graph = in_graph or app_graph.new() + + local out_name = config.name + + app_graph.app(graph, out_name, Transmitter) + + return graph, {name=out_name, input='input'} end -function in_apps.pci (input) - local device, rxq = input.device, input.rxq or 0 - local device_info = pci.device_info(device) - local conf = { pciaddr = device } +local function configure_pci_input (config, in_graph) + config = lib.parse(config, { + device={required=true}, + rxq={required=true}, + receive_queue_size={required=true}, + log_date={required=true}, + vlan_tag={}, 
+ name={}, + description={} + }) + local graph = in_graph or app_graph.new() + + local pci_name = normalize_pci_name(config.device) + local in_name = "input_"..pci_name + local device_info = pci.device_info(config.device) + local driver = require(device_info.driver).driver + local conf if device_info.driver == 'apps.intel_mp.intel_mp' then - conf.rxq = rxq - conf.rxcounter = rxq - conf.ring_buffer_size = input.receive_queue_size + conf = { + pciaddr = config.device, + rxq = config.rxq, + rxcounter = config.rxq, + ring_buffer_size = config.receive_queue_size + } elseif device_info.driver == 'apps.mellanox.connectx' then conf = { - pciaddress = device, - queue = rxq + pciaddress = config.device, + queue = config.rxq } end - return { input = device_info.rx, output = device_info.tx }, - { require(device_info.driver).driver, conf } -end -out_apps.pci = in_apps.pci - -probe_config = { - -- Probe-specific - output_type = {required = true}, - output = { required = true }, - input_type = { default = nil }, - input = { default = nil }, - exporter_mac = { default = nil }, - -- Passed on to IPFIX app - active_timeout = { default = nil }, - idle_timeout = { default = nil }, - flush_timeout = { default = nil }, - cache_size = { default = nil }, - max_load_factor = { default = nil }, - scan_time = { default = nil }, - observation_domain = { default = nil }, - template_refresh_interval = { default = nil }, - ipfix_version = { default = nil }, - exporter_ip = { required = true }, - collector_ip = { required = true }, - collector_port = { required = true }, - mtu = { default = nil }, - templates = { required = true }, - maps = { default = {} }, - maps_logfile = { default = nil }, - instance = { default = 1 }, - add_packet_metadata = { default = true }, - log_date = { default = false }, - scan_protection = { default = {} } -} - -local function mk_ipfix_config (config) - return { active_timeout = config.active_timeout, - idle_timeout = config.idle_timeout, - flush_timeout = 
config.flush_timeout, - cache_size = config.cache_size, - max_load_factor = config.max_load_factor, - scan_time = config.scan_time, - observation_domain = config.observation_domain, - template_refresh_interval = - config.template_refresh_interval, - ipfix_version = config.ipfix_version, - exporter_ip = config.exporter_ip, - collector_ip = config.collector_ip, - collector_port = config.collector_port, - mtu = config.mtu - 14, - templates = config.templates, - maps = config.maps, - maps_logfile = config.maps_logfile, - instance = config.instance, - add_packet_metadata = config.add_packet_metadata, - log_date = config.log_date, - scan_protection = config.scan_protection } -end -function configure_graph (arg, in_graph) - local config = lib.parse(arg, probe_config) + app_graph.app(graph, in_name, driver, conf) + app_graph.app(graph, "nic_ifmib_"..in_name, iftable.MIB, { + target_app = in_name, stats = 'stats', + ifname = config.name or pci_name, + ifalias = config.description, + log_date = config.log_date + }) - local in_link, in_app - if config.input_type then - assert(in_apps[config.input_type], - "unknown input type: "..config.input_type) - assert(config.input, "Missing input parameter") - in_link, in_app = in_apps[config.input_type](config.input) - end - assert(out_apps[config.output_type], - "unknown output type: "..config.output_type) - local out_link, out_app = out_apps[config.output_type](config.output) - - if config.output_type == "tap_routed" then - local tap_config = out_app[2] - tap_config.mtu = config.mtu - tap_config.overwrite_dst_mac = true - tap_config.forwarding = true + local nic = {name=in_name, input=device_info.rx, output=device_info.tx} + local link_name = nic.name + if conf.vlan_tag then + link_name = "vlan"..conf.vlan_tag end - local ipfix_config = mk_ipfix_config(config) - local ipfix_name = "ipfix_"..config.instance - local out_name = "out_"..config.instance - local sink_name = "sink_"..config.instance + return graph, nic, link_name +end +local 
function configure_pcap_input (config, in_graph) + config = lib.parse(config, { + path={required=true}, + name={default='pcap'} + }) local graph = in_graph or app_graph.new() - if config.input then - local in_name = "in" - if config.input_type == "interlink" then - in_name = config.input - end - app_graph.app(graph, in_name, unpack(in_app)) - app_graph.link(graph, in_name ..".".. in_link.output .. " -> " - ..ipfix_name..".input") - end - app_graph.app(graph, ipfix_name, ipfix.IPFIX, ipfix_config) - app_graph.app(graph, out_name, unpack(out_app)) - - -- use ARP for link-layer concerns unless the output is connected - -- to a pcap writer or a routed tap interface - if (config.output_type ~= "pcap" and - config.output_type ~= "tap_routed") then - local arp_name = "arp_"..config.instance - local arp_config = { self_mac = config.exporter_mac and - ethernet:pton(config.exporter_mac), - self_ip = ipv4:pton(config.exporter_ip), - next_ip = ipv4:pton(config.collector_ip) } - app_graph.app(graph, arp_name, arp.ARP, arp_config) - app_graph.app(graph, sink_name, basic.Sink) - - app_graph.link(graph, out_name.."."..out_link.output.." -> " - ..arp_name..".south") - - -- with UDP, ipfix doesn't need to handle packets from the collector - app_graph.link(graph, arp_name..".north -> "..sink_name..".input") - - app_graph.link(graph, ipfix_name..".output -> "..arp_name..".north") - app_graph.link(graph, arp_name..".south -> " - ..out_name.."."..out_link.input) - else - app_graph.link(graph, ipfix_name..".output -> " - ..out_name.."."..out_link.input) - app_graph.app(graph, sink_name, basic.Sink) - app_graph.link(graph, out_name.."."..out_link.output.." 
-> " - ..sink_name..".input") - end - if config.input_type and config.input_type == "pci" then - local pciaddr = unpack(parse_spec(config.input, '/')) - app_graph.app(graph, "nic_ifmib", iftable.MIB, { - target_app = "in", stats = 'stats', - ifname = normalize_pci_name(pciaddr), - log_date = config.log_date - }) - end - if config.output_type == "tap_routed" then - app_graph.app(graph, "tap_ifmib_"..config.instance, iftable.MIB, { - target_app = out_name, - ifname = config.output, - ifalias = "IPFIX Observation Domain "..config.observation_domain, - log_date = config.log_date - }) - end + local in_name = config.name + + app_graph.app(graph, in_name, pcap.PcapReader, config.path) + + return graph, {name=in_name, output='output'} +end - return graph, config +local function link (graph, from, to) + assert(from.name, "missing name in 'from'") + assert(from.output, "missing output in 'from': "..from.name) + assert(to.name, "missing name in 'to'") + assert(to.input, "missing input in 'to': "..to.name) + app_graph.link( + graph, from.name.."."..from.output.."->"..to.name.."."..to.input + ) end -function configure_rss_graph (config, inputs, outputs, log_date, rss_group, input_type) - input_type = input_type or 'pci' +local function configure_ipfix_tap_instance (config, in_graph) + local graph = in_graph or app_graph.new() + local _, ipfix = configure_ipfix_instance(config, graph) + local tap_args = { + instance = config.instance, + observation_domain = config.observation_domain, + mtu = config.mtu, + log_date = config.log_date + } + local _, tap = configure_tap_output(tap_args, graph) + link(graph, ipfix, tap) + return graph, ipfix +end + +function configure_interlink_ipfix_tap_instance (in_name, config) local graph = app_graph.new() + local _, receiver = configure_interlink_input({name=in_name}, graph) + local _, ipfix = configure_ipfix_tap_instance(config, graph) + link(graph, receiver, ipfix) - local rss_name = "rss"..(rss_group or '') - app_graph.app(graph, rss_name, 
rss.rss, config) + return graph +end - -- An input describes a physical interface - local tags, in_app_specs = {}, {} - for n, input in ipairs(inputs) do - local input_name, link_name, in_link, in_app - if input_type == 'pci' then - local pci_name = normalize_pci_name(input.device) - input_name, link_name = "input_"..pci_name, pci_name - in_link, in_app = in_apps.pci(input) - table.insert(in_app_specs, - { pciaddr = input.device, - name = input_name, - ifname = input.name or pci_name, - ifalias = input.description }) - elseif input_type == 'pcap' then - input_name, link_name = 'pcap', 'pcap' - in_link, in_app = in_apps.pcap(input) - else - error("Unsupported input_type: "..input_type) - end - app_graph.app(graph, input_name, unpack(in_app)) - if input.tag then - local tag = input.tag - assert(not(tags[tag]), "Tag not unique: "..tag) - link_name = "vlan"..tag - end - app_graph.link(graph, input_name.."."..in_link.output - .." -> "..rss_name.."."..link_name) +function configure_pci_ipfix_tap_instance (config, inputs, rss_group) + local graph = app_graph.new() + + local rss_name = "rss"..assert(rss_group) + + local rss = {name=rss_name, output='output'} + app_graph.app(graph, rss_name, basic.Join) + + local links = {} + for _, pci in ipairs(inputs) do + local _, nic, link_name = configure_pci_input(pci, graph) + links[link_name] = assert(not links[link_name], + "input link not unique: "..link_name) + link(graph, nic, {name=rss.name, input=link_name}) end + local _, ipfix = configure_ipfix_tap_instance(config, graph) + link(graph, rss, ipfix) + + return graph +end + +function configure_pcap_ipfix_tap_instance (config, pcap_path, rss_group) + local graph = app_graph.new() + + local rss_name = "rss"..assert(rss_group) + + local _, pcap = configure_pcap_input({name=rss_name, path=pcap_path}, graph) + local _, ipfix = configure_ipfix_tap_instance(config, graph) + link(graph, pcap, ipfix) + + return graph +end + +local function configure_rss_tap_instances (config, outputs, 
rss_group, in_graph) + local graph = in_graph or app_graph.new() + + local rss_name = "rss"..assert(rss_group) + + app_graph.app(graph, rss_name, rss.rss, config) - -- An output describes either an interlink or a complete ipfix app for _, output in ipairs(outputs) do + local rss = {name=rss_name, output=output.link_name} if output.type == 'interlink' then -- Keys -- link_name name of the link - app_graph.app(graph, output.link_name, Transmitter) - app_graph.link(graph, rss_name.."."..output.link_name.." -> " - ..output.link_name..".input") + local _, transmitter = configure_interlink_output( + {name=output.link_name}, graph + ) + link(graph, rss, transmitter) else -- Keys -- link_name name of the link -- args probe configuration -- instance # of embedded instance output.args.instance = output.instance or output.args.instance - local graph = configure_graph(output.args, graph) - app_graph.link(graph, rss_name.."."..output.link_name - .." -> ipfix_"..output.args.instance..".input") + local _, ipfix = configure_ipfix_tap_instance(output.args, graph) + link(graph, rss, ipfix) end end - for _, spec in ipairs(in_app_specs) do - app_graph.app(graph, "nic_ifmib_"..spec.name, iftable.MIB, { - target_app = spec.name, stats = 'stats', - ifname = spec.ifname, - ifalias = spec.ifalias, - log_date = log_date - }) + return graph, {name=rss_name} +end + +function configure_pci_rss_tap_instances (config, inputs, outputs, rss_group) + local graph = app_graph.new() + + local _, rss = configure_rss_tap_instances(config, outputs, rss_group, graph) + local links = {} + for _, pci in ipairs(inputs) do + local _, nic, link_name = configure_pci_input(pci, graph) + links[link_name] = assert(not links[link_name], + "input link not unique: "..link_name) + link(graph, nic, {name=rss.name, input=link_name}) end - + + return graph +end + +function configure_pcap_rss_tap_instances(config, pcap_path, outputs, rss_group) + local graph = app_graph.new() + + local _, rss = 
configure_rss_tap_instances(config, outputs, rss_group, graph) + local _, pcap = configure_pcap_input({path=pcap_path}, graph) + link(graph, pcap, {name=rss.name, input='pcap'}) + return graph end -function configure_mlx_ctrl_graph (mellanox, log_date) +function configure_mlx_controller (devices) -- Create a trivial app graph that only contains the control apps -- for the Mellanox driver, which sets up the queues and -- maintains interface counters. local ctrl_graph, need_ctrl = app_graph.new(), false - for device, spec in pairs(mellanox) do + for device, spec in pairs(devices) do + spec = lib.parse(spec, { + queues={required=true}, + recvq_size={required=true}, + log_date={required=true}, + name={}, + alias={} + }) local conf = { pciaddress = device, queues = spec.queues, @@ -318,9 +296,9 @@ function configure_mlx_ctrl_graph (mellanox, log_date) require(driver).ConnectX, conf) app_graph.app(ctrl_graph, "nic_ifmib_"..pci_name, iftable.MIB, { target_app = "ctrl_"..pci_name, stats = 'stats', - ifname = spec.ifName or pci_name, - ifalias = spec.ifAlias, - log_date = log_date + ifname = spec.name or pci_name, + ifalias = spec.alias, + log_date = spec.log_date }) need_ctrl = true end diff --git a/src/program/ipfix/probe/example.conf b/src/program/ipfix/probe/example.conf index 285e8c6ed3..ce7a1989be 100644 --- a/src/program/ipfix/probe/example.conf +++ b/src/program/ipfix/probe/example.conf @@ -32,7 +32,6 @@ snabbflow-config { flow-director { class { exporter e2; - order 1; filter ip6; } default-class { diff --git a/src/program/ipfix/probe/probe.lua b/src/program/ipfix/probe/probe.lua index 380f7ffc6f..b09e43e118 100644 --- a/src/program/ipfix/probe/probe.lua +++ b/src/program/ipfix/probe/probe.lua @@ -10,6 +10,7 @@ local pci = require("lib.hardware.pci") local lib = require("core.lib") local app_graph = require("core.config") +local ipfix = require("apps.ipfix.ipfix") local probe = require("program.ipfix.lib") local probe_schema = 'snabb-snabbflow-v1' @@ -97,17 +98,13 
@@ function start (name, confpath) } end -local ipfix_default_config = lib.deepcopy(probe.probe_config) +local ipfix_default_config = lib.deepcopy(ipfix.IPFIX.config) for _, key in ipairs({ "collector_ip", "collector_port", "observation_domain", "exporter_mac", "templates", - "output_type", - "output", - "input_type", - "input", "instance" }) do ipfix_default_config[key] = nil @@ -128,10 +125,10 @@ function setup_workers (config) local collector_pools = {} for name, p in pairs(ipfix.collector_pool) do local collectors = {} - for entry in p.collector:iterate() do + for _, entry in ipairs(p.collector) do table.insert(collectors, { - ip = yang_util.ipv4_ntop(entry.key.ip), - port = entry.key.port + ip = yang_util.ipv4_ntop(entry.ip), + port = entry.port }) end collector_pools[name] = collectors @@ -152,19 +149,6 @@ function setup_workers (config) "Exporter for the default traffic class can not be the exporter for a defined class.") end - local class_order = {} - for exporter in pairs(flow_director.class) do - table.insert(class_order, exporter) - end - table.sort(class_order, function (x, y) - return flow_director.class[x].order < flow_director.class[y].order - end) - - local function class_name (exporter, class) - -- Including order in name to avoid name collision with 'default' class - return ("%s_%d"):format(exporter, class.order) - end - local rss_links = {} local function rss_link_name (class) if not rss_links[class] then @@ -205,15 +189,12 @@ function setup_workers (config) for rss_group = 1, rss.hardware_scaling.rss_groups do local inputs, outputs = {}, {} for device, opt in pairs(interfaces) do - if pcap_input then - table.insert(inputs, pcap_input) - break - end ensure_device_unique(device, interfaces) local input = lib.deepcopy(opt) input.device = device input.rxq = rss_group - 1 + input.log_date = ipfix.log_date table.insert(inputs, input) -- The mellanox driver requires a master process that sets up @@ -223,10 +204,11 @@ function setup_workers (config) if 
device_info.driver == 'apps.mellanox.connectx' then local spec = mellanox[device] if not spec then - spec = { ifName = input.name, - ifAlias = input.description, + spec = { name = input.name, + alias = input.description, queues = {}, - recvq_size = input.receive_queue_size } + recvq_size = input.receive_queue_size, + log_date = ipfix.log_date } mellanox[device] = spec end table.insert(spec.queues, { id = input.rxq }) @@ -247,7 +229,6 @@ function setup_workers (config) config.collector_pool = exporter.collector_pool config.templates = exporter.template - config.output_type = "tap_routed" config.add_packet_metadata = false config.maps = {} @@ -275,7 +256,7 @@ function setup_workers (config) local rss_link local class = flow_director.class[name] if class then - rss_link = rss_link_name(class_name(name, class)) + rss_link = rss_link_name('class_'..name) elseif name == flow_director.default_class.exporter then rss_link = rss_link_name('default') else @@ -293,12 +274,14 @@ function setup_workers (config) iconfig.log_date = ipfix.log_date local od = next_observation_domain() iconfig.observation_domain = od - iconfig.output = "ipfixexport"..od if ipfix.maps.log_directory then iconfig.maps_logfile = ipfix.maps.log_directory.."/"..od..".log" end + -- Subtract Ethernet and VLAN overhead from MTU + iconfig.mtu = iconfig.mtu - 14 + -- Scale the scan protection parameters by the number of -- ipfix instances in this RSS class local scale_factor = rss.hardware_scaling.rss_groups * num_instances @@ -316,11 +299,10 @@ function setup_workers (config) } else output = { type = "interlink", link_name = rss_link } - iconfig.input_type = "interlink" - iconfig.input = rss_link - - - workers[rss_link] = probe.configure_graph(iconfig) + workers[rss_link] = + probe.configure_interlink_ipfix_tap_instance( + rss_link, iconfig + ) -- Dedicated exporter processes are restartable worker_opts[rss_link] = { restart_intensity = software_scaling.restart.intensity, @@ -336,27 +318,52 @@ function 
setup_workers (config) classes = {}, remove_extension_headers = flow_director.remove_ipv6_extension_headers } - for _, exporter in ipairs(class_order) do - local class = flow_director.class[exporter] - if not ipfix.exporter[exporter] then - error(("Exporter '%s' referenced in traffic class %d is not defined.") - :format(exporter, class.order)) + for i, class in ipairs(flow_director.class) do + if not ipfix.exporter[class.exporter] then + error(("Exporter '%s' referenced in traffic class #%d is not defined.") + :format(class.exporter, i)) end table.insert(rss_config.classes, { - name = class_name(exporter, class), + name = 'class_'..class.exporter, filter = class.filter, continue = class.continue }) end - workers["rss"..rss_group] = probe.configure_rss_graph( - rss_config, inputs, outputs, ipfix.log_date, rss_group, pcap_input and 'pcap' - ) + if #outputs == 1 and outputs[1].type ~= 'interlink' then + -- We have a single output within a single process + -- (no flow director classes, and a single embedded exporer instance.) + -- This is the simple case: omit creating a software RSS app. + -- NB: IPFIX app has to extract metadata as software RSS app is not present. + local config = outputs[1].args + config.add_packet_metadata = true + if pcap_input then + workers["rss"..rss_group] = probe.configure_pcap_ipfix_tap_instance( + config, pcap_input, rss_group + ) + else + workers["rss"..rss_group] = probe.configure_pci_ipfix_tap_instance( + config, inputs, rss_group + ) + end + else + -- Otherwise we have the general case: configure a software RSS app to + -- distribute inputs over flow director classes and exporter instances. 
+ if pcap_input then + workers["rss"..rss_group] = probe.configure_pcap_rss_tap_instances( + rss_config, pcap_input, outputs, rss_group + ) + else + workers["rss"..rss_group] = probe.configure_pci_rss_tap_instances( + rss_config, inputs, outputs, rss_group + ) + end + end end -- Create a trivial app graph that only contains the control apps -- for the Mellanox driver, which sets up the queues and -- maintains interface counters. - local ctrl_graph, need_ctrl = probe.configure_mlx_ctrl_graph(mellanox, ipfix.log_date) + local ctrl_graph, need_ctrl = probe.configure_mlx_controller(mellanox) if need_ctrl then workers["mlx_ctrl"] = ctrl_graph diff --git a/src/program/ipfix/tests/test.lua b/src/program/ipfix/tests/test.lua index 6ee6003f5c..a117b0c261 100755 --- a/src/program/ipfix/tests/test.lua +++ b/src/program/ipfix/tests/test.lua @@ -98,7 +98,7 @@ function selftest () assert(diff <= tolerance, "Flows mismatch!") end - expect(ip4_flows, 30000, 0.1) + expect(ip4_flows, 30000, 0.2) expect(ip6_flows, 1400, 0.1) expect(http4_flows, 200, 0.2) expect(dns4_flows, 1300, 0.2) diff --git a/src/program/ipfix/tests/test_v4_v6.conf b/src/program/ipfix/tests/test_v4_v6.conf new file mode 100644 index 0000000000..44a494346d --- /dev/null +++ b/src/program/ipfix/tests/test_v4_v6.conf @@ -0,0 +1,31 @@ +snabbflow-config { + interface { + device "00:00.0"; + } + flow-director { + default-class { + exporter ip; + } + remove-ipv6-extension-headers true; + } + ipfix { + idle-timeout 2; + active-timeout 2; + flush-timeout 2; + scan-time 0.1; + exporter-ip 10.0.0.1; + collector-pool { name c1; collector { ip 10.0.0.2; port 4739; } } + maps { + pfx4-to-as { file "program/ipfix/tests/maps/pfx4_to_as.csv"; } + pfx6-to-as { file "program/ipfix/tests/maps/pfx6_to_as.csv"; } + vlan-to-ifindex { file "program/ipfix/tests/maps/vlan_to_ifindex"; } + mac-to-as { file "program/ipfix/tests/maps/mac_to_as"; } + } + exporter { + name ip; + template "v4_extended"; + template "v6_extended"; + 
collector-pool c1; + } + } +} \ No newline at end of file diff --git a/src/program/ipfix/tests/test_v4_v6_dnshttp.conf b/src/program/ipfix/tests/test_v4_v6_dnshttp.conf index 5400969bc6..e8e4c867ad 100644 --- a/src/program/ipfix/tests/test_v4_v6_dnshttp.conf +++ b/src/program/ipfix/tests/test_v4_v6_dnshttp.conf @@ -5,7 +5,6 @@ snabbflow-config { flow-director { class { exporter dnshttp; - order 1; filter "(ip or ip6) and ((udp port 53) or tcp dst port 80)"; continue true; } diff --git a/src/program/lwaftr/README b/src/program/lwaftr/README index 6aa203703f..c5c159a46e 100644 --- a/src/program/lwaftr/README +++ b/src/program/lwaftr/README @@ -3,7 +3,6 @@ Usage: snabb lwaftr check snabb lwaftr compile-configuration snabb lwaftr generate-configuration - snabb lwaftr migrate-configuration snabb lwaftr monitor snabb lwaftr query snabb lwaftr quickcheck diff --git a/src/program/lwaftr/migrate_configuration/README b/src/program/lwaftr/migrate_configuration/README deleted file mode 100644 index 5301fb878c..0000000000 --- a/src/program/lwaftr/migrate_configuration/README +++ /dev/null @@ -1,37 +0,0 @@ -Usage: migrate-configuration LWAFTR.CONF - -Options: - -h, --help Print usage information. - -f, --from=VERSION Specify version from which to migrate. - -Migrate an old-style configuration and binding table to the new YANG -configuration. LWAFTR.CONF should be the name of an old lwAFTR -configuration. Available VERSION values are: - - legacy - Configuration from pre-v3.0.0 lwAFTR. - 3.0.1 - lwAFTR versions where "container" nodes in schemas are missing - corresponding nodes in the data unless "presence true" is - specified. - 3.0.1.1 - lwAFTR development snapshot where "br" fields of softwires were - 0-based instead of 1-based. - 3.2.0 - lwAFTR versions where "br" fields were indexes for the "br-address" - leaf-list instead of "br-address" IPv6 entries on the softwire. - 2017.07.01 - lwAFTR changes to multiprcoess configuration with the introduction of the - instance list. 
- - This change will put place holders "IPv4 PCI Address" and "IPv6 PCI Address" - where the external and internal PCI addresses of your NICs go respectively. - This should be changed however when running the lwAFTR with only one - instance defined a in-memory migration will occur on each run of the lwAFTR - please refer to program/lwaftr/doc/running.md documentation for further - information. - -The default version is "legacy". - -The resulting up-to-date configuration will be printed on standard -output, ready to be saved to a new file. diff --git a/src/program/lwaftr/migrate_configuration/README.inc b/src/program/lwaftr/migrate_configuration/README.inc deleted file mode 120000 index 100b93820a..0000000000 --- a/src/program/lwaftr/migrate_configuration/README.inc +++ /dev/null @@ -1 +0,0 @@ -README \ No newline at end of file diff --git a/src/program/lwaftr/migrate_configuration/conf_parser.lua b/src/program/lwaftr/migrate_configuration/conf_parser.lua deleted file mode 100644 index 39f82ad32a..0000000000 --- a/src/program/lwaftr/migrate_configuration/conf_parser.lua +++ /dev/null @@ -1,321 +0,0 @@ -module(..., package.seeall) - -local ffi = require("ffi") -local lib = require("core.lib") -local ipv4 = require("lib.protocol.ipv4") -local ipv6 = require("lib.protocol.ipv6") -local ethernet = require("lib.protocol.ethernet") - -Parser = {} - -function Parser.new(file) - local name = file.name - local err - if type(file) == 'string' then - name = file - file, err = io.open(file) - if not file then error(err) end - end - local ret = { column=0, line=1, name=name } - function ret.read_char() return file:read(1) end - function ret.cleanup() - function ret.cleanup() end - return file:close() - end - ret.peek_char = ret.read_char() - return setmetatable(ret, {__index=Parser}) -end - -function Parser:error(msg, ...) 
- self.cleanup() - error(('%s:%d:%d: error: '..msg):format( - self.name or '', self.line, self.column, ...)) -end - -function Parser:next() - local chr = self.peek_char - if chr == '\n' then - self.column = 0 - self.line = self.line + 1 - elseif chr then - self.column = self.column + 1 - else - self.cleanup() - end - self.peek_char = self.read_char() - return chr -end - -function Parser:peek() return self.peek_char end -function Parser:is_eof() return not self:peek() end - -function Parser:check(expected) - if self:peek() == expected then - if expected then self:next() end - return true - end - return false -end - -function Parser:consume(expected) - if not self:check(expected) then - local ch = self:peek() - if ch == nil then - self:error("while looking for '%s', got EOF", expected) - elseif expected then - self:error("expected '%s', got '%s'", expected, ch) - else - self:error("expected EOF, got '%s'", ch) - end - end -end - -function Parser:take_while(pattern) - local res = {} - while not self:is_eof() and self:peek():match(pattern) do - table.insert(res, self:next()) - end - return table.concat(res) -end - -function Parser:consume_token(pattern, expected) - local tok = self:take_while(pattern) - if tok:lower() ~= expected then - self:error("expected '%s', got '%s'", expected, tok) - end -end - -function Parser:skip_whitespace() - self:take_while('%s') - -- Skip comments, which start with # and continue to the end of line. 
- while self:check('#') do - self:take_while('[^\n]') - self:take_while('%s') - end -end - -function Parser:parse_uint(min, max) - local tok = self:take_while('%d') - if tok == '' then self:error('expected a number') end - if #tok > #(tostring(max)) then - self:error('numeric constant too long: %s', tok) - end - local uint = tonumber(tok) - if uint < min or uint > max then - self:error('numeric constant out of range: %d', uint) - end - return uint -end - -function Parser:parse_psid_param() return self:parse_uint(0, 16) end -function Parser:parse_ipv4_quad() return self:parse_uint(0, 255) end - -function Parser:parse_property_list(spec, bra, ket) - local res = {} - self:skip_whitespace() - if bra then - self:consume(bra) - self:skip_whitespace() - end - while not self:check(ket) do - local key = self:take_while('[%w_]') - if key == '' then - self:error("expected a key=value property or a closing '%s'", ket) - end - if res[key] then self:error('duplicate key: %s', key) end - if not spec.parse[key] then self:error('unexpected key: "%s"', key) end - self:skip_whitespace() - self:consume('=') - self:skip_whitespace() - local val = spec.parse[key](self) - res[key] = val - - -- Key-value pairs are separated by newlines or commas, and - -- terminated by the ket. A trailing comma is optional. - local line = self.line - self:skip_whitespace() - local has_comma = self:check(',') - if has_comma then self:skip_whitespace() end - if self:check(ket) then break end - if not has_comma and self.line == line then - self:error('properties should be separated by commas or newlines') - end - end - for k, default in pairs(spec.defaults) do - if res[k] == nil then res[k] = default(res) end - end - spec.validate(self, res) - return res -end - --- Returns a uint8_t[4]. 
-function Parser:parse_ipv4() - local addr_string = self:take_while('[%d.]') - if not addr_string or #addr_string == 0 then - self:error("IPv4 address expected") - end - local addr, err = ipv4:pton(addr_string) - if not addr then self:error('%s', err) end - return addr -end - -function Parser:parse_ipv4_as_uint32() - local addr = self:parse_ipv4() - return ffi.C.htonl(ffi.cast('uint32_t*', addr)[0]) -end - --- Returns a uint8_t[16]. -function Parser:parse_ipv6() - local addr_string = self:take_while('[%x:]') - if not addr_string or #addr_string == 0 then - self:error("IPv6 address expected") - end - local addr, err = ipv6:pton(addr_string) - if not addr then self:error('%s', err) end - return addr -end - --- Returns a uint8_t[6]. -function Parser:parse_mac() - local addr_string = self:take_while('[%x:]') - if not addr_string or #addr_string == 0 then - self:error("Ethernet MAC address expected") - end - -- FIXME: Unlike ipv6:pton, ethernet:pton raises an error if the - -- address is invalid. 
- local success, addr_or_err = pcall(ethernet.pton, ethernet, addr_string) - if not success then self:error('%s', addr_or_err) end - return addr_or_err -end - -function Parser:parse_ipv4_range() - local range_begin, range_end - range_begin = self:parse_ipv4_as_uint32() - self:skip_whitespace() - if self:check('-') then - self:skip_whitespace() - range_end = self:parse_ipv4_as_uint32() - else - range_end = range_begin - end - if range_end < range_begin then - self:error('invalid IPv4 address range (end before begin)') - end - return { min=range_begin, max=range_end } -end - -function Parser:parse_ipv4_range_list() - local ranges = {} - repeat - self:skip_whitespace() - table.insert(ranges, self:parse_ipv4_range()) - self:skip_whitespace() - until not self:check(',') - return ranges -end - -function Parser:parse_quoted_string(quote, escape) - local res = {} - escape = escape or '\\' - while not self:check(quote) do - local ch = self:next() - if ch == escape then ch = self:next() end - if not ch then self:error('EOF while reading quoted string') end - table.insert(res, ch) - end - return table.concat(res) -end - -function Parser:parse_string() - local str - if self:check("'") then str = self:parse_quoted_string("'") - elseif self:check('"') then str = self:parse_quoted_string('"') - else str = self:take_while('[^%s,]') end - return str -end - -function Parser:make_path(orig_path) - if orig_path == '' then self:error('file name is empty') end - if not orig_path:match('^/') and self.name then - -- Relative paths in conf files are relative to the location of the - -- conf file, not the current working directory. - return lib.dirname(self.name)..'/'..orig_path - end - return orig_path -end - -function Parser:parse_file_name() - return self:make_path(self:parse_string()) -end - -function Parser:parse_string_or_file() - local str = self:parse_string() - if not str:match('^<') then - return str - end - -- Remove the angle bracket. 
- local path = self:make_path(str:sub(2)) - local filter, err = lib.readfile(path, "*a") - if filter == nil then - self:error('cannot read filter conf file "%s": %s', path, err) - end - return filter -end - -function Parser:parse_boolean() - local tok = self:take_while('[%a]') - if tok:lower() == 'true' then return true end - if tok:lower() == 'false' then return false end - self:error('expected "true" or "false", instead got "%s"', tok) -end - -function Parser:parse_number() - local tok = self:take_while('[%d.eExX]') - local num = tonumber(tok) - if not num then self:error('expected a number, instead got "%s"', tok) end - return num -end - -function Parser:parse_positive_number() - local num = self:parse_number() - if num <= 0 then - self:error('expected a positive number, instead got %s', - tostring(num)) - end - return num -end - -function Parser:parse_non_negative_number() - local num = self:parse_number() - if num < 0 then - self:error('expected a non-negative number, instead got %s', - tostring(num)) - end - return num -end - -function Parser:parse_mtu() - return self:parse_uint(0,2^16-1) -end - -function Parser:parse_psid() - return self:parse_uint(0,2^16-1) -end - -function Parser.enum_parser(enums) - return function(self) - local tok = self:parse_string() - for k,v in pairs(enums) do - if k:lower() == tok:lower() then return v end - end - -- Not found; make a nice error. - local keys = {} - for k,_ in pairs(enums) do table.insert(keys, k) end - keys = table.concat(keys, ', ') - self:error('bad value: "%s". 
expected one of %s', tok, keys) - end -end - -function Parser:parse_vlan_tag() - return self:parse_uint(0,2^12-1) -end diff --git a/src/program/lwaftr/migrate_configuration/migrate_configuration.lua b/src/program/lwaftr/migrate_configuration/migrate_configuration.lua deleted file mode 100644 index a960c936c8..0000000000 --- a/src/program/lwaftr/migrate_configuration/migrate_configuration.lua +++ /dev/null @@ -1,664 +0,0 @@ -module(..., package.seeall) - -local lib = require('core.lib') -local ffi = require("ffi") -local ipv4 = require("lib.protocol.ipv4") -local rangemap = require("apps.lwaftr.rangemap") -local ctable = require("lib.ctable") -local cltable = require('lib.cltable') -local mem = require('lib.stream.mem') -local util = require('lib.yang.util') -local yang = require('lib.yang.yang') -local binding_table = require("apps.lwaftr.binding_table") -local Parser = require("program.lwaftr.migrate_configuration.conf_parser").Parser -local data = require('lib.yang.data') -local schema = require('lib.yang.schema') - -local br_address_t = ffi.typeof('uint8_t[16]') -local SOFTWIRE_TABLE_LOAD_FACTOR = 0.4 - -local function show_usage(code) - print(require("program.lwaftr.migrate_configuration.README_inc")) - main.exit(code) -end - -local function parse_args(args) - local handlers = {} - local version = 'legacy' - function handlers.h() show_usage(0) end - function handlers.f(v) version = string.lower(v) end - args = lib.dogetopt(args, handlers, "hf:", { help="h", from="f" }) - if #args ~= 1 then show_usage(1) end - return args[1], version -end - -local policies = { - DROP = 1, - ALLOW = 2 -} - -local function required(key) - return function(config) - error('missing required configuration key "'..key..'"') - end -end - -local function required_if(key, otherkey) - return function(config) - if config[otherkey] then - error('missing required configuration key "'..key..'"') - end - end -end - -local function required_at_least_one_of(key, otherkey) - return 
function(config) - if config[otherkey] == nil then - error(string.format("At least one of '%s' and '%s' must be specified", key, otherkey)) - end - end -end - -local function default(val) - return function(config) return val end -end - -local lwaftr_conf_spec = { - parse={ - aftr_ipv4_ip=Parser.parse_ipv4, - aftr_ipv6_ip=Parser.parse_ipv6, - aftr_mac_b4_side=Parser.parse_mac, - aftr_mac_inet_side=Parser.parse_mac, - next_hop6_mac=Parser.parse_mac, - binding_table=Parser.parse_file_name, - hairpinning=Parser.parse_boolean, - icmpv4_rate_limiter_n_packets=Parser.parse_non_negative_number, - icmpv4_rate_limiter_n_seconds=Parser.parse_positive_number, - icmpv6_rate_limiter_n_packets=Parser.parse_non_negative_number, - icmpv6_rate_limiter_n_seconds=Parser.parse_positive_number, - inet_mac=Parser.parse_mac, - ipv4_mtu=Parser.parse_mtu, - ipv6_mtu=Parser.parse_mtu, - max_fragments_per_reassembly_packet=Parser.parse_positive_number, - max_ipv4_reassembly_packets=Parser.parse_positive_number, - max_ipv6_reassembly_packets=Parser.parse_positive_number, - next_hop_ipv4_addr=Parser.parse_ipv4, - next_hop_ipv6_addr=Parser.parse_ipv6, - policy_icmpv4_incoming=Parser.enum_parser(policies), - policy_icmpv4_outgoing=Parser.enum_parser(policies), - policy_icmpv6_incoming=Parser.enum_parser(policies), - policy_icmpv6_outgoing=Parser.enum_parser(policies), - v4_vlan_tag=Parser.parse_vlan_tag, - v6_vlan_tag=Parser.parse_vlan_tag, - vlan_tagging=Parser.parse_boolean, - ipv4_ingress_filter=Parser.parse_string_or_file, - ipv4_egress_filter=Parser.parse_string_or_file, - ipv6_ingress_filter=Parser.parse_string_or_file, - ipv6_egress_filter=Parser.parse_string_or_file, - }, - defaults={ - aftr_ipv4_ip=required('aftr_ipv4_ip'), - aftr_ipv6_ip=required('aftr_ipv6_ip'), - aftr_mac_b4_side=required('aftr_mac_b4_side'), - aftr_mac_inet_side=required('aftr_mac_inet_side'), - next_hop6_mac=required_at_least_one_of('next_hop6_mac', 'next_hop_ipv6_addr'), - binding_table=required('binding_table'), - 
hairpinning=default(true), - icmpv4_rate_limiter_n_packets=default(6e5), - icmpv4_rate_limiter_n_seconds=default(2), - icmpv6_rate_limiter_n_packets=default(6e5), - icmpv6_rate_limiter_n_seconds=default(2), - inet_mac=required_at_least_one_of('inet_mac', 'next_hop_ipv4_addr'), - ipv4_mtu=default(1460), - ipv6_mtu=default(1500), - max_fragments_per_reassembly_packet=default(40), - max_ipv4_reassembly_packets=default(20000), -- Just under 500 megs memory - max_ipv6_reassembly_packets=default(20000), -- Just under 500 megs memory - next_hop_ipv4_addr = required_at_least_one_of('next_hop_ipv4_addr', 'inet_mac'), - next_hop_ipv6_addr = required_at_least_one_of('next_hop_ipv6_addr', 'next_hop6_mac'), - policy_icmpv4_incoming=default(policies.ALLOW), - policy_icmpv4_outgoing=default(policies.ALLOW), - policy_icmpv6_incoming=default(policies.ALLOW), - policy_icmpv6_outgoing=default(policies.ALLOW), - v4_vlan_tag=required_if('v4_vlan_tag', 'vlan_tagging'), - v6_vlan_tag=required_if('v6_vlan_tag', 'vlan_tagging'), - vlan_tagging=default(false) - }, - validate=function(parser, config) end -} - -local function parse_psid_map(parser) - local psid_info_spec = { - parse={ - psid_length=Parser.parse_psid_param, - shift=Parser.parse_psid_param - }, - defaults={ - psid_length=function(config) return 16 - (config.shift or 16) end, - shift=function(config) return 16 - (config.psid_length or 0) end - }, - validate=function(parser, config) - if config.psid_length + config.shift > 16 then - parser:error('psid_length %d + shift %d should not exceed 16', - config.psid_length, config.shift) - end - end - } - - local builder = rangemap.RangeMapBuilder.new(binding_table.psid_map_value_t) - local value = binding_table.psid_map_value_t() - parser:skip_whitespace() - parser:consume_token('[%a_]', 'psid_map') - parser:skip_whitespace() - parser:consume('{') - parser:skip_whitespace() - while not parser:check('}') do - local range_list = parser:parse_ipv4_range_list() - local info = 
parser:parse_property_list(psid_info_spec, '{', '}') - value.psid_length, value.shift = info.psid_length, info.shift - for _, range in ipairs(range_list) do - builder:add_range(range.min, range.max, value) - end - parser:skip_whitespace() - if parser:check(',') or parser:check(';') then - parser:skip_whitespace() - end - end - return builder:build(binding_table.psid_map_value_t()) -end - -local function parse_br_addresses(parser) - local addresses = {} - parser:skip_whitespace() - parser:consume_token('[%a_]', 'br_addresses') - parser:skip_whitespace() - parser:consume('{') - parser:skip_whitespace() - while not parser:check('}') do - table.insert(addresses, parser:parse_ipv6()) - parser:skip_whitespace() - if parser:check(',') then parser:skip_whitespace() end - end - local ret = util.ffi_array(ffi.new(ffi.typeof('$[?]', br_address_t), - #addresses), - br_address_t, #addresses) - for i, addr in ipairs(addresses) do ret[i] = addr end - return ret -end - -local function parse_softwires(parser, psid_map, br_address_count) - local function required(key) - return function(config) - error('missing required configuration key "'..key..'"') - end - end - local softwire_spec = { - parse={ - ipv4=Parser.parse_ipv4_as_uint32, - psid=Parser.parse_psid, - b4=Parser.parse_ipv6, - aftr=Parser.parse_non_negative_number - }, - defaults={ - ipv4=required('ipv4'), - psid=function(config) return 0 end, - b4=required('b4'), - aftr=function(config) return 0 end - }, - validate=function(parser, config) - local psid_length = psid_map:lookup(config.ipv4).value.psid_length - if config.psid >= 2^psid_length then - parser:error('psid %d out of range for IP', config.psid) - end - if config.aftr >= br_address_count then - parser:error('only %d br addresses are defined', br_address_count) - end - end - } - - local softwire_key_t = ffi.typeof[[ - struct { - uint32_t ipv4; // Public IPv4 address of this softwire (host-endian). - uint16_t padding; // Zeroes. - uint16_t psid; // Port set ID. 
- } __attribute__((packed)) - ]] - -- FIXME: Pull this type from the yang model, not out of thin air. - local softwire_value_t = ffi.typeof[[ - struct { - uint8_t b4_ipv6[16]; // Address of B4. - uint32_t br; // Which border router (lwAFTR IPv6 address)? - } __attribute__((packed)) - ]] - local map = ctable.new( - { key_type = softwire_key_t, value_type = softwire_value_t }) - local key, value = softwire_key_t(), softwire_value_t() - parser:skip_whitespace() - parser:consume_token('[%a_]', 'softwires') - parser:skip_whitespace() - parser:consume('{') - parser:skip_whitespace() - while not parser:check('}') do - local entry = parser:parse_property_list(softwire_spec, '{', '}') - key.ipv4, key.psid = entry.ipv4, entry.psid - value.br, value.b4_ipv6 = entry.aftr, entry.b4 - local success = pcall(map.add, map, key, value) - if not success then - parser:error('duplicate softwire for ipv4=%s, psid=%d', - lwdebug.format_ipv4(key.ipv4), key.psid) - end - parser:skip_whitespace() - if parser:check(',') then parser:skip_whitespace() end - end - map:resize(map.size / SOFTWIRE_TABLE_LOAD_FACTOR) - return map -end - -local function parse_binding_table(parser) - local psid_map = parse_psid_map(parser) - local br_addresses = parse_br_addresses(parser) - local softwires = parse_softwires(parser, psid_map, #br_addresses) - parser:skip_whitespace() - parser:consume(nil) - return { psid_map = psid_map, - br_addresses = br_addresses, - softwires = softwires } -end - -function load_binding_table(filename) - return parse_binding_table(Parser.new(filename)) -end - - -local function config_to_string(schema, conf) - if type(schema) == "string" then - schema = yang.load_schema_by_name(schema) - end - return mem.call_with_output_string( - yang.print_config_for_schema, schema, conf) -end - - -local function migrate_conf(old) - function convert_ipv4(addr) - if addr then return util.ipv4_pton(ipv4:ntop(addr)) end - end - local external = { - ip = convert_ipv4(old.aftr_ipv4_ip), - mac = 
old.aftr_mac_inet_side, - mtu = old.ipv4_mtu, - ingress_filter = old.ipv4_ingress_filter, - egress_filter = old.ipv4_egress_filter, - allow_incoming_icmp = old.policy_icmpv4_incoming == policies.ALLOW, - generate_icmp_errors = old.policy_icmpv4_outgoing == policies.ALLOW, - vlan_tag = old.v4_vlan_tag, - error_rate_limiting = { - packets = old.icmpv4_rate_limiter_n_packets, - period = old.icmpv4_rate_limiter_n_seconds - }, - reassembly = { - max_fragments_per_packet = old.max_fragments_per_reassembly_packet, - max_packets = old.max_ipv4_reassembly_packets - }, - next_hop = { - ip = convert_ipv4(old.next_hop_ipv4_addr), - mac = old.inet_mac - } - } - - local internal = { - ip = old.aftr_ipv6_ip, - mac = old.aftr_mac_b4_side, - mtu = old.ipv6_mtu, - ingress_filter = old.ipv6_ingress_filter, - egress_filter = old.ipv6_egress_filter, - allow_incoming_icmp = old.policy_icmpv6_incoming == policies.ALLOW, - generate_icmp_errors = old.policy_icmpv6_outgoing == policies.ALLOW, - vlan_tag = old.v6_vlan_tag, - error_rate_limiting = { - packets = old.icmpv6_rate_limiter_n_packets, - period = old.icmpv6_rate_limiter_n_seconds - }, - reassembly = { - max_fragments_per_packet = old.max_fragments_per_reassembly_packet, - max_packets = old.max_ipv6_reassembly_packets - }, - next_hop = { - ip = old.next_hop_ipv6_addr, - mac = old.next_hop6_mac - }, - hairpinning = old.hairpinning - } - - local old_bt = load_binding_table(old.binding_table) - local psid_key_t = ffi.typeof('struct { uint32_t addr; }') - local psid_map = cltable.new({ key_type = psid_key_t }) - for addr, end_addr, params in old_bt.psid_map:iterate() do - local reserved_ports_bit_count = 16 - params.psid_length - params.shift - if end_addr == addr then end_addr = nil end - if reserved_ports_bit_count ~= 16 then - psid_map[psid_key_t(addr)] = { - end_addr = end_addr, - psid_length = params.psid_length, - shift = params.shift, - reserved_ports_bit_count = reserved_ports_bit_count - } - end - end - - -- Build a version of 
snabb-softwire-v1 with a 0-based index so increment_br - -- does the correct thing. - local schema = yang.load_schema_by_name("snabb-softwire-v1") - local bt = schema.body["softwire-config"].body["binding-table"].body - bt.softwire.body.br.default = "0" - return config_to_string(schema, { - softwire_config = { - external_interface = external, - internal_interface = internal, - binding_table = { - psid_map = psid_map, - br_address = old_bt.br_addresses, - softwire = old_bt.softwires - } - } - }) -end - -local function increment_br(conf) - for entry in conf.softwire_config.binding_table.softwire:iterate() do - -- Sadly it's not easy to make an updater that always works for - -- the indexing change, because changing the default from 0 to 1 - -- makes it ambiguous whether a "br" value of 1 comes from the new - -- default, or was present as such in the old configuration. Sad. - if entry.value.br ~= 1 then - entry.value.br = entry.value.br + 1 - end - end - if #conf.softwire_config.binding_table.br_address > 1 then - io.stderr:write('Migrator unable to tell whether br=1 entries are '.. - 'due to new default or old setting; manual '.. - 'verification needed.\n') - io.stderr:flush() - end - return config_to_string('snabb-softwire-v1', conf) -end - -local function remove_address_list(conf) - local bt = conf.softwire_config.binding_table - for key, entry in cltable.pairs(bt.softwire) do - local br = entry.br or 1 - entry.br_address = assert(bt.br_address[br]) - entry.br = nil - end - return conf -end - -local function remove_psid_map(conf) - -- We're actually going to load the psidmap in the schema so ranges can easily be - -- looked up. With support of end-addr simply trying to lookup by addr will fail. - -- Luckily this is the last time this should bother us hopefully. 
- local function load_range_map(conf) - local rangemap = require("apps.lwaftr.rangemap") - local psid_map_value_t = binding_table.psid_map_value_t - - -- This has largely been taken from the binding_table.lua at 3db2896 - -- however it only builds the psidmap and not the entire binding table. - local psid_builder = rangemap.RangeMapBuilder.new(psid_map_value_t) - local psid_value = psid_map_value_t() - for k, v in cltable.pairs(conf.psid_map) do - local psid_length, shift = v.psid_length, v.shift - shift = shift or 16 - psid_length - (v.reserved_ports_bit_count or 0) - assert(psid_length + shift <= 16, - 'psid_length '..psid_length..' + shift '..shift.. - ' should not exceed 16') - psid_value.psid_length, psid_value.shift = psid_length, shift - psid_builder:add_range(k.addr, v.end_addr or k.addr, psid_value) - end - return psid_builder:build(psid_map_value_t()) - end - - local psid_map = load_range_map(conf.softwire_config.binding_table) - - -- Remove the psid-map and add it to the softwire. - local bt = conf.softwire_config.binding_table - for key, entry in cltable.pairs(bt.softwire) do - -- Find the port set for the ipv4 address - local port_set = psid_map:lookup(key.ipv4) - assert(port_set, "Unable to migrate conf: softwire without psidmapping") - - -- Add the psidmapping to the softwire - local shift, length = port_set.value.shift, port_set.value.psid_length - entry.port_set = { - psid_length=length, - reserved_ports_bit_count=(16 - shift - length) - } - end - - return conf -end - -local function v3_migration(src, conf_file) - local v2_schema = yang.load_schema_by_name("snabb-softwire-v2") - local v3_schema = yang.load_schema_by_name("snabb-softwire-v3") - local conf = yang.load_config_for_schema( - v2_schema, mem.open_input_string(src, conf_file)) - - -- Move leaf external-interface/device up as external-device. 
- for device, instance in pairs(conf.softwire_config.instance) do - for id, queue in pairs(instance.queue) do - if queue.external_interface.device then - if instance.external_device then - io.stderr:write('Multiple external devices detected; '.. - 'manual verification needed.\n') - io.stderr:flush() - end - instance.external_device = queue.external_interface.device - queue.external_interface.device = nil - end - end - end - - return config_to_string(v3_schema, conf) -end - -local function multiprocess_migration(src, conf_file) - local device = "IPv6 PCI Address" - local ex_device = "IPv4 PCI address" - - -- We should build up a hybrid schema from parts of v1 and v2. - local v1_schema = yang.load_schema_by_name("snabb-softwire-v1") - -- Make sure we load a fresh schema, as not to mutate a memoized copy - local hybridscm = schema.load_schema(schema.load_schema_source_by_name("snabb-softwire-v2")) - local v1_external = v1_schema.body["softwire-config"].body["external-interface"] - local v1_internal = v1_schema.body["softwire-config"].body["internal-interface"] - local external = hybridscm.body["softwire-config"].body["external-interface"] - local internal = hybridscm.body["softwire-config"].body["internal-interface"] - local queue = hybridscm.body["softwire-config"].body.instance.body.queue - - -- Remove the mandatory requirements - queue.body["external-interface"].body.ip.mandatory = false - queue.body["external-interface"].body.mac.mandatory = false - queue.body["external-interface"].body["next-hop"].mandatory = false - queue.body["internal-interface"].body.ip.mandatory = false - queue.body["internal-interface"].body.mac.mandatory = false - queue.body["internal-interface"].body["next-hop"].mandatory = false - - hybridscm.body["softwire-config"].body["external-interface"] = v1_external - hybridscm.body["softwire-config"].body["internal-interface"] = v1_internal - - -- Extract the grammar, load the config and find the key - local hybridgmr = 
data.config_grammar_from_schema(hybridscm) - local instgmr = hybridgmr.members["softwire-config"].members.instance - local conf = yang.load_config_for_schema( - hybridscm, mem.open_input_string(src, conf_file)) - local queue_key = ffi.typeof(instgmr.values.queue.key_ctype) - local global_external_if = conf.softwire_config.external_interface - local global_internal_if = conf.softwire_config.internal_interface - -- If there is a external device listed we should include that too. - - - -- Build up the instance list - local instance = { - [device] = {queue={}}, - } - local key = 0 - local value = { - external_interface = { - device = ex_device, - ip = conf.softwire_config.external_interface.ip, - mac = conf.softwire_config.external_interface.mac, - next_hop = {}, - vlan_tag = conf.softwire_config.external_interface.vlan_tag - }, - internal_interface = { - ip = conf.softwire_config.internal_interface.ip, - mac = conf.softwire_config.internal_interface.mac, - next_hop = {}, - vlan_tag = conf.softwire_config.internal_interface.vlan_tag - } - } - - -- Add the list to the config - if global_external_if.next_hop.mac then - value.external_interface.next_hop.mac = global_external_if.next_hop.mac - elseif global_external_if.next_hop.ip then - value.external_interface.next_hop.ip = global_external_if.next_hop.ip - else - error("One or both of next-hop values must be provided.") - end - - if global_internal_if.next_hop.mac then - value.internal_interface.next_hop.mac = global_internal_if.next_hop.mac - elseif global_internal_if.next_hop.ip then - value.internal_interface.next_hop.ip = global_internal_if.next_hop.ip - else - error("One or both of next-hop values must be provided.") - end - instance[device].queue[key] = value - conf.softwire_config.instance = instance - - -- Remove the fields which no longer should exist - conf.softwire_config.internal_interface.ip = nil - conf.softwire_config.internal_interface.mac = nil - conf.softwire_config.internal_interface.next_hop = nil - 
conf.softwire_config.internal_interface.vlan_tag = nil - conf.softwire_config.external_interface.ip = nil - conf.softwire_config.external_interface.mac = nil - conf.softwire_config.external_interface.next_hop = nil - conf.softwire_config.external_interface.vlan_tag = nil - - return config_to_string(hybridscm, conf) -end - -local function v2_migration(src, conf_file) - -- Lets create a custom schema programmatically as an intermediary so we can - -- switch over to v2 of snabb-softwire config. - local v1_schema = yang.load_schema_by_name("snabb-softwire-v1") - local v1_binding_table = v1_schema.body["softwire-config"].body["binding-table"] - - -- Make sure we load a fresh schema, as not to mutate a memoized copy - local hybridscm = schema.load_schema(schema.load_schema_source_by_name("snabb-softwire-v2")) - local binding_table = hybridscm.body["softwire-config"].body["binding-table"] - - -- Add the schema from v1 that we need to convert them. - binding_table.body["br-address"] = v1_binding_table.body["br-address"] - binding_table.body["psid-map"] = v1_binding_table.body["psid-map"] - binding_table.body.softwire.body.br = v1_binding_table.body.softwire.body.br - binding_table.body.softwire.body.padding = v1_binding_table.body.softwire.body.padding - - -- Add the external and internal interfaces - local hybridconfig = hybridscm.body["softwire-config"] - local v1config = v1_schema.body["softwire-config"] - hybridconfig.body["external-interface"] = v1config.body["external-interface"] - hybridconfig.body["internal-interface"] = v1config.body["internal-interface"] - - -- Remove the mandatory requirement on softwire.br-address for the migration - binding_table.body["softwire"].body["br-address"].mandatory = false - - -- Remove the mandatory requirement on softwire.port-set.psid-length for the migration - binding_table.body["softwire"].body["port-set"].body["psid-length"].mandatory = false - - local conf = yang.load_config_for_schema( - hybridscm, mem.open_input_string(src, 
conf_file)) - - -- Remove the br-address leaf-list and add it onto the softwire. - conf = remove_address_list(conf) - conf.softwire_config.binding_table.br_address = nil - - -- Remove the psid-map and add it to the softwire. - conf = remove_psid_map(conf) - conf.softwire_config.binding_table.psid_map = nil - - return config_to_string(hybridscm, conf) -end - -local function migrate_legacy(stream) - local conf = Parser.new(stream):parse_property_list(lwaftr_conf_spec) - conf = migrate_conf(conf) - return conf -end - - -local function migrate_3_0_1(conf_file, src) - if src:sub(0, 15) == "softwire-config" then - return src - else - return "softwire-config { "..src.." }" - end -end - -local function migrate_3_0_1bis(conf_file, src) - return increment_br( - yang.load_config_for_schema_by_name( - 'snabb-softwire-v1', mem.open_input_string(src, conf_file))) -end - -local function migrate_3_2_0(conf_file, src) - return v2_migration(src, conf_file) -end - -local function migrate_2017_07_01(conf_file, src) - return multiprocess_migration(src, conf_file) -end - -local function migrate_2022_01_19(conf_file, src) - return v3_migration(src, conf_file) -end - - -local migrations = { - {version='legacy', migrator=migrate_legacy}, - {version='3.0.1', migrator=migrate_3_0_1}, - {version='3.0.1.1', migrator=migrate_3_0_1bis}, - {version='3.2.0', migrator=migrate_3_2_0}, - {version='2017.07.01',migrator=migrate_2017_07_01}, - {version='2022.01.19',migrator=migrate_2022_01_19}, -} - - -function run(args) - local conf_file, version = parse_args(args) - - -- Iterate over migrations until we've found the - local start - for id, migration in pairs(migrations) do - if migration.version == version then - start = id - 1 - end - end - if start == nil then - io.stderr:write("error: unknown version: "..version.."\n") - show_usage(1) - end - - local conf = io.open(conf_file, "r"):read("*a") - for _, migration in next,migrations,start do - io.stderr:write(("-> %s 
migration\n"):format(migration.version)) - conf = migration.migrator(conf_file, conf) - -- Prompt the garbage collection to do a full collect after each migration - collectgarbage() - end - - print(conf) - main.exit(0) -end diff --git a/src/program/lwaftr/run/run.lua b/src/program/lwaftr/run/run.lua index 20cb2fbae4..49a9577047 100644 --- a/src/program/lwaftr/run/run.lua +++ b/src/program/lwaftr/run/run.lua @@ -7,7 +7,6 @@ local csv_stats = require("program.lwaftr.csv_stats") local ethernet = require("lib.protocol.ethernet") local lib = require("core.lib") local setup = require("program.lwaftr.setup") -local cltable = require("lib.cltable") local ingress_drop_monitor = require("lib.timers.ingress_drop_monitor") local lwutil = require("apps.lwaftr.lwutil") local engine = require("core.app") diff --git a/src/program/lwaftr/setup.lua b/src/program/lwaftr/setup.lua index f565ce87f7..298224849d 100644 --- a/src/program/lwaftr/setup.lua +++ b/src/program/lwaftr/setup.lua @@ -19,7 +19,6 @@ local ipv6_reassemble = require("apps.ipv6.reassemble") local ndp = require("apps.lwaftr.ndp") local vlan = require("apps.vlan.vlan") local pci = require("lib.hardware.pci") -local cltable = require("lib.cltable") local ipv4 = require("lib.protocol.ipv4") local ipv6 = require("lib.protocol.ipv6") local ethernet = require("lib.protocol.ethernet") diff --git a/src/program/lwaftr/tests/config-migrations/selftest.sh b/src/program/lwaftr/tests/config-migrations/selftest.sh deleted file mode 100755 index 4ac6f1f575..0000000000 --- a/src/program/lwaftr/tests/config-migrations/selftest.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# Attempt to migration from legacy to latest -LEGACY_OUT=`./snabb lwaftr migrate-configuration -f legacy \ - program/lwaftr/tests/configdata/legacy.conf` - -if [[ "$?" 
-ne "0" ]]; then - echo "Legacy configuration migration failed (status code != 0)" - echo "$LEGACY_OUT" - exit 1 -fi - - -# Attempt to migrate part way through the chain -V320_OUT=`./snabb lwaftr migrate-configuration -f 3.2.0 \ - program/lwaftr/tests/configdata/3.2.0.conf` - -if [[ "$?" -ne "0" ]]; then - echo "3.2.0 configuration migration failed (status code != 0)" - echo "$V320_OUT" - exit 1 -fi diff --git a/src/program/lwaftr/tests/propbased/genyang.lua b/src/program/lwaftr/tests/propbased/genyang.lua index 7459516456..8491042a06 100644 --- a/src/program/lwaftr/tests/propbased/genyang.lua +++ b/src/program/lwaftr/tests/propbased/genyang.lua @@ -326,7 +326,7 @@ local function data_generator_from_grammar(production, generate_invalid) for k,v in pairs(t) do ret[k]=v end return ret end - function handlers.table(keyword, production) + function handlers.list(keyword, production) local keys = {} for k,v in pairs(production.keys) do keys[k] = shallow_copy(v) @@ -398,7 +398,7 @@ local function path_generator_from_grammar(production, generate_invalid) return head..'[position()='..math.random(1,100)..']' end end - function handlers.table(keyword, production) + function handlers.list(keyword, production) local keys, values, gen_key, gen_tail = {}, {}, {}, {} for k,v in pairs(production.keys) do table.insert(keys, k) @@ -534,7 +534,7 @@ function selftest() local val = value_from_type({ primitive_type="binary", range={ value={} }}) local cmd = string.format("echo \"%s\" | base64 -d > /dev/null", val) - assert(os.execute(cmd) == 0, string.format("test value: %s", val)) + assert(os.execute(cmd), string.format("test value: %s", val)) end print('selftest: ok') end