--
-- (C) 2017-22 - ntop.org
--
dirs = ntop.getDirs()
package.path = dirs.installdir .. "/scripts/lua/modules/?/init.lua;" .. package.path
local ntop_info = ntop.getInfo()
local os_utils = require "os_utils"
local host_pools_nedge = {}
host_pools_nedge.DEFAULT_POOL_ID = "0"
host_pools_nedge.DEFAULT_ROUTING_POLICY_ID = "1"
host_pools_nedge.FIRST_AVAILABLE_POOL_ID = "2" -- 0 is the default, 1 is the jail
host_pools_nedge.DEFAULT_POOL_NAME = "Not Assigned"
host_pools_nedge.MAX_NUM_POOLS = 128 -- Note: keep in sync with C
--
-- BEGIN NEDGE specific code
--
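-- Maps a captive portal username to the id of its host pool (nil if the user has no pool assigned)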
function host_pools_nedge.usernameToPoolId(username)
local res = ntop.getPref("ntopng.user."..string.lower(username)..".host_pool_id")
return ternary(not isEmptyString(res), res, nil)
end
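-- In nEdge the pool name doubles as the captive portal username, so the reverse mapping just returns the pool name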
function host_pools_nedge.poolIdToUsername(pool_id)
local ifid = getInterfaceId(ifname) -- in nEdge this always takes one interface
return host_pools_nedge.getPoolName(pool_id)
end
function host_pools_nedge.getUserUrl(pool_id)
return ntop.getHttpPrefix() .."/lua/pro/nedge/admin/nf_edit_user.lua?username=" ..
ternary(tostring(pool_id) == host_pools_nedge.DEFAULT_POOL_ID, "", host_pools_nedge.poolIdToUsername(pool_id))
end
--
-- END NEDGE specific code
--
host_pools_nedge.LIMITED_NUMBER_POOL_MEMBERS = ntop_info["constants.max_num_pool_members"]
-- this takes into account the special pools
host_pools_nedge.LIMITED_NUMBER_TOTAL_HOST_POOLS = ntop_info["constants.max_num_host_pools"]
-- this does not take into account the special pools
host_pools_nedge.LIMITED_NUMBER_USER_HOST_POOLS = host_pools_nedge.LIMITED_NUMBER_TOTAL_HOST_POOLS - 1
local function get_pool_members_key(pool_id)
return "ntopng.prefs.host_pools.members." .. pool_id
end
local function get_pool_ids_key()
return "ntopng.prefs.host_pools.pool_ids"
end
local function get_pool_details_key(pool_id)
return "ntopng.prefs.host_pools.details." .. pool_id
end
local function get_pools_serialized_key(ifid)
return "ntopng.serialized_host_pools.ifid_" .. ifid
end
-- It is safe to call this multiple times
local function initInterfacePools()
host_pools_nedge.createPool(host_pools_nedge.DEFAULT_POOL_ID, host_pools_nedge.DEFAULT_POOL_NAME)
end
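-- Read/write a single field (name, children_safe, ...) of a pool's redis detail hash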
function host_pools_nedge.getPoolDetail(pool_id, detail)
local details_key = get_pool_details_key(pool_id)
return ntop.getHashCache(details_key, detail)
end
function host_pools_nedge.setPoolDetail(pool_id, detail, value)
local details_key = get_pool_details_key(pool_id)
return ntop.setHashCache(details_key, detail, tostring(value))
end
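-- Traces host pool events to the console when the ntopng.prefs.enable_host_pools_log preference is set to 1;
-- the name of the calling function, when available, is prepended to the event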
local function traceHostPoolEvent(severity, event)
if ntop.getPref("ntopng.prefs.enable_host_pools_log") ~= "1" then
return
end
local f_name = debug.getinfo(2, "n").name
if f_name ~= nil then
f_name = string.format("[%s] ", f_name)
end
traceError(severity, TRACE_CONSOLE, string.format("%s%s", f_name or '', event))
end
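-- Adds a member to the pool's redis set, enforcing the per-pool member limit.
-- Members of the default pool are never stored explicitly.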
local function addMemberToRedisPool(pool_id, member_key)
if pool_id == host_pools_nedge.DEFAULT_POOL_ID then
-- avoid adding default pool members explicitly
traceHostPoolEvent(TRACE_NORMAL,
string.format("Setting DEFAULT_POOL_ID (aka 'Not Assigned'). [pool_id: %d][member: %s]",
host_pools_nedge.DEFAULT_POOL_ID, member_key))
return true
end
local members_key = get_pool_members_key(pool_id)
local n = table.len(ntop.getMembersCache(members_key) or {})
if n >= host_pools_nedge.LIMITED_NUMBER_POOL_MEMBERS then
traceHostPoolEvent(TRACE_ERROR, string.format("Unable to set host pool, maximum number of pool members hit. [max num pool members: %d][member: %s] [members_key: %s]", host_pools_nedge.LIMITED_NUMBER_POOL_MEMBERS, member_key, members_key))
return false
end
ntop.setMembersCache(members_key, member_key)
traceHostPoolEvent(TRACE_NORMAL, string.format("Member added to pool. [member: %s] [members_key: %s]", member_key, members_key))
return true
end
--------------------------------------------------------------------------------
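-- Returns the raw (unparsed) member keys of a pool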
function host_pools_nedge.getPoolMembersRaw(pool_id)
local members_key = get_pool_members_key(pool_id)
return ntop.getMembersCache(members_key) or {}
end
-- Export host pools
function host_pools_nedge.export()
local pools = {}
for _,pool in pairs(host_pools_nedge.getPoolsList()) do
pool.members = host_pools_nedge.getPoolMembersRaw(pool.id)
pools[pool.id] = pool
end
return pools
end
-- Import host pools; in case of a conflict (same name) the existing pool is replaced
function host_pools_nedge.import(pools)
local existing_pools = host_pools_nedge.getPoolsList()
local retval = true
-- Import pools
for _,pool in pairs(pools) do
for k,existing_pool in pairs(existing_pools) do
if pool.name == existing_pool.name then
-- Same name, delete the old pool and reuse the id
pool.id = existing_pool.id
host_pools_nedge.emptyPool(existing_pool.id)
host_pools_nedge.deletePool(existing_pool.id)
end
end
-- Add pool
host_pools_nedge.createPool(pool.id, pool.name, pool.children_safe,
pool.enforce_quotas_per_pool_member, pool.enforce_shapers_per_pool_member,
true)
-- Add members
for _,member in ipairs(pool.members) do
local success = addMemberToRedisPool(pool.id, member)
if not success then
retval = false
end
end
end
return retval
end
--------------------------------------------------------------------------------
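-- Creates a pool with the given id and details. Fails when the maximum number of pools is reached;
-- if the pool already exists it is left untouched, unless ignore_exist is set.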
function host_pools_nedge.createPool(pool_id, pool_name, children_safe,
enforce_quotas_per_pool_member, enforce_shapers_per_pool_member, ignore_exist)
local details_key = get_pool_details_key(pool_id)
local ids_key = get_pool_ids_key()
local members = ntop.getMembersCache(ids_key) or {}
local n = table.len(members)
if n >= host_pools_nedge.LIMITED_NUMBER_TOTAL_HOST_POOLS then
return false
end
if not ignore_exist then
for _, m in pairs(members) do
if m == pool_id then
return true
end
end
end
ntop.setMembersCache(ids_key, pool_id)
ntop.setHashCache(details_key, "name", pool_name)
ntop.setHashCache(details_key, "children_safe", tostring(children_safe or false))
ntop.setHashCache(details_key, "enforce_quotas_per_pool_member", tostring(enforce_quotas_per_pool_member or false))
ntop.setHashCache(details_key, "enforce_shapers_per_pool_member", tostring(enforce_shapers_per_pool_member or false))
ntop.setHashCache(details_key, "forge_global_dns", "true")
return true
end
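-- Deletes a pool: its members, its detail hash and its id, plus the serialized quota data and the timeseries on every interface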
function host_pools_nedge.deletePool(pool_id)
local ts_utils = require "ts_utils"
local ids_key = get_pool_ids_key()
local details_key = get_pool_details_key(pool_id)
local members_key = get_pool_members_key(pool_id)
host_pools_nedge.emptyPool(pool_id)
ntop.delMembersCache(ids_key, pool_id)
ntop.delCache(details_key)
ntop.delCache(members_key)
-- Delete serialized values and timeseries across all interfaces
for ifid, ifname in pairs(interface.getIfNames()) do
local serialized_key = get_pools_serialized_key(ifid)
ntop.delHashCache(serialized_key, pool_id)
ts_utils.delete("host_pool", {ifid = tonumber(ifid), pool = pool_id})
end
end
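-- Tells whether the member already belongs to a pool and returns its membership info,
-- including the normalized key that must be used to refer to the member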
function getMembershipInfo(member_and_vlan)
-- Check if the member is already in another pool
local hostinfo = hostkey2hostinfo(member_and_vlan)
local addr, mask = splitNetworkPrefix(hostinfo["host"])
local vlan = hostinfo["vlan"]
local is_mac = isMacAddress(addr)
if not is_mac then
addr = ntop.networkPrefix(addr, mask)
end
local find_info = interface.findMemberPool(addr, vlan, is_mac)
-- This is the normalized key, which should always be used to refer to the member
local key
if not is_mac then
key = host2member(addr, vlan, mask)
else
key = addr
end
local info = {key=key}
local exists = false
if find_info ~= nil then
-- The host has been found
if is_mac or ((not is_mac)
and (find_info.matched_prefix == addr)
and (find_info.matched_bitmask == mask)) then
info["existing_member_pool"] = find_info.pool_id
exists = true
end
end
return exists, info
end
--
-- Note:
--
-- When strict_host_mode is not set and the host has a known MAC address, the pool
-- change is applied to the MAC address rather than to the IP address (always when
-- moving to a non-default pool, or when the MAC is already bound to a pool).
--
function host_pools_nedge.changeMemberPool(member_and_vlan, new_pool, info --[[optional]], strict_host_mode --[[optional]])
traceHostPoolEvent(TRACE_NORMAL,
string.format("Pool change requested. [member: %s][new_pool: %s][strict_host_mode: %s]",
member_and_vlan, new_pool, tostring(strict_host_mode)))
if not strict_host_mode then
local hostkey, is_network = host_pools_nedge.getMemberKey(member_and_vlan)
if (not is_network) and (not isMacAddress(member_and_vlan)) then
-- this is a single host, try to get the MAC address
if info == nil then
local hostinfo = hostkey2hostinfo(hostkey)
info = interface.getHostInfo(hostinfo["host"], hostinfo["vlan"])
end
if not isEmptyString(info["mac"]) and (info["mac"] ~= "00:00:00:00:00:00") then
local mac_has_pool, mac_pool_info = getMembershipInfo(info["mac"])
-- Two cases:
-- 1. if we are moving to a well defined pool, we must set the mac pool
-- 2. if we are moving to the default pool, we must set the mac pool only
-- if the mac already has a pool, otherwise we set the ip pool
if (new_pool ~= host_pools_nedge.DEFAULT_POOL_ID) or mac_has_pool then
-- we must change the MAC address in order to change the host pool
member_and_vlan = info["mac"]
end
end
end
end
local member_exists, info = getMembershipInfo(member_and_vlan)
local prev_pool
if member_exists then
-- use the normalized key
member_and_vlan = info.key
prev_pool = info.existing_member_pool
else
prev_pool = host_pools_nedge.DEFAULT_POOL_ID
end
if prev_pool == new_pool then
traceHostPoolEvent(TRACE_ERROR,
string.format("Pool did't change. Exiting. [member: %s][prev_pool: %s][new_pool: %s]",
member_and_vlan, prev_pool, new_pool))
return false
end
traceHostPoolEvent(TRACE_NORMAL,
string.format("Pool change prepared. [member: %s][info.key: %s][prev_pool: %s][new_pool: %s]",
member_and_vlan, tostring(info.key), prev_pool, new_pool))
host_pools_nedge.deletePoolMember(prev_pool, info.key)
addMemberToRedisPool(new_pool, info.key)
return true
end
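-- Adds a member to a pool unless it already belongs to one; returns (success, membership info)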
function host_pools_nedge.addPoolMember(pool_id, member_and_vlan)
traceHostPoolEvent(TRACE_NORMAL,
string.format("Pool member addition requested. [member: %s][pool_id: %s]",
member_and_vlan, pool_id))
local member_exists, info = getMembershipInfo(member_and_vlan)
if member_exists then
traceHostPoolEvent(TRACE_NORMAL, string.format("Member already in pool. [pool_id: %d] [member: %s]", pool_id, member_and_vlan))
return false, info
else
local rv = addMemberToRedisPool(pool_id, info.key)
return rv, info
end
end
function host_pools_nedge.deletePoolMember(pool_id, member_and_vlan)
traceHostPoolEvent(TRACE_NORMAL,
string.format("Pool member deletion requested. [member: %s][pool_id: %s]",
member_and_vlan, pool_id))
local members_key = get_pool_members_key(pool_id)
-- Possibly delete a non-volatile member
ntop.delMembersCache(members_key, member_and_vlan)
end
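-- Returns the list of pools sorted by id; when without_info is set, only the pool ids are returned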
function host_pools_nedge.getPoolsList(without_info)
local ids_key = get_pool_ids_key()
local ids = ntop.getMembersCache(ids_key)
if not ids then ids = {} end
for i, id in pairs(ids) do
ids[i] = tonumber(id)
end
local pools = {}
initInterfacePools()
for _, pool_id in pairsByValues(ids, asc) do
pool_id = tostring(pool_id)
local pool
if without_info then
pool = {id=pool_id}
else
pool = {
id = pool_id,
name = host_pools_nedge.getPoolName(pool_id),
children_safe = host_pools_nedge.getChildrenSafe(pool_id),
enforce_quotas_per_pool_member = host_pools_nedge.getEnforceQuotasPerPoolMember(pool_id),
enforce_shapers_per_pool_member = host_pools_nedge.getEnforceShapersPerPoolMember(pool_id),
}
end
pools[#pools + 1] = pool
end
return pools
end
-- Delete a member (IP or MAC address) from all the pools it belongs to, if any
function host_pools_nedge.deletePoolMemberFromAllPools(member)
for _, pool in pairs(host_pools_nedge.getPoolsList()) do
host_pools_nedge.deletePoolMember(pool.id, member)
end
end
function host_pools_nedge.getPoolMembers(pool_id)
local members_key = get_pool_members_key(pool_id)
local members = {}
local all_members = ntop.getMembersCache(members_key) or {}
for _,v in pairsByValues(all_members, asc) do
local hostinfo = hostkey2hostinfo(v)
members[#members + 1] = {address=hostinfo["host"], vlan=hostinfo["vlan"], key=v}
end
return members
end
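-- Normalizes a member into its key: a MAC address, a network (when the prefix is not a full /32 or /128),
-- or a single host. Also returns whether the key identifies a network.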
function host_pools_nedge.getMemberKey(member)
-- handle vlan
local is_network
local host_key
local address = hostkey2hostinfo(member)["host"]
if isMacAddress(address) then
host_key = address
is_network = false
else
local network, prefix = splitNetworkPrefix(address)
if(((isIPv4(network)) and (prefix ~= 32)) or
((isIPv6(network)) and (prefix ~= 128))) then
-- this is a network
host_key = address
is_network = true
else
-- this is an host
host_key = network
is_network = false
end
end
return host_key, is_network
end
function host_pools_nedge.getPoolName(pool_id)
return host_pools_nedge.getPoolDetail(pool_id, "name")
end
function host_pools_nedge.getChildrenSafe(pool_id)
return toboolean(host_pools_nedge.getPoolDetail(pool_id, "children_safe"))
end
function host_pools_nedge.setChildrenSafe(pool_id, value)
host_pools_nedge.setPoolDetail(pool_id, "children_safe", ternary(value, "true", "false"))
end
function host_pools_nedge.getRoutingPolicyId(pool_id)
local routing_policy_id = host_pools_nedge.getPoolDetail(pool_id, "routing_policy_id")
if isEmptyString(routing_policy_id) then routing_policy_id = host_pools_nedge.DEFAULT_ROUTING_POLICY_ID end
return routing_policy_id
end
function host_pools_nedge.setRoutingPolicyId(pool_id, routing_policy_id)
return host_pools_nedge.setPoolDetail(pool_id, "routing_policy_id", routing_policy_id)
end
function host_pools_nedge.getEnforceQuotasPerPoolMember(pool_id)
return toboolean(host_pools_nedge.getPoolDetail(pool_id, "enforce_quotas_per_pool_member"))
end
function host_pools_nedge.getEnforceShapersPerPoolMember(pool_id)
return toboolean(host_pools_nedge.getPoolDetail(pool_id, "enforce_shapers_per_pool_member"))
end
function host_pools_nedge.emptyPool(pool_id)
local members_key = get_pool_members_key(pool_id)
-- Remove non-volatile members
ntop.delCache(members_key)
end
function host_pools_nedge.emptyPools()
for _, ifname in pairs(interface.getIfNames()) do
local ifid = getInterfaceId(ifname)
local ifstats = interface.getStats()
local pools_list = host_pools_nedge.getPoolsList()
for _, pool in pairs(pools_list) do
host_pools_nedge.emptyPool(pool["id"])
end
end
end
function host_pools_nedge.initPools()
for _, ifname in pairs(interface.getIfNames()) do
local ifid = getInterfaceId(ifname)
local ifstats = interface.getStats()
-- Note: possible shapers are initialized in shaper_utils::initShapers
initInterfacePools()
end
end
function host_pools_nedge.getMacPool(mac_address)
local exists, info = getMembershipInfo(mac_address)
if exists then
return tostring(info.existing_member_pool)
else
return host_pools_nedge.DEFAULT_POOL_ID
end
end
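-- Returns the pools which cannot be deleted, i.e., those bound to a captive portal user
-- active on this interface. NOTE: ifid is read from the caller's environment.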
function host_pools_nedge.getUndeletablePools()
local pools = {}
for user_key,_ in pairs(ntop.getKeysCache("ntopng.user.*.host_pool_id") or {}) do
local pool_id = ntop.getCache(user_key)
if tonumber(pool_id) ~= nil then
local username = string.split(user_key, "%.")[3]
local allowed_ifname = ntop.getCache("ntopng.user."..username..".allowed_ifname")
-- verify if the Captive Portal User is actually active for the interface
if getInterfaceName(ifid) == allowed_ifname then
pools[pool_id] = true
end
end
end
return pools
end
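-- Writes the per-pool timeseries: traffic, blocked flows, optional nDPI breakdown and the
-- number of members (hosts and l2 devices).
-- NOTE: 'when' is not defined here; it is presumably set by the calling 5-minute periodic script.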
function host_pools_nedge.updateRRDs(ifid, dump_ndpi, verbose)
local ts_utils = require "ts_utils"
require "ts_5min"
-- NOTE: requires graph_utils
for pool_id, pool_stats in pairs(interface.getHostPoolsStats() or {}) do
ts_utils.append("host_pool:traffic", {ifid=ifid, pool=pool_id,
bytes_sent=pool_stats["bytes.sent"], bytes_rcvd=pool_stats["bytes.rcvd"]}, when)
if pool_id ~= tonumber(host_pools_nedge.DEFAULT_POOL_ID) then
local flows_dropped = pool_stats["flows.dropped"] or 0
ts_utils.append("host_pool:blocked_flows", {ifid=ifid, pool=pool_id,
num_flows=flows_dropped}, when)
end
-- nDPI stats
if dump_ndpi then
for proto,v in pairs(pool_stats["ndpi"] or {}) do
ts_utils.append("host_pool:ndpi", {ifid=ifid, pool=pool_id, protocol=proto,
bytes_sent=v["bytes.sent"], bytes_rcvd=v["bytes.rcvd"]}, when)
end
end
end
-- Also write info on the number of members per pool, both in terms of hosts and l2 devices
local pools = interface.getHostPoolsInfo() or {}
for pool, info in pairs(pools.num_members_per_pool or {}) do
ts_utils.append("host_pool:hosts", {ifid = ifid, pool = pool, num_hosts = info["num_hosts"]}, when)
ts_utils.append("host_pool:devices", {ifid = ifid, pool = pool, num_devices = info["num_l2_devices"]}, when)
end
end
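-- Formats the stats of a pool into a record for the pools stats table;
-- throughput_type is read from the caller's environment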
function host_pools_nedge.hostpool2record(ifid, pool_id, pool)
local record = {}
record["key"] = tostring(pool_id)
local pool_name = host_pools_nedge.getPoolName(pool_id)
local pool_link = "<A HREF='"..ntop.getHttpPrefix()..'/lua/hosts_stats.lua?pool='..pool_id.."' title='"..pool_name.."'>"..pool_name..'</A>'
record["column_id"] = pool_link
record["column_hosts"] = pool["num_hosts"]..""
record["column_since"] = secondsToTime(os.time() - pool["seen.first"] + 1)
record["column_num_dropped_flows"] = (pool["flows.dropped"] or 0)..""
local sent2rcvd = round((pool["bytes.sent"] * 100) / (pool["bytes.sent"] + pool["bytes.rcvd"]), 0)
record["column_breakdown"] = "<div class='progress'><div class='progress-bar bg-warning' style='width: "
.. sent2rcvd .."%;'>Sent</div><div class='progress-bar bg-success' style='width: " .. (100-sent2rcvd) .. "%;'>Rcvd</div></div>"
if(throughput_type == "pps") then
record["column_thpt"] = pktsToSize(pool["throughput_pps"])
else
record["column_thpt"] = bitsToSize(8*pool["throughput_bps"])
end
record["column_traffic"] = bytesToSize(pool["bytes.sent"] + pool["bytes.rcvd"])
record["column_chart"] = ""
if areHostPoolsTimeseriesEnabled(ifid) then
record["column_chart"] = '<A HREF="'..ntop.getHttpPrefix()..'/lua/pool_details.lua?pool='..pool_id..'&page=historical"><i class=\'fas fa-chart-area fa-lg\'></i></A>'
end
return record
end
function host_pools_nedge.printQuotas(pool_id, host, page_params)
local shaper_utils = require "shaper_utils" -- provides the quota/shaper lookups used below
local pools_stats = interface.getHostPoolsStats()
local pool_stats = pools_stats and pools_stats[tonumber(pool_id)]
local ndpi_stats = pool_stats.ndpi
local category_stats = pool_stats.ndpi_categories
-- ifId is a global variable here
local quota_and_protos = shaper_utils.getPoolProtoShapers(ifId, pool_id)
local cross_traffic_quota, cross_time_quota = shaper_utils.getCrossApplicationQuotas(ifId, pool_id)
-- Empty check
local empty = (cross_traffic_quota == shaper_utils.NO_QUOTA) and (cross_time_quota == shaper_utils.NO_QUOTA)
if empty then
for _, proto in pairs(quota_and_protos) do
if ((tonumber(proto.traffic_quota) > 0) or (tonumber(proto.time_quota) > 0)) then
-- at least a quota is set
empty = false
break
end
end
end
if empty then
local url = "/lua/pro/nedge/admin/nf_edit_user.lua?page=protocols&username=" .. host_pools_nedge.poolIdToUsername(pool_id)
print("<div class=\"alert alert alert-danger\"><i class='fas fa-exclamation-triangle fa-lg fa-ntopng-warning'></i> "..i18n("shaping.no_quota_data")..
". " .. i18n("host_pools.create_new_quotas_here", {url=ntop.getHttpPrefix()..url}) .. "</div>")
else
print[[
<table class="table table-bordered table-striped">
<thead>
<tr>
<th>]] print(i18n("application")) print[[</th>
<th class="text-center">]] print(i18n("shaping.daily_traffic")) print[[</th>
<th class="text-center">]] print(i18n("shaping.daily_time")) print[[</th>
</tr>
</thead>
<tbody id="pool_quotas_ndpi_tbody">
</tbody>
</table>
<script>
function update_ndpi_table() {
$.ajax({
type: 'GET',
url: ']]
print(getPageUrl(ntop.getHttpPrefix().."/lua/pro/pool_details_ndpi.lua").."', data: ")
print(tableToJsObject(page_params))
print[[,
success: function(content) {
if(content)
$('#pool_quotas_ndpi_tbody').html(content);
else
$('#pool_quotas_ndpi_tbody').html('<tr><td colspan="3"><i>]] print(i18n("shaping.no_quota_traffic")) print[[</i></td></tr>');
}
});
}
setInterval(update_ndpi_table, 5000);
update_ndpi_table();
</script>]]
end
end
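-- Returns the smallest pool id, not lower than FIRST_AVAILABLE_POOL_ID, which is not already in use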
function host_pools_nedge.getFirstAvailablePoolId()
local ids_key = get_pool_ids_key()
local ids = ntop.getMembersCache(ids_key) or {}
for i, id in pairs(ids) do
ids[i] = tonumber(id)
end
local host_pool_id = tonumber(host_pools_nedge.FIRST_AVAILABLE_POOL_ID)
for _, pool_id in pairsByValues(ids, asc) do
if pool_id > host_pool_id then
break
end
host_pool_id = math.max(pool_id + 1, host_pool_id)
end
return tostring(host_pool_id)
end
-- @brief Perform migration from the old host pools which were configured per-interface
-- to global host pools which are now system-wide
function host_pools_nedge.migrateHostPools()
-- Migration is only performed when host pools are configured for one and only one interface (this always covers the nEdge case)
-- https://github.com/ntop/ntopng/issues/4086
if ntop.getKeysCache(get_pool_ids_key()) then
-- Already migrated
return
end
local host_pools_migration = require "host_pools_migration"
local delete_data_utils = require "delete_data_utils"
local json = require "dkjson"
local migration_ifid
-- The migration is only done when there's 1 active interface
if table.len(interface.getIfNames()) == 1 then
-- Avoid calling interface.getId() as this migration function
-- can be executed too early
for ifid, ifname in pairs(interface.getIfNames()) do
migration_ifid = ifid
break
end
end
-- Do the actual migration
-- table.len >= 1 used to check whether pools different from the default one were present
if migration_ifid and table.len(host_pools_migration.export(migration_ifid)) >= 1 then
-- Call the `new` import with the `old` export which takes ifid as argument
host_pools_nedge.import(host_pools_migration.export(migration_ifid))
traceError(TRACE_WARNING, TRACE_CONSOLE, "Host pools configuration migrated.")
end
for ifid, ifname in pairs(delete_data_utils.list_all_interfaces()) do
if ifid == migration_ifid then
-- Don't delete migrated elements
goto continue
end
local if_pools = host_pools_migration.getPoolsList(ifid)
if table.len(if_pools) <= 1 then
-- Nothing to migrate, only the default pool is present
goto continue
end
-- Copy the pool configuration
local base_dir = os_utils.fixPath(dirs.workingdir .. "/" .. ifid .. "/migration/host_pools/")
ntop.mkdir(base_dir)
local config_filename = os_utils.fixPath(base_dir.."/".."pools_configuration.json")
local config_file = assert(io.open(config_filename, "w"))
config_file:write(json.encode(host_pools_migration.export(ifid)))
config_file:close()
-- Delete data
for _, pool in ipairs(if_pools) do
host_pools_migration.deletePool(ifid, pool.id)
end
-- Print out a message
traceError(TRACE_WARNING, TRACE_CONSOLE, string.format("[%s] host pools configuration backed up to file %s due to major changes. It is possible to take the file and re-import it manually.", ifname, config_filename))
::continue::
end
-- Cleanup old host-pool related keys
local old_keys_pattern = "ntopng.prefs.*.host_pools.*"
local old_keys = ntop.getKeysCache(old_keys_pattern) or {}
for old_key in pairs(old_keys) do
ntop.delCache(old_key)
end
ntop.reloadHostPools()
end
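-- Resets the quotas of all pools, or of the pool given in pool_filter only,
-- clearing both the redis serialization and the in-memory counters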
function host_pools_nedge.resetPoolsQuotas(pool_filter)
local serialized_key = get_pools_serialized_key(tostring(interface.getFirstInterfaceId()))
local keys_to_del
if pool_filter ~= nil then
keys_to_del = {[pool_filter]=1, }
else
keys_to_del = ntop.getHashKeysCache(serialized_key) or {}
end
-- Delete the redis serialization
for key in pairs(keys_to_del) do
ntop.delHashCache(serialized_key, tostring(key))
end
-- Delete the in-memory stats
interface.resetPoolsQuotas(pool_filter)
end
-- @brief Performs a daily check and possibly resets host quotas.
-- NOTE: this function must be called exactly once a day.
function host_pools_nedge.dailyCheckResetPoolsQuotas()
package.path = dirs.installdir .. "/pro/scripts/lua/nedge/modules/system_config/?.lua;" .. package.path
local nf_config = require("nf_config"):create()
local shapers_config = nf_config:getShapersConfig()
local quotas_control = shapers_config.quotas_control
local do_reset = true
if quotas_control.reset == "monthly" then
local day_of_month = os.date("*t").day
if day_of_month ~= 1 --[[ First day of the month --]] then
do_reset = false
end
elseif quotas_control.reset == "weekly" then
local day_of_week = os.date("*t").wday
if day_of_week ~= 2 --[[ Monday --]] then
do_reset = false
end
end
if do_reset then
host_pools_nedge.resetPoolsQuotas()
end
end
host_pools_nedge.traceHostPoolEvent = traceHostPoolEvent
return host_pools_nedge