Commit b56b6ba0 authored by Kirill Smelkov

py2: *: Greek -> Latin

Python2 does not support unicode characters in identifiers.
parent 612a3d0f
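
Python 2 restricts identifiers to [A-Za-z0-9_], while Python 3 allows Unicode letters (δt, Σ, τ, ...), so the tree-wide rename below is a mechanical Greek-to-Latin substitution in names only; non-ASCII may stay in comments and string literals once a coding declaration is present. A quick illustration, not part of the patch:

    # -*- coding: utf-8 -*-
    dt = 0.1            # works on both Python 2 and Python 3
    # δt = 0.1          # py3: valid;  py2: SyntaxError: invalid syntax
    s = u"δ(ue,stat)"   # non-ASCII in string literals and comments stays fine on py2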
@@ -185,10 +185,10 @@ class Conn:
         # handle rx timeout ourselves. We cannot rely on global rx timeout
         # since e.g. other replies might be coming in again and again.
-        δt = conn._ws.gettimeout()
+        dt = conn._ws.gettimeout()
         rxt = nilchan
-        if δt is not None:
-            _ = time.Timer(δt)
+        if dt is not None:
+            _ = time.Timer(dt)
             defer(_.stop)
             rxt = _.c
...
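
The hunk above implements a per-request rx timeout with a timer channel, so that unrelated replies arriving again and again cannot keep a global socket timeout from ever firing. A standalone sketch of the same pattern with pygolang primitives (recv_with_timeout and rxq are illustrative names, not from the patch):

    from golang import select, nilchan, func, defer
    from golang import time

    @func
    def recv_with_timeout(rxq, dt):     # dt=None means "no timeout"
        rxt = nilchan                   # select never fires on a nil channel
        if dt is not None:
            _ = time.Timer(dt)
            defer(_.stop)
            rxt = _.c                   # timer channel fires once, dt seconds from now
        i, rx = select(
            rxt.recv,                   # 0: timed out
            rxq.recv,                   # 1: got the reply we were waiting for
        )
        if i == 0:
            raise RuntimeError("request timed out")
        return rx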
@@ -99,7 +99,7 @@ class _UE:
     __slots__ = (
         'erab_flows',   # {} erab_id -> _ERAB_Flow   current state of all erabs related to UE
         'qci_flows',    # {} qci -> _QCI_Flow        in-progress collection of UE-related samples
-        'bitsync',      # None | _BitSync            to synchronize δtx_bytes with #tx on updates
+        'bitsync',      # None | _BitSync            to synchronize dtx_bytes with #tx on updates
     )

 # _ERAB_Flow tracks data transmission on particular ERAB of particular UE.
@@ -111,7 +111,7 @@ class _ERAB_Flow:
 # _QCI_Flow represents in-progress collection to make up a Sample.
 #
-# .update(δt, tx_bytes, #tx, ...) updates flow with information about next
+# .update(dt, tx_bytes, #tx, ...) updates flow with information about next
 # transmission period and potentially yields some finalized Samples.
 # .finish() completes Sample collection.
 class _QCI_Flow:
@@ -121,7 +121,7 @@ class _QCI_Flow:
         'tx_time_err',  # accuracy of ^^^
     )

-# _BitSync helps _Sampler to match δtx_bytes and #tx in transmission updates.
+# _BitSync helps _Sampler to match dtx_bytes and #tx in transmission updates.
 #
 # For example for DL a block is transmitted via PDCCH+PDSCH during one TTI, and
 # then the base station awaits HARQ ACK/NACK. That ACK/NACK comes later via
@@ -141,13 +141,13 @@ class _QCI_Flow:
 # adjusted stream with #tx corresponding to tx_bytes coming together
 # synchronized in time.
 #
-# .next(δt, tx_bytes, #tx, X)  -> [](δt', tx_bytes', #tx', X')
-# .finish()                    -> [](δt', tx_bytes', #tx', X')
+# .next(dt, tx_bytes, #tx, X)  -> [](dt', tx_bytes', #tx', X')
+# .finish()                    -> [](dt', tx_bytes', #tx', X')
 #
 # (*) see e.g. Figure 8.1 in "An introduction to LTE, 2nd ed."
 class _BitSync:
     __slots__ = (
-        'txq',      # [](δt,tx_bytes,#tx,X)  not-yet fully processed tail of whole txv
+        'txq',      # [](dt,tx_bytes,#tx,X)  not-yet fully processed tail of whole txv
         'i_txq',    # txq represents txv[i_txq:]
         'i_lshift', # next left shift will be done on txv[i_lshift] <- txv[i_lshift+1]
     )
@@ -231,9 +231,9 @@ class _Utx: # transmission state passed through bitsync
 @func(_Sampler)
 def add(s, ue_stats, stats, init=False):
     t = ue_stats['utc']
-    δt = t - s.t
+    dt = t - s.t
     s.t = t
-    assert δt > 0
+    assert dt > 0

     qci_samples = {}    # qci -> []Sample    samples finalized during this add
     ue_live = set()     # of ue              ue that are present in ue_stats
@@ -256,7 +256,7 @@ def add(s, ue_stats, stats, init=False):
         scell = stats['cells'][str(cell_id)]

         u = _Utx()
-        u.qtx_bytes = {}    # qci -> Σδerab_qci=qci
+        u.qtx_bytes = {}    # qci -> Sδerab_qci=qci
         u.rank = cell['ri'] if s.use_ri else 1
         u.xl_use_avg = scell['%s_use_avg' % s.dir]
@@ -265,7 +265,7 @@ def add(s, ue_stats, stats, init=False):
             ue = s.ues[ue_id] = _UE(s.use_bitsync)

         # erabs: δ(tx_total_bytes) -> tx_bytes ; prepare per-qci tx_bytes
-        tx_bytes = 0    # Σδerab
+        tx_bytes = 0    # Sδerab
         eflows_live = set()     # of erab    erabs that are present in ue_stats for this ue
         for erab in ju['erab_list']:
             erab_id = erab['erab_id']
@@ -302,12 +302,12 @@ def add(s, ue_stats, stats, init=False):
             if erab_id not in eflows_live:
                 del ue.erab_flows[erab_id]

-        # bitsync <- (δt, tx_bytes, #tx, u)
+        # bitsync <- (dt, tx_bytes, #tx, u)
         tx += retx  # both transmission and retransmission take time
         if ue.bitsync is not None:
-            bitnext = ue.bitsync.next(δt, tx_bytes, tx, u)
+            bitnext = ue.bitsync.next(dt, tx_bytes, tx, u)
         else:
-            bitnext = [(δt, tx_bytes, tx, u)]
+            bitnext = [(dt, tx_bytes, tx, u)]

         # update qci flows
         if init:
@@ -326,12 +326,12 @@ def add(s, ue_stats, stats, init=False):
     return qci_samples

-# _update_qci_flows updates .qci_flows for ue with (δt, tx_bytes, #tx, _Utx) yielded from bitsync.
+# _update_qci_flows updates .qci_flows for ue with (dt, tx_bytes, #tx, _Utx) yielded from bitsync.
 #
 # yielded samples are appended to qci_samples ({} qci -> []Sample).
 @func(_UE)
 def _update_qci_flows(ue, bitnext, qci_samples):
-    for (δt, tx_bytes, tx, u) in bitnext:
+    for (dt, tx_bytes, tx, u) in bitnext:
         qflows_live = set()     # of qci    qci flows that get updated from current utx entry

         # it might happen that even with correct bitsync we could end up with receiving tx=0 here.
@@ -341,10 +341,10 @@ def _update_qci_flows(ue, bitnext, qci_samples):
         #     <-- finish
         #     0   10
         #
-        # if we see #tx = 0 we say that it might be anything in between 1 and δt.
+        # if we see #tx = 0 we say that it might be anything in between 1 and dt.
         tx_lo = tx_hi = tx
         if tx == 0:
-            tx_hi = δt/tti
+            tx_hi = dt/tti
             tx_lo = min(1, tx_hi)

         for qci, tx_bytes_qci in u.qtx_bytes.items():
@@ -382,12 +382,12 @@ def _update_qci_flows(ue, bitnext, qci_samples):
             #
             #   tx_bytes(x)
             #   ───────────·#tx  ≤  #tx(x)  ≤  #tx
-            #    Σtx_bytes
+            #    Stx_bytes
             qtx_lo = tx_bytes_qci * tx_lo / tx_bytes
             if qtx_lo > tx_hi:  # e.g. 6.6 * 11308 / 11308 = 6.6 + ~1e-15
                 qtx_lo -= 1e-4
             assert 0 < qtx_lo <= tx_hi, (qtx_lo, tx_hi, tx_bytes_qci, tx_bytes)
-            _ = qf.update(δt, tx_bytes_qci, qtx_lo, tx_hi, u.rank, u.xl_use_avg)
+            _ = qf.update(dt, tx_bytes_qci, qtx_lo, tx_hi, u.rank, u.xl_use_avg)
             for sample in _:
                 qci_samples.setdefault(qci, []).append(sample)
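
The inequality in the hunk above splits the UE-level transport-block budget proportionally to each QCI's bytes. With made-up numbers: if the UE sent tx_bytes=1000 total with #tx bounded by [4, 10], a QCI that carried 250 of those bytes gets at least 250·4/1000 = 1 transport block, while its upper bound stays the full 10, since in the worst case every block could have carried this QCI's data:

    tx_bytes = 1000             # total UE bytes over the period (illustrative)
    tx_lo, tx_hi = 4, 10        # bounds on UE transport blocks
    tx_bytes_qci = 250          # bytes attributed to one QCI
    qtx_lo = tx_bytes_qci * tx_lo / tx_bytes    # = 1.0
    assert 0 < qtx_lo <= tx_hi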
@@ -407,31 +407,31 @@ def __init__(qf):
     qf.tx_time_err = 0

 # update updates flow with information that so many bytes were transmitted during
-# δt with using #tx transport blocks somewhere in [tx_lo,tx_hi] and with
+# dt with using #tx transport blocks somewhere in [tx_lo,tx_hi] and with
 # specified rank. It is also known that overall average usage of resource
 # blocks corresponding to tx direction in the resource map is xl_use_avg.
 @func(_QCI_Flow)
-def update(qf, δt, tx_bytes, tx_lo, tx_hi, rank, xl_use_avg): # -> []Sample
-    #_debug('QF.update %.2ftti %5db %.1f-%.1ftx %drank %.2fuse' % (δt/tti, tx_bytes, tx_lo, tx_hi, rank, xl_use_avg))
+def update(qf, dt, tx_bytes, tx_lo, tx_hi, rank, xl_use_avg): # -> []Sample
+    #_debug('QF.update %.2ftti %5db %.1f-%.1ftx %drank %.2fuse' % (dt/tti, tx_bytes, tx_lo, tx_hi, rank, xl_use_avg))

     tx_lo /= rank   # normalize TB to TTI (if it is e.g. 2x2 mimo, we have 2x more transport blocks)
     tx_hi /= rank

     vout = []
-    s = qf._update(δt, tx_bytes, tx_lo, tx_hi, xl_use_avg)
+    s = qf._update(dt, tx_bytes, tx_lo, tx_hi, xl_use_avg)
     if s is not None:
         vout.append(s)
     return vout

 @func(_QCI_Flow)
-def _update(qf, δt, tx_bytes, tx_lo, tx_hi, xl_use_avg): # -> ?Sample
+def _update(qf, dt, tx_bytes, tx_lo, tx_hi, xl_use_avg): # -> ?Sample
     assert tx_bytes > 0
-    δt_tti = δt / tti
-    tx_lo = min(tx_lo, δt_tti)  # protection (should not happen)
-    tx_hi = min(tx_hi, δt_tti)  # protection (should not happen)
+    dt_tti = dt / tti
+    tx_lo = min(tx_lo, dt_tti)  # protection (should not happen)
+    tx_hi = min(tx_hi, dt_tti)  # protection (should not happen)

-    # tx time is somewhere in [tx, δt_tti]
+    # tx time is somewhere in [tx, dt_tti]
     if xl_use_avg < 0.9:
         # not congested: it likely took the time to transmit ≈ #tx
         pass
@@ -439,7 +439,7 @@ def _update(qf, δt, tx_bytes, tx_lo, tx_hi, xl_use_avg): # -> ?Sample
         # potentially congested: we don't know how much congested it is and
         # which QCIs are affected more and which less
         # -> all we can say tx_time is only somewhere in between limits
-        tx_hi = δt_tti
+        tx_hi = dt_tti
     tx_time = (tx_lo + tx_hi) / 2 * tti
     tx_time_err = (tx_hi - tx_lo) / 2 * tti
@@ -454,7 +454,7 @@ def _update(qf, δt, tx_bytes, tx_lo, tx_hi, xl_use_avg): # -> ?Sample
     # - if it is not big - it coalesces and ends the sample.
     # NOTE: without throwing away last tti the overall throughput statistics
     # stays the same irregardless of whether we do coalesce small txes or not.
-    if cont and tx_hi < 0.9*δt_tti:
+    if cont and tx_hi < 0.9*dt_tti:
         s = qf._sample()
         qf.tx_bytes = 0
         qf.tx_time = 0
@@ -498,18 +498,18 @@ def __init__(s):
     s.i_txq = 0
     s.i_lshift = 0

-# next feeds next (δt, tx_bytes, tx) into bitsync.
+# next feeds next (dt, tx_bytes, tx) into bitsync.
 #
 # and returns ready parts of adjusted stream.
 @func(_BitSync)
-def next(s, δt, tx_bytes, tx, X): # -> [](δt', tx_bytes', tx', X')
-    s.txq.append((δt, tx_bytes, tx, X))
+def next(s, dt, tx_bytes, tx, X): # -> [](dt', tx_bytes', tx', X')
+    s.txq.append((dt, tx_bytes, tx, X))

     # XXX for simplicity we currently handle sync in between only current and
     # next frames. That is enough to support FDD. TODO handle next-next case to support TDD
     #
-    # XXX for simplicity we also assume all δt are ~ 10·tti and do not generally handle them
-    # TODO handle arbitrary δt
+    # XXX for simplicity we also assume all dt are ~ 10·tti and do not generally handle them
+    # TODO handle arbitrary dt

     # shift #tx to the left:
     #
@@ -537,8 +537,8 @@ def next(s, δt, tx_bytes, tx, X): # -> [](δt', tx_bytes', tx', X')
         assert s.i_txq <= i < s.i_txq + len(s.txq)
         i -= s.i_txq

-        δt1, b1, t1, X1 = s.txq[i]
-        δt2, b2, t2, X2 = s.txq[i+1]
+        dt1, b1, t1, X1 = s.txq[i]
+        dt2, b2, t2, X2 = s.txq[i+1]
         if b1 != 0:
             t22 = b2*t1/b1
         else:
@@ -551,8 +551,8 @@ def next(s, δt, tx_bytes, tx, X): # -> [](δt', tx_bytes', tx', X')
         assert t1 >= 0, t1
         assert t2 >= 0, t2

-        s.txq[i]   = (δt1, b1, t1, X1)
-        s.txq[i+1] = (δt2, b2, t2, X2)
+        s.txq[i]   = (dt1, b1, t1, X1)
+        s.txq[i+1] = (dt2, b2, t2, X2)
         #print('  < lshift ', s.txq)

     while s.i_lshift+1 < s.i_txq + len(s.txq):
@@ -578,7 +578,7 @@ def next(s, δt, tx_bytes, tx, X): # -> [](δt', tx_bytes', tx', X')
 #
 # the bitsync becomes reset.
 @func(_BitSync)
-def finish(s): # -> [](δt', tx_bytes', tx', X')
+def finish(s): # -> [](dt', tx_bytes', tx', X')
     assert len(s.txq) < 3
     s._rebalance(len(s.txq))
     vout = s.txq
@@ -592,14 +592,14 @@ def finish(s): # -> [](δt', tx_bytes', tx', X')
 # t'_i correlates with b_i and that whole transmission time stays the same:
 #
 #   b₁ t₁       t'₁
-#   b₂ t₂   ->  t'₂      t'_i = α·b_i      Σt' = Σt
+#   b₂ t₂   ->  t'₂      t'_i = α·b_i      St' = St
 #   b₃ t₃       t'₃
 #
 # that gives
 #
-#        Σt
+#        St
 #   α =  ──
-#        Σb
+#        Sb
 #
 # and has the effect of moving #tx from periods with tx_bytes=0, to periods
 # where transmission actually happened (tx_bytes > 0).
@@ -609,14 +609,14 @@ def _rebalance(s, l):
     assert l <= len(s.txq)
     assert l <= 3

-    Σb = sum(_[1] for _ in s.txq[:l])
-    Σt = sum(_[2] for _ in s.txq[:l])
-    if Σb != 0:
+    Sb = sum(_[1] for _ in s.txq[:l])
+    St = sum(_[2] for _ in s.txq[:l])
+    if Sb != 0:
         for i in range(l):
-            δt_i, b_i, t_i, X_i = s.txq[i]
-            t_i = b_i * Σt / Σb
+            dt_i, b_i, t_i, X_i = s.txq[i]
+            t_i = b_i * St / Sb
             assert t_i >= 0, t_i
-            s.txq[i] = (δt_i, b_i, t_i, X_i)
+            s.txq[i] = (dt_i, b_i, t_i, X_i)
     #print('  < rebalance', s.txq[:l])
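
The rebalancing is linear: t'_i = α·b_i with α = Σt/Σb, so per-period #tx is redistributed proportionally to bytes while the total stays fixed. A toy run with invented numbers:

    # (dt, tx_bytes, #tx, X) entries; 3 tx were reported in a period with 0 bytes
    txq = [(10, 0, 3, 'a'), (10, 600, 1, 'b'), (10, 200, 0, 'c')]
    Sb = sum(b for (_, b, _, _) in txq)     # = 800
    St = sum(t for (_, _, t, _) in txq)     # = 4
    txq = [(dt, b, b*St/Sb, X) for (dt, b, t, X) in txq]
    # #tx is now [0.0, 3.0, 1.0]: moved to where bytes actually flowed; sum stays 4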
@@ -660,11 +660,11 @@ def __repr__(s):
 # rate-limits websocket requests to execute not faster than 10ms each.
 @func
 def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
-    δt_rate = 10*tti
+    dt_rate = 10*tti

     # rx_ue_get_stats sends `ue_get[stats]` request and returns server response.
     rtt_ue_stats = _IncStats()  # time it takes to send ue_get and to receive response
-    δt_ue_stats  = _IncStats()  # δ(ue_stats.timestamp)
+    dt_ue_stats  = _IncStats()  # δ(ue_stats.timestamp)
     t_ue_stats   = None         # last ue_stats.timestamp
     def rx_ue_get_stats(ctx): # -> ue_stats
         nonlocal t_ue_stats
@@ -674,7 +674,7 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
         rtt_ue_stats.add(t_rx-t_tx)
         t = ue_stats['utc']
         if t_ue_stats is not None:
-            δt_ue_stats.add(t-t_ue_stats)
+            dt_ue_stats.add(t-t_ue_stats)
         t_ue_stats = t
         return ue_stats
@@ -687,7 +687,7 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
     conn_stats = amari.connect(ctx, conn.wsuri)
     defer(conn_stats.close)
     rtt_stats = _IncStats()     # like rtt_ue_stats but for stat instead of ue_get
-    δt_stats  = _IncStats()     # δ(stats.timestamp)
+    dt_stats  = _IncStats()     # δ(stats.timestamp)
     t_stats   = None            # last stats.timestamp
     def rx_stats(ctx): # -> stats
         nonlocal t_stats
@@ -697,7 +697,7 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
         rtt_stats.add(t_rx-t_tx)
         t = stats['utc']
         if t_stats is not None:
-            δt_stats.add(t-t_stats)
+            dt_stats.add(t-t_stats)
         t_stats = t
         return stats
     # issue first dummy stats. It won't report most of statistics due to
@@ -706,7 +706,7 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):

     # rx_all simultaneously issues `ue_get[stats]` and `stats` requests and returns server responses.
     # the requests are issued synchronized in time.
-    δ_ue_stats = _IncStats()    # ue_stats.timestamp - stats.timestamp
+    d_ue_stats = _IncStats()    # ue_stats.timestamp - stats.timestamp
     def rx_all(ctx): # -> ue_stats, stats
         uq = chan(1)
         sq = chan(1)
@@ -741,7 +741,7 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
                 stats = _rx
                 sq = nilchan

-        δ_ue_stats.add(ue_stats['utc'] - stats['utc'])
+        d_ue_stats.add(ue_stats['utc'] - stats['utc'])
         return ue_stats, stats

     ueget_reqch = chan()
@@ -774,17 +774,17 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):

     # Tmain is the main thread that drives the process overall
     def Tmain(ctx):
-        nonlocal rtt_ue_stats, δt_ue_stats
-        nonlocal rtt_stats, δt_stats
-        nonlocal δ_ue_stats
+        nonlocal rtt_ue_stats, dt_ue_stats
+        nonlocal rtt_stats, dt_stats
+        nonlocal d_ue_stats

         t_req = time.now()
         ue_stats, stats = rx_all(ctx)
         S = Sampler(ue_stats, stats)

-        qci_Σdl = {}    # qci -> _Σ  for dl
-        qci_Σul = {}    # ----//---- for ul
-        class _Σ:
+        qci_Sdl = {}    # qci -> _S  for dl
+        qci_Sul = {}    # ----//---- for ul
+        class _S:
             __slots__ = (
                 'tx_bytes',
                 'tx_time',
@@ -793,15 +793,15 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
                 'tx_time_notailtti_err',
                 'tx_nsamples',
             )
-            def __init__(Σ):
-                for x in Σ.__slots__:
-                    setattr(Σ, x, 0)
+            def __init__(S):
+                for x in S.__slots__:
+                    setattr(S, x, 0)

-        # account accounts samples into Σtx_time/Σtx_bytes in qci_Σ.
-        def account(qci_Σ, qci_samples):
+        # account accounts samples into Stx_time/Stx_bytes in qci_S.
+        def account(qci_S, qci_samples):
             for qci, samplev in qci_samples.items():
-                Σ = qci_Σ.get(qci)
-                if Σ is None:
-                    Σ = qci_Σ[qci] = _Σ()
+                S = qci_S.get(qci)
+                if S is None:
+                    S = qci_S[qci] = _S()
                 for s in samplev:
                     # do not account short transmissions
                     # ( tx with 1 tti should be ignored per standard, but it is
@@ -810,10 +810,10 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
                     t_hi = s.tx_time + s.tx_time_err
                     if t_hi <= 1*tti or (t_hi <= 2 and s.tx_bytes < 1000):
                         continue
-                    Σ.tx_nsamples += 1
-                    Σ.tx_bytes    += s.tx_bytes
-                    Σ.tx_time     += s.tx_time
-                    Σ.tx_time_err += s.tx_time_err
+                    S.tx_nsamples += 1
+                    S.tx_bytes    += s.tx_bytes
+                    S.tx_time     += s.tx_time
+                    S.tx_time_err += s.tx_time_err

                     # also aggregate .tx_time without tail tti (IP Throughput KPI needs this)
                     tt_hi = math.ceil(t_hi/tti - 1) # in tti
@@ -822,8 +822,8 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
                     tt_lo = math.ceil(tt_lo - 1)
                     tt = (tt_lo + tt_hi) / 2
                     tt_err = (tt_hi - tt_lo) / 2
-                    Σ.tx_time_notailtti     += tt * tti
-                    Σ.tx_time_notailtti_err += tt_err * tti
+                    S.tx_time_notailtti     += tt * tti
+                    S.tx_time_notailtti_err += tt_err * tti

         while 1:
@@ -842,71 +842,71 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
                 # wrap-up flows and account finalized samples
                 qci_dl, qci_ul = S.finish()
-                account(qci_Σdl, qci_dl)
-                account(qci_Σul, qci_ul)
+                account(qci_Sdl, qci_dl)
+                account(qci_Sul, qci_ul)

                 _debug()
                 _debug('rtt_ue:      %s ms' % rtt_ue_stats .str('%.2f', time.millisecond))
-                _debug('δt_ue:       %s ms' % δt_ue_stats  .str('%.2f', time.millisecond))
+                _debug('dt_ue:       %s ms' % dt_ue_stats  .str('%.2f', time.millisecond))
                 _debug('rtt_stats:   %s ms' % rtt_stats    .str('%.2f', time.millisecond))
-                _debug('δt_stats:    %s ms' % δt_stats     .str('%.2f', time.millisecond))
-                _debug('δ(ue,stat):  %s ms' % δ_ue_stats   .str('%.2f', time.millisecond))
+                _debug('dt_stats:    %s ms' % dt_stats     .str('%.2f', time.millisecond))
+                _debug('δ(ue,stat):  %s ms' % d_ue_stats   .str('%.2f', time.millisecond))

                 qci_dict = {}
-                Σ0 = _Σ()
-                for qci in set(qci_Σdl.keys()) .union(qci_Σul.keys()):
-                    Σdl = qci_Σdl.get(qci, Σ0)
-                    Σul = qci_Σul.get(qci, Σ0)
+                S0 = _S()
+                for qci in set(qci_Sdl.keys()) .union(qci_Sul.keys()):
+                    Sdl = qci_Sdl.get(qci, S0)
+                    Sul = qci_Sul.get(qci, S0)
                     qci_dict[qci] = {
-                        'dl_tx_bytes':              Σdl.tx_bytes,
-                        'dl_tx_time':               Σdl.tx_time,
-                        'dl_tx_time_err':           Σdl.tx_time_err,
-                        'dl_tx_time_notailtti':     Σdl.tx_time_notailtti,
-                        'dl_tx_time_notailtti_err': Σdl.tx_time_notailtti_err,
-                        'dl_tx_nsamples':           Σdl.tx_nsamples,
-                        'ul_tx_bytes':              Σul.tx_bytes,
-                        'ul_tx_time':               Σul.tx_time,
-                        'ul_tx_time_err':           Σul.tx_time_err,
-                        'ul_tx_time_notailtti':     Σul.tx_time_notailtti,
-                        'ul_tx_time_notailtti_err': Σul.tx_time_notailtti_err,
-                        'u;_tx_nsamples':           Σul.tx_nsamples,
+                        'dl_tx_bytes':              Sdl.tx_bytes,
+                        'dl_tx_time':               Sdl.tx_time,
+                        'dl_tx_time_err':           Sdl.tx_time_err,
+                        'dl_tx_time_notailtti':     Sdl.tx_time_notailtti,
+                        'dl_tx_time_notailtti_err': Sdl.tx_time_notailtti_err,
+                        'dl_tx_nsamples':           Sdl.tx_nsamples,
+                        'ul_tx_bytes':              Sul.tx_bytes,
+                        'ul_tx_time':               Sul.tx_time,
+                        'ul_tx_time_err':           Sul.tx_time_err,
+                        'ul_tx_time_notailtti':     Sul.tx_time_notailtti,
+                        'ul_tx_time_notailtti_err': Sul.tx_time_notailtti_err,
+                        'u;_tx_nsamples':           Sul.tx_nsamples,
                     }

                 r = {'time':        ue_stats['time'],
                      'utc':         ue_stats['utc'],
                      'qci_dict':    qci_dict,
-                     'δt_ueget': {
-                         'min':     δt_ue_stats.min,
-                         'avg':     δt_ue_stats.avg(),
-                         'max':     δt_ue_stats.max,
-                         'std':     δt_ue_stats.std(),
+                     'dt_ueget': {
+                         'min':     dt_ue_stats.min,
+                         'avg':     dt_ue_stats.avg(),
+                         'max':     dt_ue_stats.max,
+                         'std':     dt_ue_stats.std(),
                      },
                      'δ_ueget_vs_stats': {
-                         'min':     δ_ue_stats.min,
-                         'avg':     δ_ue_stats.avg(),
-                         'max':     δ_ue_stats.max,
-                         'std':     δ_ue_stats.std(),
+                         'min':     d_ue_stats.min,
+                         'avg':     d_ue_stats.avg(),
+                         'max':     d_ue_stats.max,
+                         'std':     d_ue_stats.std(),
                      },
                     }
                 respch.send(r)

                 # reset
-                qci_Σdl = {}
-                qci_Σul = {}
+                qci_Sdl = {}
+                qci_Sul = {}
                 rtt_ue_stats = _IncStats()
-                δt_ue_stats  = _IncStats()
+                dt_ue_stats  = _IncStats()
                 rtt_stats    = _IncStats()
-                δt_stats     = _IncStats()
-                δ_ue_stats   = _IncStats()
+                dt_stats     = _IncStats()
+                d_ue_stats   = _IncStats()

-            # sync time to keep t_req' - t_req ≈ δt_rate
-            # this should automatically translate to δt(ue_stats) ≈ δt_rate
+            # sync time to keep t_req' - t_req ≈ dt_rate
+            # this should automatically translate to dt(ue_stats) ≈ dt_rate
             t = time.now()
-            δtsleep = δt_rate - (t - t_req)
-            if δtsleep > 0:
-                time.sleep(δtsleep)
+            dtsleep = dt_rate - (t - t_req)
+            if dtsleep > 0:
+                time.sleep(dtsleep)

             # retrieve ue_get[stats] and stats data for next frame from enb
             t_req = time.now()
@@ -914,8 +914,8 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):

             # pass data to sampler and account already detected samples
             qci_dl, qci_ul = S.add(ue_stats, stats)
-            account(qci_Σdl, qci_dl)
-            account(qci_Σul, qci_ul)
+            account(qci_Sdl, qci_dl)
+            account(qci_Sul, qci_ul)

     # run everything
     wg = sync.WorkGroup(ctx)
@@ -932,25 +932,25 @@ def _x_stats_srv(ctx, reqch: chan, conn: amari.Conn):
 class _IncStats:
     __slots__ = (
         'n',    # number of samples seen so far
-        'μ',    # current mean
-        'σ2',   # ~ current variance
+        'mu',   # current mean
+        's2',   # ~ current variance
         'min',  # current min / max
         'max',
     )

     def __init__(s):
         s.n   = 0
-        s.μ   = 0.
-        s.σ2  = 0.
+        s.mu  = 0.
+        s.s2  = 0.
         s.min = +float('inf')
         s.max = -float('inf')

     def add(s, x):
         # https://www.johndcook.com/blog/standard_deviation/
         s.n += 1
-        μ_ = s.μ    # μ_{n-1}
-        s.μ  += (x - μ_)/s.n
-        s.σ2 += (x - μ_)*(x - s.μ)
+        mu_ = s.mu  # mu_{n-1}
+        s.mu += (x - mu_)/s.n
+        s.s2 += (x - mu_)*(x - s.mu)
         s.min = min(s.min, x)
         s.max = max(s.max, x)
@@ -958,12 +958,12 @@ class _IncStats:
     def avg(s):
         if s.n == 0:
             return float('nan')
-        return s.μ
+        return s.mu

     def var(s):
         if s.n == 0:
             return float('nan')
-        return s.σ2 / s.n   # note johndcook uses / (s.n-1) to unbias
+        return s.s2 / s.n   # note johndcook uses / (s.n-1) to unbias

     def std(s):
         return math.sqrt(s.var())
@@ -973,17 +973,17 @@ class _IncStats:
         return s.str('%s', 1)

     def str(s, fmt, scale):
-        t = "min/avg/max/σ "
+        t = "min/avg/max/std "
         if s.n == 0:
             t += "?/?/? ±?"
         else:
-            μ   = s.avg() / scale
-            σ   = s.std() / scale
+            mu  = s.avg() / scale
+            std = s.std() / scale
             min = s.min   / scale
             max = s.max   / scale
             f = "%s/%s/%s ±%s" % ((fmt,)*4)
-            t += f % (min, μ, max, σ)
+            t += f % (min, mu, max, std)
         return t
...
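
_IncStats above is Welford's single-pass mean/variance algorithm (the johndcook reference in the hunk): each add() updates the running mean and the sum of squared deviations in O(1) memory. A standalone restatement with a small self-check, not the patched class itself:

    import math

    class IncStats(object):
        def __init__(s):
            s.n, s.mu, s.s2 = 0, 0., 0.
        def add(s, x):
            s.n += 1
            mu_ = s.mu                      # mean over the first n-1 samples
            s.mu += (x - mu_)/s.n
            s.s2 += (x - mu_)*(x - s.mu)    # accumulates Σ(x-μ)²
        def std(s):
            return math.sqrt(s.s2/s.n) if s.n else float('nan')

    inc = IncStats()
    for x in (1., 2., 3., 4.):
        inc.add(x)
    assert inc.mu == 2.5 and inc.std() == math.sqrt(1.25)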
@@ -57,8 +57,8 @@ class tSampler:
         t.sampler = _Sampler('zz', ue_stats0, stats0, use_bitsync=use_bitsync, use_ri=use_ri)
         t.qci_samples = {} # in-progress collection until final get

-    def add(t, δt_tti, *uev):
-        ue_stats, stats = t.tstats.next(δt_tti, *uev)
+    def add(t, dt_tti, *uev):
+        ue_stats, stats = t.tstats.next(dt_tti, *uev)
         qci_samples = t.sampler.add(ue_stats, stats)
         t._update_qci_samples(qci_samples)
@@ -77,21 +77,21 @@ class tSampler:
 # _tUEstats provides environment to generate test ue_get[stats].
 class _tUEstats:
     def __init__(t):
-        t.τ = 0
+        t.tau = 0
         t.tx_total = {} # (ue,erab) -> tx_total_bytes

     # next returns next (ue_stats, stats) with specified ue transmissions
-    def next(t, δτ_tti, *uev):
+    def next(t, dtau_tti, *uev):
         for _ in uev:
             assert isinstance(_, UE)

-        t.τ += δτ_tti * tti
+        t.tau += dtau_tti * tti
         tx_total = t.tx_total
         t.tx_total = {} # if ue/erab is missing in ue_stats, its tx_total is reset

         ue_list = []
         ue_stats = {
-            'time':     t.τ,
-            'utc':      100 + t.τ,
+            'time':     t.tau,
+            'utc':      100 + t.tau,
             'ue_list':  ue_list
         }
         for ue in uev:
@@ -137,14 +137,14 @@ class _tUEstats:

 # S is shortcut to create Sample.
 def S(tx_bytes, tx_time_tti):
     if isinstance(tx_time_tti, tuple):
-        τ_lo, τ_hi = tx_time_tti
+        tau_lo, tau_hi = tx_time_tti
     else:
-        τ_lo = τ_hi = tx_time_tti
+        tau_lo = tau_hi = tx_time_tti
     s = Sample()
     s.tx_bytes = tx_bytes
-    s.tx_time     = (τ_lo + τ_hi) / 2 * tti
-    s.tx_time_err = (τ_hi - τ_lo) / 2 * tti
+    s.tx_time     = (tau_lo + tau_hi) / 2 * tti
+    s.tx_time_err = (tau_hi - tau_lo) / 2 * tti
     return s
@@ -154,7 +154,7 @@ def S(tx_bytes, tx_time_tti):
 def test_Sampler1():
     # _ constructs tSampler, feeds tx stats into it and returns yielded Samples.
     #
-    # tx_statsv = [](δt_tti, tx_bytes, #tx, #retx)
+    # tx_statsv = [](dt_tti, tx_bytes, #tx, #retx)
     #
     # only 1 ue, 1 qci and 1 erab are used in this test to verify the tricky
     # parts of the Sampler in how single flow is divided into samples. The other
@@ -163,8 +163,8 @@ def test_Sampler1():
     def _(*tx_statsv, bitsync=None): # -> []Sample
         def b(bitsync):
             t = tSampler(use_bitsync=bitsync)
-            for (δt_tti, tx_bytes, tx, retx) in tx_statsv:
-                t.add(δt_tti, UE(17, tx, retx, Etx(23, 4, tx_bytes)))
+            for (dt_tti, tx_bytes, tx, retx) in tx_statsv:
+                t.add(dt_tti, UE(17, tx, retx, Etx(23, 4, tx_bytes)))
             qci_samplev = t.get()
             if len(qci_samplev) == 0:
                 return []
@@ -181,7 +181,7 @@ def test_Sampler1():
         return bon if bitsync else boff

-    #        δt_tti  tx_bytes  #tx  #retx
+    #        dt_tti  tx_bytes  #tx  #retx
     assert _() == []
     assert _((10, 1000, 1, 0)) == [S(1000, 1)]
     assert _((10, 1000, 2, 0)) == [S(1000, 2)]
@@ -195,7 +195,7 @@ def test_Sampler1():
     for retx in range(1,10-tx+1):
         assert _((10,1000, tx, retx)) == [S(1000, tx+retx)]

-    assert _((10, 1000, 77, 88)) == [S(1000, 10)]   # tx_time ≤ δt  (bug in #tx / #retx)
+    assert _((10, 1000, 77, 88)) == [S(1000, 10)]   # tx_time ≤ dt  (bug in #tx / #retx)

     # coalesce/wrap-up 2 frames
     def _2tx(tx1, tx2): return _((10, 100*tx1, tx1, 0),
@@ -255,7 +255,7 @@ def test_Sampler1():
     # bitsync lightly (BitSync itself is verified in details in test_BitSync)
     def b(*btx_statsv):
         tx_statsv = []
-        for (tx_bytes, tx) in btx_statsv: # note: no δt_tti, #retx
+        for (tx_bytes, tx) in btx_statsv: # note: no dt_tti, #retx
             tx_statsv.append((10, tx_bytes, tx, 0))
         return _(*tx_statsv, bitsync=True)
@@ -272,7 +272,7 @@ def test_Sampler1():
            (   0,  0)) == [S(1000+500,10+5), S(1000,10)]

-# sampler starts from non-scratch - correctly detects δ for erabs.
+# sampler starts from non-scratch - correctly detects delta for erabs.
 def test_Sampler_start_from_nonscratch():
     t = tSampler(UE(17, 0,0, Etx(23, 4, 10000, tx_total=True)))
     t.add(10, UE(17, 10,0, Etx(23, 4, 123)))
@@ -313,7 +313,7 @@ def test_Sampler_tx_total_down():
 # N tx transport blocks is shared/distributed between multiple QCIs
 #
-#   tx_lo ∼ tx_bytes / Σtx_bytes
+#   tx_lo ∼ tx_bytes / Stx_bytes
 #   tx_hi = whole #tx even if tx_bytes are different
 def test_Sampler_txtb_shared_between_qci():
     def ue(tx, *etxv): return UE(17, tx, 0, *etxv)
@@ -356,7 +356,7 @@ def test_Sampler_rank():
 def test_BitSync():
     # _ passes txv_in into _BitSync and returns output stream.
     #
-    # txv_in = [](tx_bytes, #tx)  ; δt=10·tti
+    # txv_in = [](tx_bytes, #tx)  ; dt=10·tti
     def _(*txv_in):
         def do_bitsync(*txv_in):
             txv_out = []
@@ -365,14 +365,14 @@ def test_BitSync():
             for x, (tx_bytes, tx) in enumerate(txv_in):
                 _ = bitsync.next(10*tti, tx_bytes, tx,
                                  chr(ord('a')+x))
-                for (δt, tx_bytes, tx, x_) in _:
-                    assert δt == 10*tti
+                for (dt, tx_bytes, tx, x_) in _:
+                    assert dt == 10*tti
                     txv_out.append((tx_bytes, tx))
                     xv_out += x_

             _ = bitsync.finish()
-            for (δt, tx_bytes, tx, x_) in _:
-                assert δt == 10*tti
+            for (dt, tx_bytes, tx, x_) in _:
+                assert dt == 10*tti
                 txv_out.append((tx_bytes, tx))
                 xv_out += x_
...
@@ -259,8 +259,8 @@ def _handle_stats(logm, stats: xlog.Message, m_prev: kpi.Measurement):
     # do init/fini correction if there was also third preceding stats message.
     m = logm._m.copy() # [stats_prev, stats)

-    # δcc(counter) tells how specified cumulative counter changed since last stats result.
-    def δcc(counter):
+    # dcc(counter) tells how specified cumulative counter changed since last stats result.
+    def dcc(counter):
         old = _stats_cc(stats_prev, counter)
         new = _stats_cc(stats, counter)
         if new < old:
@@ -285,38 +285,38 @@ def _handle_stats(logm, stats: xlog.Message, m_prev: kpi.Measurement):
         # overall statistics if it is computed taking both periods into account.
         if p is not None:
             if p[fini] < p[init]:
-                δ = min(p[init]-p[fini], m[fini])
-                p[fini] += δ
-                m[fini] -= δ
+                delta = min(p[init]-p[fini], m[fini])
+                p[fini] += delta
+                m[fini] -= delta
         # if we still have too much fini - throw it away pretending that it
         # came from even older uncovered period
         if m[fini] > m[init]:
             m[fini] = m[init]

-    # compute δ for counters.
+    # compute delta for counters.
     # any logic error in data will be reported via LogError.
     try:
         # RRC: connection establishment
         m_initfini(
-            'RRC.ConnEstabAtt.sum',         δcc('rrc_connection_request'),
-            'RRC.ConnEstabSucc.sum',        δcc('rrc_connection_setup_complete'))
+            'RRC.ConnEstabAtt.sum',         dcc('rrc_connection_request'),
+            'RRC.ConnEstabSucc.sum',        dcc('rrc_connection_setup_complete'))
         # S1: connection establishment
         m_initfini(
-            'S1SIG.ConnEstabAtt',           δcc('s1_initial_context_setup_request'),
-            'S1SIG.ConnEstabSucc',          δcc('s1_initial_context_setup_response'))
+            'S1SIG.ConnEstabAtt',           dcc('s1_initial_context_setup_request'),
+            'S1SIG.ConnEstabSucc',          dcc('s1_initial_context_setup_response'))
         # ERAB: Initial establishment
         # FIXME not correct if multiple ERABs are present in one message
         m_initfini(
-            'ERAB.EstabInitAttNbr.sum',     δcc('s1_initial_context_setup_request'),
-            'ERAB.EstabInitSuccNbr.sum',    δcc('s1_initial_context_setup_response'))
+            'ERAB.EstabInitAttNbr.sum',     dcc('s1_initial_context_setup_request'),
+            'ERAB.EstabInitSuccNbr.sum',    dcc('s1_initial_context_setup_response'))
         # ERAB: Additional establishment
         # FIXME not correct if multiple ERABs are present in one message
         m_initfini(
-            'ERAB.EstabAddAttNbr.sum',      δcc('s1_erab_setup_request'),
-            'ERAB.EstabAddSuccNbr.sum',     δcc('s1_erab_setup_response'))
+            'ERAB.EstabAddAttNbr.sum',      dcc('s1_erab_setup_request'),
+            'ERAB.EstabAddSuccNbr.sum',     dcc('s1_erab_setup_response'))

     except Exception as e:
         if not isinstance(e, LogError):
@@ -383,22 +383,22 @@ def _handle_drb_stats(logm, drb_stats: xlog.Message):
     assert drb_stats_prev.message == "x.drb_stats"

     # time coverage for current drb_stats
-    τ_lo = drb_stats_prev.timestamp
-    τ_hi = drb_stats.timestamp
-    δτ = τ_hi - τ_lo
+    tau_lo = drb_stats_prev.timestamp
+    tau_hi = drb_stats.timestamp
+    dtau = tau_hi - tau_lo

     # see with which ._m or ._m_next, if any, drb_stats overlaps with ≥ 50% of
     # time first, and update that measurement correspondingly.
-    if not (δτ > 0):
+    if not (dtau > 0):
         return

     if logm._m is not None:
         m_lo = logm._m['X.Tstart']
         m_hi = m_lo + logm._m['X.δT']

-        d = max(0, min(τ_hi, m_hi) -
-                   max(τ_lo, m_lo))
-        if d >= δτ/2:   # NOTE ≥ 50%, not > 50% not to skip drb_stats if fill is exactly 50%
+        d = max(0, min(tau_hi, m_hi) -
+                   max(tau_lo, m_lo))
+        if d >= dtau/2: # NOTE ≥ 50%, not > 50% not to skip drb_stats if fill is exactly 50%
             _drb_update(logm._m, drb_stats)
             return
@@ -406,9 +406,9 @@ def _handle_drb_stats(logm, drb_stats: xlog.Message):
         n_lo = logm._m_next['X.Tstart']
         # n_hi - don't know as _m_next['X.δT'] is ø yet

-        d = max(0, τ_hi -
-                   max(τ_lo, n_lo))
-        if d >= δτ/2:
+        d = max(0, tau_hi -
+                   max(tau_lo, n_lo))
+        if d >= dtau/2:
             _drb_update(logm._m_next, drb_stats)
             return
@@ -434,16 +434,16 @@ def _drb_update(m: kpi.Measurement, drb_stats: xlog.Message):
         # DRB.IPVol and DRB.IPTime are collected to compute throughput.
         #
-        # thp = ΣB*/ΣT*  where B* is tx'ed bytes in the sample without taking last tti into account
+        # thp = SB*/ST*  where B* is tx'ed bytes in the sample without taking last tti into account
         # and T* is time of tx also without taking that sample's tail tti.
         #
-        # we only know ΣB (whole amount of tx), ΣT and ΣT* with some error.
+        # we only know SB (whole amount of tx), ST and ST* with some error.
         #
         # -> thp can be estimated to be inside the following interval:
         #
-        #          ΣB              ΣB
+        #          SB              SB
         #         ─────  ≤ thp ≤  ─────          (1)
-        #         ΣT_hi           ΣT*_lo
+        #         ST_hi           ST*_lo
         #
         # the upper layer in xlte.kpi will use the following formula for
         # final throughput calculation:
@@ -452,28 +452,28 @@ def _drb_update(m: kpi.Measurement, drb_stats: xlog.Message):
         #               thp = ──────────         (2)
        #                     DRB.IPTime
         #
-        # -> set DRB.IPTime and its error to mean and δ of ΣT_hi and ΣT*_lo
+        # -> set DRB.IPTime and its error to mean and delta of ST_hi and ST*_lo
         # so that (2) becomes (1).

         # FIXME we account whole PDCP instead of only IP traffic
-        ΣB      = trx['%s_tx_bytes' % dir]
-        ΣT      = trx['%s_tx_time' % dir]
-        ΣT_err  = trx['%s_tx_time_err' % dir]
-        ΣTT     = trx['%s_tx_time_notailtti' % dir]
-        ΣTT_err = trx['%s_tx_time_notailtti_err' % dir]
+        SB      = trx['%s_tx_bytes' % dir]
+        ST      = trx['%s_tx_time' % dir]
+        ST_err  = trx['%s_tx_time_err' % dir]
+        STT     = trx['%s_tx_time_notailtti' % dir]
+        STT_err = trx['%s_tx_time_notailtti_err' % dir]

-        ΣT_hi  = ΣT + ΣT_err
-        ΣTT_lo = ΣTT - ΣTT_err
+        ST_hi  = ST + ST_err
+        STT_lo = STT - STT_err

-        qvol[qci]      = 8*ΣB       # in bits
-        qtime[qci]     = (ΣT_hi + ΣTT_lo) / 2
-        qtime_err[qci] = (ΣT_hi - ΣTT_lo) / 2
+        qvol[qci]      = 8*SB       # in bits
+        qtime[qci]     = (ST_hi + STT_lo) / 2
+        qtime_err[qci] = (ST_hi - STT_lo) / 2
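
Numerically (values invented, chosen so the float arithmetic is exact): with ΣB = 1000000 bytes, ΣT = 1.0 ± 0.125 s and ΣT* = 0.75 ± 0.125 s, DRB.IPTime becomes 0.875 ± 0.25 s, and evaluating formula (2) at DRB.IPTime ∓ its error reproduces exactly the bounds of interval (1):

    SB           = 1000000          # bytes
    ST,  ST_err  = 1.0,  0.125      # s, whole tx time
    STT, STT_err = 0.75, 0.125      # s, tx time without tail tti
    ST_hi  = ST  + ST_err           # = 1.125
    STT_lo = STT - STT_err          # = 0.625
    ip_time     = (ST_hi + STT_lo) / 2   # = 0.875 -> DRB.IPTime
    ip_time_err = (ST_hi - STT_lo) / 2   # = 0.25
    assert 8*SB / (ip_time + ip_time_err) == 8*SB / ST_hi    # thp lower bound
    assert 8*SB / (ip_time - ip_time_err) == 8*SB / STT_lo   # thp upper bound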
 # LogError(timestamp|None, *argv).
 @func(LogError)
-def __init__(e, τ, *argv):
-    e.timestamp = τ
+def __init__(e, tau, *argv):
+    e.timestamp = tau
     super(LogError, e).__init__(*argv)

 # __str__ returns human-readable form.
...
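
The ≥ 50% rule in _handle_drb_stats above is plain interval arithmetic: a drb_stats period [τ_lo, τ_hi] is attributed to the measurement it overlaps by at least half of its own length. The same computation in isolation (overlap is an illustrative helper, not from the patch):

    def overlap(a_lo, a_hi, b_lo, b_hi):
        # length of the intersection of intervals [a_lo, a_hi] and [b_lo, b_hi]
        return max(0, min(a_hi, b_hi) - max(a_lo, b_lo))

    # drb_stats covers [10.0, 11.0]; a measurement covers [10.4, 11.4]
    d = overlap(10.0, 11.0, 10.4, 11.4)     # = 0.6, i.e. 60% of the drb period
    assert d >= (11.0 - 10.0)/2             # -> attribute drb_stats to it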
@@ -118,10 +118,10 @@ class tLogMeasure:
         if t._mok is None:
             t._mok = Measurement()
         tstart = t._mok['X.Tstart']
-        δt = t._mok['X.δT']
+        dt = t._mok['X.δT']
         t._mok = Measurement() # reinit with all NA
         t._mok['X.Tstart'] = tstart
-        t._mok['X.δT'] = δt
+        t._mok['X.δT'] = dt

     # read retrieves next measurement from LogMeasure and verifies it to be as expected.
     def read(t): # -> Measurement
@@ -169,51 +169,51 @@ def test_LogMeasure():
     # _(...)      # verify effect on Measurements returned with period
     # _(...)      # ending by timestamp of the above stats call.
     # _(...)      # i.e. Measurement₁ if tstats call corresponds to xlog₂.
-    τ_xlog = 1          # timestamp of last emitted xlog entry
-    τ_logm = τ_xlog-2+1 # timestamp of next measurement to be read from logm
+    tau_xlog = 1            # timestamp of last emitted xlog entry
+    tau_logm = tau_xlog-2+1 # timestamp of next measurement to be read from logm
     counters_prev = {}
     def tstats(counters):
-        nonlocal τ_xlog, τ_logm, counters_prev
-        trace('\n>>> tstats τ_xlog: %s  τ_logm: %s' % (τ_xlog, τ_logm))
-        t.xlog( jstats(τ_xlog+1, counters) )    # xlog τ+1
-        t.read()                                # read+assert M for τ-1
-        _('X.Tstart', τ_logm+1)                 # start preparing next expected M at τ
+        nonlocal tau_xlog, tau_logm, counters_prev
+        trace('\n>>> tstats tau_xlog: %s  tau_logm: %s' % (tau_xlog, tau_logm))
+        t.xlog( jstats(tau_xlog+1, counters) )  # xlog tau+1
+        t.read()                                # read+assert M for tau-1
+        _('X.Tstart', tau_logm+1)               # start preparing next expected M at tau
         _('X.δT', 1)
-        τ_xlog += 1
-        τ_logm += 1
+        tau_xlog += 1
+        tau_logm += 1
         counters_prev = counters

-    # tδstats is like tstats but takes δ for counters.
-    def tδstats(δcounters):
+    # tdstats is like tstats but takes delta for counters.
+    def tdstats(dcounters):
         counters = counters_prev.copy()
-        for k,δv in δcounters.items():
-            counters[k] = counters.get(k,0) + δv
+        for k,dv in dcounters.items():
+            counters[k] = counters.get(k,0) + dv
         tstats(counters)

     # tevent is the verb to verify handling of events.
     # its logic is similar to tstats.
     def tevent(event):
-        nonlocal τ_xlog, τ_logm, counters_prev
-        trace('\n>>> tstats τ_xlog: %s  τ_logm: %s' % (τ_xlog, τ_logm))
-        t.xlog(json.dumps({"meta": {"event": event, "time": τ_xlog+1}}))
+        nonlocal tau_xlog, tau_logm, counters_prev
+        trace('\n>>> tstats tau_xlog: %s  tau_logm: %s' % (tau_xlog, tau_logm))
+        t.xlog(json.dumps({"meta": {"event": event, "time": tau_xlog+1}}))
         t.read()
-        _('X.Tstart', τ_logm+1)
+        _('X.Tstart', tau_logm+1)
         _('X.δT', 1)
-        τ_xlog += 1
-        τ_logm += 1
+        tau_xlog += 1
+        tau_logm += 1
         counters_prev = {} # reset

     # tdrb_stats is the verb to verify handling of x.drb_stats message.
     #
-    # it xlogs drb stats with given δτ relative to either previous (δτ > 0) or
-    # next (δτ < 0) stats or event.
-    def tdrb_stats(δτ, qci_trx):
-        if δτ >= 0:
-            τ = τ_xlog   + δτ   # after previous stats or event
+    # it xlogs drb stats with given dtau relative to either previous (dtau > 0) or
+    # next (dtau < 0) stats or event.
+    def tdrb_stats(dtau, qci_trx):
+        if dtau >= 0:
+            tau = tau_xlog   + dtau # after previous stats or event
         else:
-            τ = τ_xlog+1 + δτ   # before next stats or event
-        trace('\n>>> tdrb_stats τ: %s  τ_xlog: %s  τ_logm: %s' % (τ, τ_xlog, τ_logm))
-        t.xlog( jdrb_stats(τ, qci_trx) )
+            tau = tau_xlog+1 + dtau # before next stats or event
+        trace('\n>>> tdrb_stats tau: %s  tau_xlog: %s  tau_logm: %s' % (tau, tau_xlog, tau_logm))
+        t.xlog( jdrb_stats(tau, qci_trx) )
@@ -271,14 +271,14 @@ def test_LogMeasure():

     # S1SIG.ConnEstab, ERAB.InitEstab
-    tδstats({'s1_initial_context_setup_request':  +3,
-             's1_initial_context_setup_response': +2})
+    tdstats({'s1_initial_context_setup_request':  +3,
+             's1_initial_context_setup_response': +2})
     _('S1SIG.ConnEstabAtt',  3)
     _('S1SIG.ConnEstabSucc', 3)         # 2 + 1(from_next)
     _('ERAB.EstabInitAttNbr.sum',  3)   # currently same as S1SIG.ConnEstab
     _('ERAB.EstabInitSuccNbr.sum', 3)   # ----//----

-    tδstats({'s1_initial_context_setup_request':  +4,
-             's1_initial_context_setup_response': +3})
+    tdstats({'s1_initial_context_setup_request':  +4,
+             's1_initial_context_setup_response': +3})
     _('S1SIG.ConnEstabAtt',  4)
     _('S1SIG.ConnEstabSucc', 2)         # 3 - 1(to_prev)
@@ -287,24 +287,24 @@ def test_LogMeasure():

     # ERAB.EstabAdd
-    tδstats({'s1_erab_setup_request':  +1,
-             's1_erab_setup_response': +1})
+    tdstats({'s1_erab_setup_request':  +1,
+             's1_erab_setup_response': +1})
     _('ERAB.EstabAddAttNbr.sum',  1)
     _('ERAB.EstabAddSuccNbr.sum', 1)

-    tδstats({'s1_erab_setup_request':  +3,
-             's1_erab_setup_response': +2})
+    tdstats({'s1_erab_setup_request':  +3,
+             's1_erab_setup_response': +2})
     _('ERAB.EstabAddAttNbr.sum',  3)
     _('ERAB.EstabAddSuccNbr.sum', 2)

     # DRB.IPVol / DRB.IPTime  (testing all variants of stats/x.drb_stats interaction)
-    tδstats({})
-    tδstats({})                 # ──S₁·d₁─────S₂·d₂─────S₃·d₃──
+    tdstats({})
+    tdstats({})                 # ──S₁·d₁─────S₂·d₂─────S₃·d₃──
     tdrb_stats(+0.1, {1:  drb_trx(1.1,10,   1.2,20),
                       11: drb_trx(1.3,30,   1.4,40)})
     # nothing here - d₁ comes as the first drb_stats
-    tδstats({})                 # S₂
+    tdstats({})                 # S₂
     tdrb_stats(+0.1, {2:  drb_trx(2.1,100,  2.2,200),   # d₂ is included into S₁-S₂
                       22: drb_trx(2.3,300,  2.4,400)})
     _('DRB.IPTimeDl.2', 2.1);   _('DRB.IPVolDl.2', 8*100)
@@ -312,7 +312,7 @@ def test_LogMeasure():
     _('DRB.IPTimeDl.22', 2.3);  _('DRB.IPVolDl.22', 8*300)
     _('DRB.IPTimeUl.22', 2.4);  _('DRB.IPVolUl.22', 8*400)

-    tδstats({})                 # S₃
+    tdstats({})                 # S₃
     tdrb_stats(+0.1, {3:  drb_trx(3.1,1000, 3.2,2000),  # d₃ is included int S₂-S₃
                       33: drb_trx(3.3,3000, 3.4,4000)})
     _('DRB.IPTimeDl.3', 3.1);   _('DRB.IPVolDl.3', 8*1000)
@@ -322,20 +322,20 @@ def test_LogMeasure():
     tdrb_stats(-0.1, {1: drb_trx(1.1,11, 1.2,12)})      # ──S·d─────d·S─────d·S──
-    tδstats({})                                         # cont↑
+    tdstats({})                                         # cont↑
     _('DRB.IPTimeDl.1', 1.1);   _('DRB.IPVolDl.1', 8*11)
     _('DRB.IPTimeUl.1', 1.2);   _('DRB.IPVolUl.1', 8*12)

     tdrb_stats(-0.1, {2: drb_trx(2.1,21, 2.2,22)})
-    tδstats({})
+    tdstats({})
     _('DRB.IPTimeDl.2', 2.1);   _('DRB.IPVolDl.2', 8*21)
     _('DRB.IPTimeUl.2', 2.2);   _('DRB.IPVolUl.2', 8*22)

     tdrb_stats(-0.1, {3: drb_trx(3.1,31, 3.2,32)})      # ──d·S─────d·S─────d·S·d──
-    tδstats({})                                         # cont↑
+    tdstats({})                                         # cont↑
     _('DRB.IPTimeDl.3', 3.1);   _('DRB.IPVolDl.3', 8*31)
     _('DRB.IPTimeUl.3', 3.2);   _('DRB.IPVolUl.3', 8*32)

     tdrb_stats(-0.1, {4: drb_trx(4.1,41, 4.2,42)})
-    tδstats({})
+    tdstats({})
     tdrb_stats(+0.1, {5: drb_trx(5.1,51, 5.2,52)})
     _('DRB.IPTimeDl.4', 4.1);   _('DRB.IPVolDl.4', 8*41)
     _('DRB.IPTimeUl.4', 4.2);   _('DRB.IPVolUl.4', 8*42)
@@ -343,16 +343,16 @@ def test_LogMeasure():
     _('DRB.IPTimeUl.5', 5.2);   _('DRB.IPVolUl.5', 8*52)

     tdrb_stats(+0.5, {6: drb_trx(6.1,61, 6.2,62)})      # ──d·S·d──d──S───d──S──
-    tδstats({})                                         # cont↑
+    tdstats({})                                         # cont↑
     _('DRB.IPTimeDl.6', 6.1);   _('DRB.IPVolDl.6', 8*61)
     _('DRB.IPTimeUl.6', 6.2);   _('DRB.IPVolUl.6', 8*62)

     tdrb_stats(+0.51,{7: drb_trx(7.1,71, 7.2,72)})
-    tδstats({})
+    tdstats({})
     _('DRB.IPTimeDl.7', 7.1);   _('DRB.IPVolDl.7', 8*71)
     _('DRB.IPTimeUl.7', 7.2);   _('DRB.IPVolUl.7', 8*72)

     tdrb_stats(-0.1, {8: drb_trx(8.1,81, 8.2,82)})      # combined d + S with nonzero counters
-    tδstats({'s1_initial_context_setup_request':  +3,   # d──S────d·S──
-             's1_initial_context_setup_response': +2})  # cont↑
+    tdstats({'s1_initial_context_setup_request':  +3,   # d──S────d·S──
+             's1_initial_context_setup_response': +2})  # cont↑
     _('DRB.IPTimeDl.8', 8.1);   _('DRB.IPVolDl.8', 8*81)
     _('DRB.IPTimeUl.8', 8.2);   _('DRB.IPVolUl.8', 8*82)
@@ -363,15 +363,15 @@ def test_LogMeasure():

     # service detach/attach, connect failure, xlog failure
-    tδstats({})                 # untie from previous history
+    tdstats({})                 # untie from previous history
     i, f = 'rrc_connection_request', 'rrc_connection_setup_complete'
     I, F = 'RRC.ConnEstabAtt.sum', 'RRC.ConnEstabSucc.sum'

-    tδstats({i:2, f:1})
+    tdstats({i:2, f:1})
     _(I, 2)
     _(F, 2)     # +1(from_next)
-    tδstats({i:2, f:2})
+    tdstats({i:2, f:2})
     _(I, 2)
     _(F, 1)     # -1(to_prev)
@@ -379,10 +379,10 @@ def test_LogMeasure():
     t.expect_nodata()
     t.read()                    # LogMeasure flushes its queue on "service detach".
-    _('X.Tstart', τ_logm+1)     # After the flush t.read will need to go only 1 step behind
+    _('X.Tstart', tau_logm+1)   # After the flush t.read will need to go only 1 step behind
     _('X.δT', 1)                # corresponding t.xlog call instead of previously going 2 steps beyond.
     t.expect_nodata()           # Do one t.read step manually to catch up.
-    τ_logm += 1
+    tau_logm += 1

     tevent("service connect failure")
     t.expect_nodata()
@@ -397,8 +397,8 @@ def test_LogMeasure():
     tevent("service attach")
     t.expect_nodata()
-    t.xlog( jstats(τ_xlog+1, {i:1000, f:1000}) )    # LogMeasure restarts the queue after data starts to
-    τ_xlog += 1                                     # come in again. Do one t.xlog step manually to
+    t.xlog( jstats(tau_xlog+1, {i:1000, f:1000}) )  # LogMeasure restarts the queue after data starts to
+    tau_xlog += 1                                   # come in again. Do one t.xlog step manually to
                                                     # increase t.read - t.xlog distance back to 2.
     tstats({i:1000+2, f:1000+2})
     _(I, 2)     # no "extra" events even if counters start with jumped values after reattach
@@ -451,15 +451,15 @@ def test_LogMeasure_badinput():
     t.xlog( jstats(51, {cc: 50+8}) )
     t.xlog( jstats(52, {cc: 50+8+9}) )

-    def readok(τ, CC_value):
-        _('X.Tstart', τ)
+    def readok(tau, CC_value):
+        _('X.Tstart', tau)
         _('X.δT', 1)
         _(CC, CC_value)
         t.read()

-    def read_nodata(τ, δτ=1):
-        _('X.Tstart', τ)
-        _('X.δT', δτ)
+    def read_nodata(tau, dtau=1):
+        _('X.Tstart', tau)
+        _('X.δT', dtau)
         t.expect_nodata()
         t.read()
@@ -468,8 +468,8 @@ def test_LogMeasure_badinput():
     readok(2, 3)        # 2-3
     read_nodata(3, 8)   # 3-11

-    def tbadcell(τ, ncell):
-        with raises(LogError, match="t%s: stats describes %d cells;" % (τ, ncell) +
+    def tbadcell(tau, ncell):
+        with raises(LogError, match="t%s: stats describes %d cells;" % (tau, ncell) +
                     " but only single-cell configurations are supported"):
             t.read()
     tbadcell(11, 0)
@@ -480,8 +480,8 @@ def test_LogMeasure_badinput():
     read_nodata(13, 1)
     tbadcell(14, 3)

-    def tbadstats(τ, error):
-        with raises(LogError, match="t%s: stats: %s" % (τ, error)):
+    def tbadstats(tau, error):
+        with raises(LogError, match="t%s: stats: %s" % (tau, error)):
             t.read()
     read_nodata(14, 7)
     tbadstats(21, ":10/cells/1 no `counters`")
@@ -520,9 +520,9 @@ def test_LogMeasure_cc_wraparound():
     t.xlog( jstats(4, {cc: 140}) )  # cc↑↑  - should start afresh
t.xlog( jstats(5, {cc: 150}) ) t.xlog( jstats(5, {cc: 150}) )
def readok(τ, CC_value): def readok(tau, CC_value):
_('X.Tstart', τ) _('X.Tstart', tau)
_('X.δT', int(τ+1)-τ) _('X.δT', int(tau+1)-tau)
if CC_value is not None: if CC_value is not None:
_(CC, CC_value) _(CC, CC_value)
else: else:
...@@ -553,9 +553,9 @@ def test_LogMeasure_sync(): ...@@ -553,9 +553,9 @@ def test_LogMeasure_sync():
t.xlog( '{"meta": {"event": "sync", "time": 2.5, "state": "attached", "reason": "periodic", "generator": "xlog ws://localhost:9001 stats[]/30.0s"}}' ) t.xlog( '{"meta": {"event": "sync", "time": 2.5, "state": "attached", "reason": "periodic", "generator": "xlog ws://localhost:9001 stats[]/30.0s"}}' )
t.xlog( jstats(3, {cc: 7}) ) t.xlog( jstats(3, {cc: 7}) )
def readok(τ, CC_value): def readok(tau, CC_value):
_('X.Tstart', τ) _('X.Tstart', tau)
_('X.δT', int(τ+1)-τ) _('X.δT', int(tau+1)-tau)
if CC_value is not None: if CC_value is not None:
_(CC, CC_value) _(CC, CC_value)
else: else:
...@@ -568,8 +568,8 @@ def test_LogMeasure_sync(): ...@@ -568,8 +568,8 @@ def test_LogMeasure_sync():
# jstats returns json-encoded stats message corresponding to counters dict. # jstats returns json-encoded stats message corresponding to counters dict.
# τ goes directly to stats['utc'] as is. # tau goes directly to stats['utc'] as is.
def jstats(τ, counters): # -> str def jstats(tau, counters): # -> str
g_cc = {} # global g_cc = {} # global
cell_cc = {} # per-cell cell_cc = {} # per-cell
...@@ -581,7 +581,7 @@ def jstats(τ, counters): # -> str ...@@ -581,7 +581,7 @@ def jstats(τ, counters): # -> str
s = { s = {
"message": "stats", "message": "stats",
"utc": τ, "utc": tau,
"cells": {"1": {"counters": {"messages": cell_cc}}}, "cells": {"1": {"counters": {"messages": cell_cc}}},
"counters": {"messages": g_cc}, "counters": {"messages": g_cc},
} }
...@@ -596,7 +596,7 @@ def test_jstats(): ...@@ -596,7 +596,7 @@ def test_jstats():
# jdrb_stats, similarly to jstats, returns json-encoded x.drb_stats message # jdrb_stats, similarly to jstats, returns json-encoded x.drb_stats message
# corresponding to per-QCI dl/ul tx_time/tx_bytes. # corresponding to per-QCI dl/ul tx_time/tx_bytes.
def jdrb_stats(τ, qci_dlul): # -> str def jdrb_stats(tau, qci_dlul): # -> str
qci_dlul = qci_dlul.copy() qci_dlul = qci_dlul.copy()
for qci, dlul in qci_dlul.items(): for qci, dlul in qci_dlul.items():
assert isinstance(dlul, dict) assert isinstance(dlul, dict)
...@@ -609,7 +609,7 @@ def jdrb_stats(τ, qci_dlul): # -> str ...@@ -609,7 +609,7 @@ def jdrb_stats(τ, qci_dlul): # -> str
s = { s = {
"message": "x.drb_stats", "message": "x.drb_stats",
"utc": τ, "utc": tau,
"qci_dict": qci_dlul, "qci_dict": qci_dlul,
} }
......
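For reference, a sketch of the wire shape these two helpers produce (field layout taken from the dicts above; the elided loop decides which counters land in the global vs per-cell dict, so the {...} placeholders are illustrative):

# jstats(1, {...})     -> {"message": "stats", "utc": 1,
#                          "cells": {"1": {"counters": {"messages": {...}}}},
#                          "counters": {"messages": {...}}}
# jdrb_stats(1, {...}) -> {"message": "x.drb_stats", "utc": 1, "qci_dict": {...}}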
...@@ -190,20 +190,20 @@ def xlog(ctx, wsuri, logspecv): ...@@ -190,20 +190,20 @@ def xlog(ctx, wsuri, logspecv):
# e.g. disk full in xl.jemit itself # e.g. disk full in xl.jemit itself
log.exception('xlog failure (second level):') log.exception('xlog failure (second level):')
δt_reconnect = min(3, lsync.period) dt_reconnect = min(3, lsync.period)
_, _rx = select( _, _rx = select(
ctx.done().recv, # 0 ctx.done().recv, # 0
time.after(δt_reconnect).recv, # 1 time.after(dt_reconnect).recv, # 1
) )
if _ == 0: if _ == 0:
raise ctx.err() raise ctx.err()
# _XLogger serves xlog implementation. # _XLogger serves xlog implementation.
class _XLogger: class _XLogger:
def __init__(xl, wsuri, logspecv, δt_sync): def __init__(xl, wsuri, logspecv, dt_sync):
xl.wsuri = wsuri xl.wsuri = wsuri
xl.logspecv = logspecv xl.logspecv = logspecv
xl.δt_sync = δt_sync # = logspecv.get("meta.sync").period xl.dt_sync = dt_sync # = logspecv.get("meta.sync").period
xl.tsync = float('-inf') # never yet xl.tsync = float('-inf') # never yet
# emit saves line to the log. # emit saves line to the log.
...@@ -235,7 +235,7 @@ class _XLogger: ...@@ -235,7 +235,7 @@ class _XLogger:
def xlog1(xl, ctx): def xlog1(xl, ctx):
# emit sync periodically even in detached state # emit sync periodically even in detached state
# this is useful to still know e.g. intended logspec if the service is stopped for a long time # this is useful to still know e.g. intended logspec if the service is stopped for a long time
if time.now() - xl.tsync >= xl.δt_sync: if time.now() - xl.tsync >= xl.dt_sync:
xl.jemit_sync("detached", "periodic", {}) xl.jemit_sync("detached", "periodic", {})
# connect to the service # connect to the service
...@@ -336,11 +336,11 @@ class _XLogger: ...@@ -336,11 +336,11 @@ class _XLogger:
# TODO detect time overruns and correct schedule correspondingly # TODO detect time overruns and correct schedule correspondingly
tnow = time.now() tnow = time.now()
tarm = t0 + tmin tarm = t0 + tmin
δtsleep = tarm - tnow dtsleep = tarm - tnow
if δtsleep > 0: if dtsleep > 0:
_, _rx = select( _, _rx = select(
ctx.done().recv, # 0 ctx.done().recv, # 0
time.after(δtsleep).recv, # 1 time.after(dtsleep).recv, # 1
) )
if _ == 0: if _ == 0:
raise ctx.err() raise ctx.err()
...@@ -420,7 +420,7 @@ class _XMsgServer: ...@@ -420,7 +420,7 @@ class _XMsgServer:
resp_raw = json.dumps(resp, resp_raw = json.dumps(resp,
separators=(',', ':'), # most compact, like Amari does separators=(',', ':'), # most compact, like Amari does
ensure_ascii=False) # so that e.g. δt comes as is ensure_ascii=False) # so that e.g. dt comes as is
return resp, resp_raw return resp, resp_raw
......
...@@ -145,10 +145,10 @@ def test_Reader_readahead_vs_eof(): ...@@ -145,10 +145,10 @@ def test_Reader_readahead_vs_eof():
fxlog.seek(pos, io.SEEK_SET) fxlog.seek(pos, io.SEEK_SET)
xr = xlog.Reader(fxlog) xr = xlog.Reader(fxlog)
def expect_msg(τ, msg): def expect_msg(tau, msg):
_ = xr.read() _ = xr.read()
assert type(_) is xlog.Message assert type(_) is xlog.Message
assert _.timestamp == τ assert _.timestamp == tau
assert _.message == msg assert _.message == msg
logit('{"message": "aaa", "utc": 1}') logit('{"message": "aaa", "utc": 1}')
......
...@@ -116,26 +116,26 @@ ...@@ -116,26 +116,26 @@
"\n", "\n",
"# calc_each_period partitions mlog data into periods and yields kpi.Calc for each period.\n", "# calc_each_period partitions mlog data into periods and yields kpi.Calc for each period.\n",
"def calc_each_period(mlog: kpi.MeasurementLog, tperiod: float): # -> yield kpi.Calc\n", "def calc_each_period(mlog: kpi.MeasurementLog, tperiod: float): # -> yield kpi.Calc\n",
" τ = mlog.data()[0]['X.Tstart']\n", " tau = mlog.data()[0]['X.Tstart']\n",
" for m in mlog.data()[1:]:\n", " for m in mlog.data()[1:]:\n",
" τ_ = m['X.Tstart']\n", " tau_ = m['X.Tstart']\n",
" if (τ_ - τ) >= tperiod:\n", " if (tau_ - tau) >= tperiod:\n",
" calc = kpi.Calc(mlog, τ, τ+tperiod)\n", " calc = kpi.Calc(mlog, tau, tau+tperiod)\n",
" τ = calc.τ_hi\n", " tau = calc.tau_hi\n",
" yield calc\n", " yield calc\n",
"\n", "\n",
"tperiod = 1*60 # 1 minute\n", "tperiod = 1*60 # 1 minute\n",
"vτ = []\n", "vtau = []\n",
"vInititialEPSBEstabSR = []\n", "vInititialEPSBEstabSR = []\n",
"vAddedEPSBEstabSR = []\n", "vAddedEPSBEstabSR = []\n",
"\n", "\n",
"for calc in calc_each_period(mlog, tperiod):\n", "for calc in calc_each_period(mlog, tperiod):\n",
" vτ.append(calc.τ_lo)\n", " vtau.append(calc.tau_lo)\n",
" _ = calc.erab_accessibility() # E-RAB Accessibility\n", " _ = calc.erab_accessibility() # E-RAB Accessibility\n",
" vInititialEPSBEstabSR.append(_[0])\n", " vInititialEPSBEstabSR.append(_[0])\n",
" vAddedEPSBEstabSR .append(_[1])\n", " vAddedEPSBEstabSR .append(_[1])\n",
"\n", "\n",
"vτ = np.asarray([datetime.fromtimestamp(_) for _ in vτ])\n", "vtau = np.asarray([datetime.fromtimestamp(_) for _ in vtau])\n",
"vInititialEPSBEstabSR = np.asarray(vInititialEPSBEstabSR)\n", "vInititialEPSBEstabSR = np.asarray(vInititialEPSBEstabSR)\n",
"vAddedEPSBEstabSR = np.asarray(vAddedEPSBEstabSR)" "vAddedEPSBEstabSR = np.asarray(vAddedEPSBEstabSR)"
] ]
...@@ -188,7 +188,7 @@ ...@@ -188,7 +188,7 @@
"from xlte.demo import kpidemo\n", "from xlte.demo import kpidemo\n",
"import matplotlib.pyplot as plt\n", "import matplotlib.pyplot as plt\n",
"\n", "\n",
"kpidemo.figplot_erab_accessibility(plt.gcf(), vτ, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod)" "kpidemo.figplot_erab_accessibility(plt.gcf(), vtau, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod)"
] ]
}, },
{ {
...@@ -264,15 +264,15 @@ ...@@ -264,15 +264,15 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"tperiod = 3 # 3 seconds\n", "tperiod = 3 # 3 seconds\n",
"vτ = []\n", "vtau = []\n",
"vIPThp_qci = []\n", "vIPThp_qci = []\n",
"\n", "\n",
"for calc in calc_each_period(mlog, tperiod):\n", "for calc in calc_each_period(mlog, tperiod):\n",
" vτ.append(calc.τ_lo)\n", " vtau.append(calc.tau_lo)\n",
" _ = calc.eutran_ip_throughput() # E-UTRAN IP Throughput\n", " _ = calc.eutran_ip_throughput() # E-UTRAN IP Throughput\n",
" vIPThp_qci.append(_)\n", " vIPThp_qci.append(_)\n",
"\n", "\n",
"vτ = np.asarray([datetime.fromtimestamp(_) for _ in vτ])\n", "vtau = np.asarray([datetime.fromtimestamp(_) for _ in vtau])\n",
"vIPThp_qci = np.asarray(vIPThp_qci)" "vIPThp_qci = np.asarray(vIPThp_qci)"
] ]
}, },
...@@ -304,7 +304,7 @@ ...@@ -304,7 +304,7 @@
"source": [ "source": [
"fig = plt.gcf()\n", "fig = plt.gcf()\n",
"fig.set_size_inches(10, 8)\n", "fig.set_size_inches(10, 8)\n",
"kpidemo.figplot_eutran_ip_throughput(fig, vτ, vIPThp_qci, tperiod)" "kpidemo.figplot_eutran_ip_throughput(fig, vtau, vIPThp_qci, tperiod)"
] ]
}, },
{ {
......
...@@ -67,22 +67,22 @@ def main(): ...@@ -67,22 +67,22 @@ def main():
# calc_each_period partitions mlog data into periods and yields kpi.Calc for each period. # calc_each_period partitions mlog data into periods and yields kpi.Calc for each period.
def calc_each_period(mlog: kpi.MeasurementLog, tperiod: float): # -> yield kpi.Calc def calc_each_period(mlog: kpi.MeasurementLog, tperiod: float): # -> yield kpi.Calc
τ = mlog.data()[0]['X.Tstart'] tau = mlog.data()[0]['X.Tstart']
for m in mlog.data()[1:]: for m in mlog.data()[1:]:
τ_ = m['X.Tstart'] tau_ = m['X.Tstart']
if (τ_ - τ) >= tperiod: if (tau_ - tau) >= tperiod:
calc = kpi.Calc(mlog, τ, τ+tperiod) calc = kpi.Calc(mlog, tau, tau+tperiod)
τ = calc.τ_hi tau = calc.tau_hi
yield calc yield calc
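As an illustration of the partitioning (a sketch with made-up timestamps, assuming measurements aligned to the period):

# X.Tstart of mlog entries:  0   30   60   90   120  ...
# tperiod = 60  ->  yields kpi.Calc over [0,60), then [60,120), ...
#                   (tau advances to calc.tau_hi after each yield)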
tperiod = float(sys.argv[1]) tperiod = float(sys.argv[1])
vτ = [] vtau = []
vInititialEPSBEstabSR = [] vInititialEPSBEstabSR = []
vAddedEPSBEstabSR = [] vAddedEPSBEstabSR = []
vIPThp_qci = [] vIPThp_qci = []
for calc in calc_each_period(mlog, tperiod): for calc in calc_each_period(mlog, tperiod):
vτ.append(calc.τ_lo) vtau.append(calc.tau_lo)
_ = calc.erab_accessibility() # E-RAB Accessibility _ = calc.erab_accessibility() # E-RAB Accessibility
vInititialEPSBEstabSR.append(_[0]) vInititialEPSBEstabSR.append(_[0])
...@@ -91,7 +91,7 @@ def main(): ...@@ -91,7 +91,7 @@ def main():
_ = calc.eutran_ip_throughput() # E-UTRAN IP Throughput _ = calc.eutran_ip_throughput() # E-UTRAN IP Throughput
vIPThp_qci.append(_) vIPThp_qci.append(_)
vτ = np.asarray([datetime.fromtimestamp(_) for _ in vτ]) vtau = np.asarray([datetime.fromtimestamp(_) for _ in vtau])
vInititialEPSBEstabSR = np.asarray(vInititialEPSBEstabSR) vInititialEPSBEstabSR = np.asarray(vInititialEPSBEstabSR)
vAddedEPSBEstabSR = np.asarray(vAddedEPSBEstabSR) vAddedEPSBEstabSR = np.asarray(vAddedEPSBEstabSR)
vIPThp_qci = np.asarray(vIPThp_qci) vIPThp_qci = np.asarray(vIPThp_qci)
...@@ -125,30 +125,30 @@ def main(): ...@@ -125,30 +125,30 @@ def main():
fig = plt.figure(constrained_layout=True, figsize=(12,8)) fig = plt.figure(constrained_layout=True, figsize=(12,8))
facc, fthp = fig.subfigures(1, 2) facc, fthp = fig.subfigures(1, 2)
figplot_erab_accessibility (facc, vτ, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod) figplot_erab_accessibility (facc, vtau, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod)
figplot_eutran_ip_throughput(fthp, vτ, vIPThp_qci, tperiod) figplot_eutran_ip_throughput(fthp, vtau, vIPThp_qci, tperiod)
plt.show() plt.show()
# ---- plotting routines ---- # ---- plotting routines ----
# figplot_erab_accessibility plots E-RAB Accessibility KPI data on the figure. # figplot_erab_accessibility plots E-RAB Accessibility KPI data on the figure.
def figplot_erab_accessibility(fig: plt.Figure, vτ, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod=None): def figplot_erab_accessibility(fig: plt.Figure, vtau, vInititialEPSBEstabSR, vAddedEPSBEstabSR, tperiod=None):
ax1, ax2 = fig.subplots(2, 1, sharex=True) ax1, ax2 = fig.subplots(2, 1, sharex=True)
fig.suptitle("E-RAB Accessibility / %s" % (tpretty(tperiod) if tperiod is not None else fig.suptitle("E-RAB Accessibility / %s" % (tpretty(tperiod) if tperiod is not None else
vτ_period_pretty(vτ))) vtau_period_pretty(vtau)))
ax1.set_title("Initial E-RAB establishment success rate") ax1.set_title("Initial E-RAB establishment success rate")
ax2.set_title("Added E-RAB establishment success rate") ax2.set_title("Added E-RAB establishment success rate")
plot_success_rate(ax1, vτ, vInititialEPSBEstabSR, "InititialEPSBEstabSR") plot_success_rate(ax1, vtau, vInititialEPSBEstabSR, "InititialEPSBEstabSR")
plot_success_rate(ax2, vτ, vAddedEPSBEstabSR, "AddedEPSBEstabSR") plot_success_rate(ax2, vtau, vAddedEPSBEstabSR, "AddedEPSBEstabSR")
# figplot_eutran_ip_throughput plots E-UTRAN IP Throughput KPI data on the figure. # figplot_eutran_ip_throughput plots E-UTRAN IP Throughput KPI data on the figure.
def figplot_eutran_ip_throughput(fig: plt.Figure, vτ, vIPThp_qci, tperiod=None): def figplot_eutran_ip_throughput(fig: plt.Figure, vtau, vIPThp_qci, tperiod=None):
ax1, ax2 = fig.subplots(2, 1, sharex=True) ax1, ax2 = fig.subplots(2, 1, sharex=True)
fig.suptitle("E-UTRAN IP Throughput / %s" % (tpretty(tperiod) if tperiod is not None else fig.suptitle("E-UTRAN IP Throughput / %s" % (tpretty(tperiod) if tperiod is not None else
vτ_period_pretty(vτ))) vtau_period_pretty(vtau)))
ax1.set_title("Downlink") ax1.set_title("Downlink")
ax2.set_title("Uplink") ax2.set_title("Uplink")
ax1.set_ylabel("Mbit/s") ax1.set_ylabel("Mbit/s")
...@@ -156,8 +156,8 @@ def figplot_eutran_ip_throughput(fig: plt.Figure, vτ, vIPThp_qci, tperiod=None) ...@@ -156,8 +156,8 @@ def figplot_eutran_ip_throughput(fig: plt.Figure, vτ, vIPThp_qci, tperiod=None)
v_qci = (vIPThp_qci .view(np.float64) / 1e6) \ v_qci = (vIPThp_qci .view(np.float64) / 1e6) \
.view(vIPThp_qci.dtype) .view(vIPThp_qci.dtype)
plot_per_qci(ax1, vτ, v_qci[:,:]['dl'], 'IPThp') plot_per_qci(ax1, vtau, v_qci[:,:]['dl'], 'IPThp')
plot_per_qci(ax2, vτ, v_qci[:,:]['ul'], 'IPThp') plot_per_qci(ax2, vtau, v_qci[:,:]['ul'], 'IPThp')
_, dmax = ax1.get_ylim() _, dmax = ax1.get_ylim()
_, umax = ax2.get_ylim() _, umax = ax2.get_ylim()
...@@ -167,9 +167,9 @@ def figplot_eutran_ip_throughput(fig: plt.Figure, vτ, vIPThp_qci, tperiod=None) ...@@ -167,9 +167,9 @@ def figplot_eutran_ip_throughput(fig: plt.Figure, vτ, vIPThp_qci, tperiod=None)
# plot_success_rate plots success-rate data from vector v on ax. # plot_success_rate plots success-rate data from vector v on ax.
# v is array with Intervals. # v is array with Intervals.
def plot_success_rate(ax, vτ, v, label): def plot_success_rate(ax, vtau, v, label):
ax.plot(vτ, v['lo'], drawstyle='steps-post', label=label) ax.plot(vtau, v['lo'], drawstyle='steps-post', label=label)
ax.fill_between(vτ, v['lo'], v['hi'], ax.fill_between(vtau, v['lo'], v['hi'],
step='post', alpha=0.1, label='%s\nuncertainty' % label) step='post', alpha=0.1, label='%s\nuncertainty' % label)
ax.set_ylabel("%") ax.set_ylabel("%")
...@@ -185,8 +185,8 @@ def plot_success_rate(ax, vτ, v, label): ...@@ -185,8 +185,8 @@ def plot_success_rate(ax, vτ, v, label):
# #
# v_qci should be array[t, QCI]. # v_qci should be array[t, QCI].
# QCIs, for which v[:,qci] is all zeros, are said to be silent and are not plotted. # QCIs, for which v[:,qci] is all zeros, are said to be silent and are not plotted.
def plot_per_qci(ax, vτ, v_qci, label): def plot_per_qci(ax, vtau, v_qci, label):
ax.set_xlim((vτ[0], vτ[-1])) # to have correct x range even if we have no data ax.set_xlim((vtau[0], vtau[-1])) # to have correct x range even if we have no data
assert len(v_qci.shape) == 2 assert len(v_qci.shape) == 2
silent = True silent = True
propv = list(plt.rcParams['axes.prop_cycle']) propv = list(plt.rcParams['axes.prop_cycle'])
...@@ -196,8 +196,8 @@ def plot_per_qci(ax, vτ, v_qci, label): ...@@ -196,8 +196,8 @@ def plot_per_qci(ax, vτ, v_qci, label):
continue continue
silent = False silent = False
prop = propv[qci % len(propv)] # to have same colors for same qci in different graphs prop = propv[qci % len(propv)] # to have same colors for same qci in different graphs
ax.plot(vτ, v['lo'], label="%s.%d" % (label, qci), **prop) ax.plot(vtau, v['lo'], label="%s.%d" % (label, qci), **prop)
ax.fill_between(vτ, v['lo'], v['hi'], alpha=0.3, **prop) ax.fill_between(vtau, v['lo'], v['hi'], alpha=0.3, **prop)
if silent: if silent:
ax.plot([],[], ' ', label="all QCI silent") ax.plot([],[], ' ', label="all QCI silent")
...@@ -222,17 +222,17 @@ def tpretty(t): ...@@ -222,17 +222,17 @@ def tpretty(t):
return "%s%s" % ("%d'" % tmin if tmin else '', return "%s%s" % ("%d'" % tmin if tmin else '',
'%d"' % tsec if tsec else '') '%d"' % tsec if tsec else '')
# vτ_period_pretty returns pretty form for time period in vector vτ. # vtau_period_pretty returns pretty form for time period in vector vtau.
# for example [2,5,8,11] gives 3". # for example [2,5,8,11] gives 3".
def vτ_period_pretty(vτ): def vtau_period_pretty(vtau):
if len(vτ) < 2: if len(vtau) < 2:
return "?" return "?"
s = timedelta(seconds=1) s = timedelta(seconds=1)
δvτ = (vτ[1:] - vτ[:-1]) / s # in seconds dvtau = (vtau[1:] - vtau[:-1]) / s # in seconds
min = δvτ.min() min = dvtau.min()
avg = δvτ.mean() avg = dvtau.mean()
max = δvτ.max() max = dvtau.max()
std = δvτ.std() std = dvtau.std()
if min == max: if min == max:
return tpretty(min) return tpretty(min)
return "%s ±%s [%s, %s]" % (tpretty(avg), tpretty(std), tpretty(min), tpretty(max)) return "%s ±%s [%s, %s]" % (tpretty(avg), tpretty(std), tpretty(min), tpretty(max))
......
#!/bin/bash -e
for f in `git ls-files |grep -v greek2lat`; do
sed -e "
s/Σqci/Sqci/g
s/Σcause/Scause/g
s/τ/tau/g
s/Σ/S/g
s/δtau/dtau/g
s/δt/dt/g
s/δ_ue_stats/d_ue_stats/g
s/\bμ\b/mu/g
s/\bμ_\b/mu_/g
s/\bσ\b/std/g
s/\bσ2\b/s2/g
s/tδstats/tdstats/g
s/δcounters/dcounters/g
s/\bδv\b/dv/g
s/δcc/dcc/g
s/ δ / delta /g
s/ δ$/ delta/g
s/δvtau/dvtau/g
" -i $f
done
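A minimal Python sketch of the same ordered rewrite (not part of the commit; useful to check on a sample string that the substitution order, where τ -> tau must run before δtau can match, behaves as intended):

import re

RULES = [               # subset of the rules above, in the same order
    ('Σqci',   'Sqci'),
    ('Σcause', 'Scause'),
    ('τ',      'tau'),
    ('Σ',      'S'),
    ('δtau',   'dtau'),
    ('δt',     'dt'),
]

def greek2lat(text):
    for pat, repl in RULES:
        text = re.sub(pat, repl, text)
    return text

print(greek2lat('δτ = τ_hi - τ_lo  # Σqci'))   # -> dtau = tau_hi - tau_lo  # Sqci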
...@@ -56,7 +56,7 @@ from golang import func ...@@ -56,7 +56,7 @@ from golang import func
# Calc provides way to compute KPIs over given measurement data and time interval. # Calc provides way to compute KPIs over given measurement data and time interval.
# #
# It is constructed from MeasurementLog and [τ_lo, τ_hi) and further provides # It is constructed from MeasurementLog and [tau_lo, tau_hi) and further provides
# following methods for computing 3GPP KPIs: # following methods for computing 3GPP KPIs:
# #
# .erab_accessibility() - TS 32.450 6.1.1 "E-RAB Accessibility" # .erab_accessibility() - TS 32.450 6.1.1 "E-RAB Accessibility"
...@@ -66,15 +66,15 @@ from golang import func ...@@ -66,15 +66,15 @@ from golang import func
# Upon construction specified time interval is potentially widened to cover # Upon construction specified time interval is potentially widened to cover
# corresponding data in full granularity periods: # corresponding data in full granularity periods:
# #
# τ'lo τ'hi # tau'lo tau'hi
# ──────|─────|────[────|────)──────|──────|────────> # ──────|─────|────[────|────)──────|──────|────────>
# ←─ τ_lo τ_hi ──→ time # ←─ tau_lo tau_hi ──→ time
# #
# #
# See also: MeasurementLog, Measurement. # See also: MeasurementLog, Measurement.
class Calc: class Calc:
# ._data []Measurement - fully inside [.τ_lo, .τ_hi) # ._data []Measurement - fully inside [.tau_lo, .tau_hi)
# [.τ_lo, .τ_hi) time interval to compute over. Potentially wider than originally requested. # [.tau_lo, .tau_hi) time interval to compute over. Potentially wider than originally requested.
pass pass
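For example, the widening works as follows (a sketch, assuming mlog holds two 10-second granularity periods):

# mlog periods:        [0,10)  [10,20)
# Calc(mlog, 3, 17) -> .tau_lo = 0, .tau_hi = 20   # both overlapped periods taken in full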
...@@ -265,8 +265,8 @@ def _(): ...@@ -265,8 +265,8 @@ def _():
expv.append((name, typ, nqci)) # X.QCI[nqci] expv.append((name, typ, nqci)) # X.QCI[nqci]
elif name.endswith('.CAUSE'): elif name.endswith('.CAUSE'):
Σ, causev = _all_cause(name) S, causev = _all_cause(name)
for _ in (Σ,)+causev: for _ in (S,)+causev:
expv.append((_, typ)) expv.append((_, typ))
else: else:
...@@ -414,14 +414,14 @@ def append(mlog, m: Measurement): ...@@ -414,14 +414,14 @@ def append(mlog, m: Measurement):
# verify .Tstart↑ # verify .Tstart↑
if len(mlog._data) > 0: if len(mlog._data) > 0:
m_ = mlog._data[-1] m_ = mlog._data[-1]
τ = m ['X.Tstart'] tau = m ['X.Tstart']
τ_ = m_['X.Tstart'] tau_ = m_['X.Tstart']
δτ_ = m_['X.δT'] dtau_ = m_['X.δT']
if not (τ_ < τ): if not (tau_ < tau):
raise AssertionError(".Tstart not ↑ (%s -> %s)" % (τ_, τ)) raise AssertionError(".Tstart not ↑ (%s -> %s)" % (tau_, tau))
if not (τ_ + δτ_ <= τ): if not (tau_ + dtau_ <= tau):
raise AssertionError(".Tstart overlaps with previous measurement: %s ∈ [%s, %s)" % raise AssertionError(".Tstart overlaps with previous measurement: %s ∈ [%s, %s)" %
(τ, τ_, τ_ + δτ_)) (tau, tau_, tau_ + dtau_))
_ = np.append( _ = np.append(
mlog._data.view(Measurement._dtype0), # dtype0 because np.append does not handle aliased mlog._data.view(Measurement._dtype0), # dtype0 because np.append does not handle aliased
m.view(Measurement._dtype0)) # fields as such and increases out itemsize m.view(Measurement._dtype0)) # fields as such and increases out itemsize
...@@ -443,32 +443,32 @@ def forget_past(mlog, Tcut): ...@@ -443,32 +443,32 @@ def forget_past(mlog, Tcut):
# Calc() is initialized from slice of data in the measurement log that is # Calc() is initialized from slice of data in the measurement log that is
# covered/overlapped with [τ_lo, τ_hi) time interval. # covered/overlapped with [tau_lo, tau_hi) time interval.
# #
# The time interval, that will actually be used for computations, is potentially wider. # The time interval, that will actually be used for computations, is potentially wider.
# See Calc class documentation for details. # See Calc class documentation for details.
@func(Calc) @func(Calc)
def __init__(calc, mlog: MeasurementLog, τ_lo, τ_hi): def __init__(calc, mlog: MeasurementLog, tau_lo, tau_hi):
assert τ_lo <= τ_hi assert tau_lo <= tau_hi
data = mlog.data() data = mlog.data()
l = len(data) l = len(data)
# find min i: τ_lo < [i].(Tstart+δT) ; i=l if not found # find min i: tau_lo < [i].(Tstart+δT) ; i=l if not found
# TODO binary search # TODO binary search
i = 0 i = 0
while i < l: while i < l:
m = data[i] m = data[i]
m_τhi = m['X.Tstart'] + m['X.δT'] m_tauhi = m['X.Tstart'] + m['X.δT']
if τ_lo < m_τhi: if tau_lo < m_tauhi:
break break
i += 1 i += 1
# find min j: τ_hi ≤ [j].Tstart ; j=l if not found # find min j: tau_hi ≤ [j].Tstart ; j=l if not found
j = i j = i
while j < l: while j < l:
m = data[j] m = data[j]
m_τlo = m['X.Tstart'] m_taulo = m['X.Tstart']
if τ_hi <= m_τlo: if tau_hi <= m_taulo:
break break
j += 1 j += 1
...@@ -476,12 +476,12 @@ def __init__(calc, mlog: MeasurementLog, τ_lo, τ_hi): ...@@ -476,12 +476,12 @@ def __init__(calc, mlog: MeasurementLog, τ_lo, τ_hi):
if len(data) > 0: if len(data) > 0:
m_lo = data[0] m_lo = data[0]
m_hi = data[-1] m_hi = data[-1]
τ_lo = min(τ_lo, m_lo['X.Tstart']) tau_lo = min(tau_lo, m_lo['X.Tstart'])
τ_hi = max(τ_hi, m_hi['X.Tstart']+m_hi['X.δT']) tau_hi = max(tau_hi, m_hi['X.Tstart']+m_hi['X.δT'])
calc._data = data calc._data = data
calc.τ_lo = τ_lo calc.tau_lo = tau_lo
calc.τ_hi = τ_hi calc.tau_hi = tau_hi
# erab_accessibility computes "E-RAB Accessibility" KPI. # erab_accessibility computes "E-RAB Accessibility" KPI.
...@@ -499,20 +499,20 @@ def __init__(calc, mlog: MeasurementLog, τ_lo, τ_hi): ...@@ -499,20 +499,20 @@ def __init__(calc, mlog: MeasurementLog, τ_lo, τ_hi):
def erab_accessibility(calc): # -> InitialEPSBEstabSR, AddedEPSBEstabSR def erab_accessibility(calc): # -> InitialEPSBEstabSR, AddedEPSBEstabSR
SR = calc._success_rate SR = calc._success_rate
x = SR("Σcause RRC.ConnEstabSucc.CAUSE", x = SR("Scause RRC.ConnEstabSucc.CAUSE",
"Σcause RRC.ConnEstabAtt.CAUSE") "Scause RRC.ConnEstabAtt.CAUSE")
y = SR("S1SIG.ConnEstabSucc", y = SR("S1SIG.ConnEstabSucc",
"S1SIG.ConnEstabAtt") "S1SIG.ConnEstabAtt")
z = SR("Σqci ERAB.EstabInitSuccNbr.QCI", z = SR("Sqci ERAB.EstabInitSuccNbr.QCI",
"Σqci ERAB.EstabInitAttNbr.QCI") "Sqci ERAB.EstabInitAttNbr.QCI")
InititialEPSBEstabSR = Interval(x['lo'] * y['lo'] * z['lo'], # x·y·z InititialEPSBEstabSR = Interval(x['lo'] * y['lo'] * z['lo'], # x·y·z
x['hi'] * y['hi'] * z['hi']) x['hi'] * y['hi'] * z['hi'])
AddedEPSBEstabSR = SR("Σqci ERAB.EstabAddSuccNbr.QCI", AddedEPSBEstabSR = SR("Sqci ERAB.EstabAddSuccNbr.QCI",
"Σqci ERAB.EstabAddAttNbr.QCI") "Sqci ERAB.EstabAddAttNbr.QCI")
return _i2pc(InititialEPSBEstabSR), \ return _i2pc(InititialEPSBEstabSR), \
_i2pc(AddedEPSBEstabSR) # as % _i2pc(AddedEPSBEstabSR) # as %
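A quick numeric illustration of the interval product above (made-up success rates, not from any test):

# x = [0.90, 1.00], y = [0.80, 0.90], z = [1.00, 1.00]
# x·y·z -> InititialEPSBEstabSR = [0.90*0.80*1.00, 1.00*0.90*1.00] = [0.72, 0.90]   # 72%..90% after _i2pc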
...@@ -535,60 +535,60 @@ def erab_accessibility(calc): # -> InitialEPSBEstabSR, AddedEPSBEstabSR ...@@ -535,60 +535,60 @@ def erab_accessibility(calc): # -> InitialEPSBEstabSR, AddedEPSBEstabSR
# #
# This gives the following for resulting success rate confidence interval: # This gives the following for resulting success rate confidence interval:
# #
# time covered by periods with data: Σt # time covered by periods with data: St
# time covered by periods with no data: t⁺ t⁺ # time covered by periods with no data: t⁺ t⁺
# extrapolation for incoming initiation events: init⁺ = ──·Σ(init) # extrapolation for incoming initiation events: init⁺ = ──·S(init)
# Σt # St
# fini events for "no data" time is full uncertainty: fini⁺ ∈ [0,init⁺] # fini events for "no data" time is full uncertainty: fini⁺ ∈ [0,init⁺]
# #
# => success rate over whole time is uncertain in between # => success rate over whole time is uncertain in between
# #
# Σ(fini) Σ(fini) + init⁺ # S(fini) S(fini) + init⁺
# ────────────── ≤ SR ≤ ────────────── # ────────────── ≤ SR ≤ ──────────────
# Σ(init) + init⁺ Σ(init) + init⁺ # S(init) + init⁺ S(init) + init⁺
# #
# that confidence interval is returned as the result. # that confidence interval is returned as the result.
# #
# fini/init events can be prefixed with "Σqci " or "Σcause ". If such prefix is # fini/init events can be prefixed with "Sqci " or "Scause ". If such prefix is
# present, then fini/init value is obtained via call to Σqci or Σcause correspondingly. # present, then fini/init value is obtained via call to Sqci or Scause correspondingly.
@func(Calc) @func(Calc)
def _success_rate(calc, fini, init): # -> Interval in [0,1] def _success_rate(calc, fini, init): # -> Interval in [0,1]
def vget(m, name): def vget(m, name):
if name.startswith("Σqci "): if name.startswith("Sqci "):
return Σqci (m, name[len("Σqci "):]) return Sqci (m, name[len("Sqci "):])
if name.startswith("Σcause "): if name.startswith("Scause "):
return Σcause(m, name[len("Σcause "):]) return Scause(m, name[len("Scause "):])
return m[name] return m[name]
t_ = 0. t_ = 0.
Σt = 0. St = 0.
Σinit = 0 Sinit = 0
Σfini = 0 Sfini = 0
Σufini = 0 # Σinit where fini=ø but init is not ø Sufini = 0 # Sinit where fini=ø but init is not ø
for m in calc._miter(): for m in calc._miter():
τ = m['X.δT'] tau = m['X.δT']
vinit = vget(m, init) vinit = vget(m, init)
vfini = vget(m, fini) vfini = vget(m, fini)
if isNA(vinit): if isNA(vinit):
t_ += τ t_ += tau
# ignore fini, even if it is not ø. # ignore fini, even if it is not ø.
# TODO more correct approach: init⁺ for this period ∈ [fini,∞] and # TODO more correct approach: init⁺ for this period ∈ [fini,∞] and
# once we extrapolate init⁺ we should check if it lies in that # once we extrapolate init⁺ we should check if it lies in that
# interval and adjust if not. Then fini could be used as is. # interval and adjust if not. Then fini could be used as is.
else: else:
Σt += τ St += tau
Σinit += vinit Sinit += vinit
if isNA(vfini): if isNA(vfini):
Σufini += vinit Sufini += vinit
else: else:
Σfini += vfini Sfini += vfini
if Σinit == 0 or Σt == 0: if Sinit == 0 or St == 0:
return Interval(0,1) # full uncertainty return Interval(0,1) # full uncertainty
init_ = t_ * Σinit / Σt init_ = t_ * Sinit / St
a = Σfini / (Σinit + init_) a = Sfini / (Sinit + init_)
b = (Σfini + init_ + Σufini) / (Σinit + init_) b = (Sfini + init_ + Sufini) / (Sinit + init_)
return Interval(a,b) return Interval(a,b)
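To sanity-check the formulas above with concrete numbers (illustrative only): 10 seconds covered by data with init=8 and fini=4, plus 5 seconds with no data.

St, t_ = 10.0, 5.0                              # time with data / without data
Sinit, Sfini, Sufini = 8, 4, 0
init_ = t_ * Sinit / St                         # extrapolated initiations: 4.0
a = Sfini / (Sinit + init_)                     # 4/12 ≈ 0.33
b = (Sfini + init_ + Sufini) / (Sinit + init_)  # 8/12 ≈ 0.67  =>  SR ∈ [0.33, 0.67]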
...@@ -606,15 +606,15 @@ def _success_rate(calc, fini, init): # -> Interval in [0,1] ...@@ -606,15 +606,15 @@ def _success_rate(calc, fini, init): # -> Interval in [0,1]
# 3GPP reference: TS 32.450 6.3.1 "E-UTRAN IP Throughput". # 3GPP reference: TS 32.450 6.3.1 "E-UTRAN IP Throughput".
@func(Calc) @func(Calc)
def eutran_ip_throughput(calc): # -> IPThp[QCI][dl,ul] def eutran_ip_throughput(calc): # -> IPThp[QCI][dl,ul]
qdlΣv = np.zeros(nqci, dtype=np.float64) qdlSv = np.zeros(nqci, dtype=np.float64)
qdlΣt = np.zeros(nqci, dtype=np.float64) qdlSt = np.zeros(nqci, dtype=np.float64)
qdlΣte = np.zeros(nqci, dtype=np.float64) qdlSte = np.zeros(nqci, dtype=np.float64)
qulΣv = np.zeros(nqci, dtype=np.float64) qulSv = np.zeros(nqci, dtype=np.float64)
qulΣt = np.zeros(nqci, dtype=np.float64) qulSt = np.zeros(nqci, dtype=np.float64)
qulΣte = np.zeros(nqci, dtype=np.float64) qulSte = np.zeros(nqci, dtype=np.float64)
for m in calc._miter(): for m in calc._miter():
τ = m['X.δT'] tau = m['X.δT']
for qci in range(nqci): for qci in range(nqci):
dl_vol = m["DRB.IPVolDl.QCI"] [qci] dl_vol = m["DRB.IPVolDl.QCI"] [qci]
...@@ -630,68 +630,68 @@ def eutran_ip_throughput(calc): # -> IPThp[QCI][dl,ul] ...@@ -630,68 +630,68 @@ def eutran_ip_throughput(calc): # -> IPThp[QCI][dl,ul]
# plain 3GPP spec for now. # plain 3GPP spec for now.
pass pass
else: else:
qdlΣv[qci] += dl_vol qdlSv[qci] += dl_vol
qdlΣt[qci] += dl_time qdlSt[qci] += dl_time
qdlΣte[qci] += dl_time_err qdlSte[qci] += dl_time_err
if isNA(ul_vol) or isNA(ul_time) or isNA(ul_time_err): if isNA(ul_vol) or isNA(ul_time) or isNA(ul_time_err):
# no uncertainty accounting - see ^^^ # no uncertainty accounting - see ^^^
pass pass
else: else:
qulΣv[qci] += ul_vol qulSv[qci] += ul_vol
qulΣt[qci] += ul_time qulSt[qci] += ul_time
qulΣte[qci] += ul_time_err qulSte[qci] += ul_time_err
thp = np.zeros(nqci, dtype=np.dtype([ thp = np.zeros(nqci, dtype=np.dtype([
('dl', Interval._dtype), ('dl', Interval._dtype),
('ul', Interval._dtype), ('ul', Interval._dtype),
])) ]))
for qci in range(nqci): for qci in range(nqci):
if qdlΣt[qci] > 0: if qdlSt[qci] > 0:
thp[qci]['dl']['lo'] = qdlΣv[qci] / (qdlΣt[qci] + qdlΣte[qci]) thp[qci]['dl']['lo'] = qdlSv[qci] / (qdlSt[qci] + qdlSte[qci])
thp[qci]['dl']['hi'] = qdlΣv[qci] / (qdlΣt[qci] - qdlΣte[qci]) thp[qci]['dl']['hi'] = qdlSv[qci] / (qdlSt[qci] - qdlSte[qci])
if qulΣt[qci] > 0: if qulSt[qci] > 0:
thp[qci]['ul']['lo'] = qulΣv[qci] / (qulΣt[qci] + qulΣte[qci]) thp[qci]['ul']['lo'] = qulSv[qci] / (qulSt[qci] + qulSte[qci])
thp[qci]['ul']['hi'] = qulΣv[qci] / (qulΣt[qci] - qulΣte[qci]) thp[qci]['ul']['hi'] = qulSv[qci] / (qulSt[qci] - qulSte[qci])
return thp return thp
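The resulting per-QCI bounds are plain interval arithmetic over the accumulated sums. With made-up numbers:

Sv, St, Ste = 8e6, 1.0, 0.1    # transmitted bits, tx time, accumulated time accuracy
lo = Sv / (St + Ste)           # ≈ 7.3e6 bit/s, pessimistic bound
hi = Sv / (St - Ste)           # ≈ 8.9e6 bit/s, optimistic bound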
# _miter iterates through [.τ_lo, .τ_hi) yielding Measurements. # _miter iterates through [.tau_lo, .tau_hi) yielding Measurements.
# #
# The measurements are yielded with consecutive timestamps. There is no gaps # The measurements are yielded with consecutive timestamps. There is no gaps
# as NA Measurements are yielded for time holes in original MeasurementLog data. # as NA Measurements are yielded for time holes in original MeasurementLog data.
@func(Calc) @func(Calc)
def _miter(calc): # -> iter(Measurement) def _miter(calc): # -> iter(Measurement)
τ = calc.τ_lo tau = calc.tau_lo
l = len(calc._data) l = len(calc._data)
i = 0 # current Measurement from data i = 0 # current Measurement from data
while i < l: while i < l:
m = calc._data[i] m = calc._data[i]
m_τlo = m['X.Tstart'] m_taulo = m['X.Tstart']
m_τhi = m_τlo + m['X.δT'] m_tauhi = m_taulo + m['X.δT']
assert m_τlo < m_τhi assert m_taulo < m_tauhi
if τ < m_τlo: if tau < m_taulo:
# <- M(ø)[τ, m_τlo) # <- M(ø)[tau, m_taulo)
h = Measurement() h = Measurement()
h['X.Tstart'] = τ h['X.Tstart'] = tau
h['X.δT'] = m_τlo - τ h['X.δT'] = m_taulo - tau
yield h yield h
# <- M from mlog # <- M from mlog
yield m yield m
τ = m_τhi tau = m_tauhi
i += 1 i += 1
assert τ <= calc.τ_hi assert tau <= calc.tau_hi
if τ < calc.τ_hi: if tau < calc.tau_hi:
# <- trailing M(ø)[τ, τ_hi) # <- trailing M(ø)[tau, tau_hi)
h = Measurement() h = Measurement()
h['X.Tstart'] = τ h['X.Tstart'] = tau
h['X.δT'] = calc.τ_hi - τ h['X.δT'] = calc.tau_hi - tau
yield h yield h
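A sketch of what _miter yields for a log with a single measurement and a wider Calc interval (uses only APIs visible in this commit; the timestamps are illustrative):

from xlte.kpi import Calc, MeasurementLog, Measurement

mlog = MeasurementLog()
m = Measurement()
m['X.Tstart'] = 10
m['X.δT']     = 10                   # covers [10, 20)
mlog.append(m)

c = Calc(mlog, 0, 30)                # interval stays [0, 30) after widening
for x in c._miter():
    print(x['X.Tstart'], x['X.δT'])  # (0,10) NA-hole, (10,10) data, (20,10) NA-hole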
...@@ -704,28 +704,28 @@ def __new__(cls, lo, hi): ...@@ -704,28 +704,28 @@ def __new__(cls, lo, hi):
return i return i
# Σqci performs summation over all qci for m[name_qci]. # Sqci performs summation over all qci for m[name_qci].
# #
# usage example: # usage example:
# #
# Σqci(m, 'ERAB.EstabInitSuccNbr.QCI') # Sqci(m, 'ERAB.EstabInitSuccNbr.QCI')
# #
# name_qci must have '.QCI' suffix. # name_qci must have '.QCI' suffix.
def Σqci(m: Measurement, name_qci: str): def Sqci(m: Measurement, name_qci: str):
return _Σx(m, name_qci, _all_qci) return _Sx(m, name_qci, _all_qci)
# Σcause, performs summation over all causes for m[name_cause]. # Scause, performs summation over all causes for m[name_cause].
# #
# usage example: # usage example:
# #
# Σcause(m, 'RRC.ConnEstabSucc.CAUSE') # Scause(m, 'RRC.ConnEstabSucc.CAUSE')
# #
# name_cause must have '.CAUSE' suffix. # name_cause must have '.CAUSE' suffix.
def Σcause(m: Measurement, name_cause: str): def Scause(m: Measurement, name_cause: str):
return _Σx(m, name_cause, _all_cause) return _Sx(m, name_cause, _all_cause)
# _Σx serves Σqci and Σcause. # _Sx serves Sqci and Scause.
def _Σx(m: Measurement, name_x: str, _all_x: func): def _Sx(m: Measurement, name_x: str, _all_x: func):
name_sum, name_xv = _all_x(name_x) name_sum, name_xv = _all_x(name_x)
s = m[name_sum] s = m[name_sum]
if not isNA(s): if not isNA(s):
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
from __future__ import print_function, division, absolute_import from __future__ import print_function, division, absolute_import
from xlte.kpi import Calc, MeasurementLog, Measurement, Interval, NA, isNA, Σqci, Σcause, nqci from xlte.kpi import Calc, MeasurementLog, Measurement, Interval, NA, isNA, Sqci, Scause, nqci
import numpy as np import numpy as np
from pytest import raises from pytest import raises
...@@ -81,10 +81,10 @@ def test_Measurement(): ...@@ -81,10 +81,10 @@ def test_Measurement():
# verify that time fields has enough precision # verify that time fields has enough precision
t2022 = 1670691601.8999548 # in 2022.Dec t2022 = 1670691601.8999548 # in 2022.Dec
t2118 = 4670691601.1234567 # in 2118.Jan t2118 = 4670691601.1234567 # in 2118.Jan
def _(τ): def _(tau):
m['X.Tstart'] = τ m['X.Tstart'] = tau
τ_ = m['X.Tstart'] tau_ = m['X.Tstart']
assert τ_ == τ assert tau_ == tau
_(t2022) _(t2022)
_(t2118) _(t2118)
...@@ -166,15 +166,15 @@ def test_MeasurementLog(): ...@@ -166,15 +166,15 @@ def test_MeasurementLog():
assert _.shape == (0,) assert _.shape == (0,)
# verify (τ_lo, τ_hi) widening and overlapping with Measurements on Calc initialization. # verify (tau_lo, tau_hi) widening and overlapping with Measurements on Calc initialization.
def test_Calc_init(): def test_Calc_init():
mlog = MeasurementLog() mlog = MeasurementLog()
# _ asserts that Calc(mlog, τ_lo,τ_hi) has .τ_lo/.τ_hi as specified by # _ asserts that Calc(mlog, tau_lo,tau_hi) has .tau_lo/.tau_hi as specified by
# τ_wlo/τ_whi, and ._data as specified by mokv. # tau_wlo/tau_whi, and ._data as specified by mokv.
def _(τ_lo, τ_hi, τ_wlo, τ_whi, *mokv): def _(tau_lo, tau_hi, tau_wlo, tau_whi, *mokv):
c = Calc(mlog, τ_lo,τ_hi) c = Calc(mlog, tau_lo,tau_hi)
assert (c.τ_lo, c.τ_hi) == (τ_wlo, τ_whi) assert (c.tau_lo, c.tau_hi) == (tau_wlo, tau_whi)
mv = list(c._data[i] for i in range(len(c._data))) mv = list(c._data[i] for i in range(len(c._data)))
assert mv == list(mokv) assert mv == list(mokv)
...@@ -223,18 +223,18 @@ def test_Calc_init(): ...@@ -223,18 +223,18 @@ def test_Calc_init():
def test_Calc_miter(): def test_Calc_miter():
mlog = MeasurementLog() mlog = MeasurementLog()
# _ asserts that Calc(mlog, τ_lo,τ_hi)._miter yields Measurement as specified by mokv. # _ asserts that Calc(mlog, tau_lo,tau_hi)._miter yields Measurement as specified by mokv.
def _(τ_lo, τ_hi, *mokv): def _(tau_lo, tau_hi, *mokv):
c = Calc(mlog, τ_lo,τ_hi) c = Calc(mlog, tau_lo,tau_hi)
mv = list(c._miter()) mv = list(c._miter())
assert mv == list(mokv) assert mv == list(mokv)
# na returns Measurement with specified τ_lo/τ_hi and NA for all other data. # na returns Measurement with specified tau_lo/tau_hi and NA for all other data.
def na(τ_lo, τ_hi): def na(tau_lo, tau_hi):
assert τ_lo <= τ_hi assert tau_lo <= tau_hi
m = Measurement() m = Measurement()
m['X.Tstart'] = τ_lo m['X.Tstart'] = tau_lo
m['X.δT'] = τ_hi - τ_lo m['X.δT'] = tau_hi - tau_lo
return m return m
# mlog(ø) # mlog(ø)
...@@ -275,10 +275,10 @@ def test_Calc_success_rate(): ...@@ -275,10 +275,10 @@ def test_Calc_success_rate():
fini = "S1SIG.ConnEstabSucc" fini = "S1SIG.ConnEstabSucc"
# M returns Measurement with specified time coverage and init/fini values. # M returns Measurement with specified time coverage and init/fini values.
def M(τ_lo,τ_hi, vinit=None, vfini=None): def M(tau_lo,tau_hi, vinit=None, vfini=None):
m = Measurement() m = Measurement()
m['X.Tstart'] = τ_lo m['X.Tstart'] = tau_lo
m['X.δT'] = τ_hi - τ_lo m['X.δT'] = tau_hi - tau_lo
if vinit is not None: if vinit is not None:
m[init] = vinit m[init] = vinit
if vfini is not None: if vfini is not None:
...@@ -292,10 +292,10 @@ def test_Calc_success_rate(): ...@@ -292,10 +292,10 @@ def test_Calc_success_rate():
for m in mv: for m in mv:
mlog.append(m) mlog.append(m)
# _ asserts that Calc(mlog, τ_lo,τ_hi)._success_rate(fini, init) returns Interval(sok_lo, sok_hi). # _ asserts that Calc(mlog, tau_lo,tau_hi)._success_rate(fini, init) returns Interval(sok_lo, sok_hi).
def _(τ_lo, τ_hi, sok_lo, sok_hi): def _(tau_lo, tau_hi, sok_lo, sok_hi):
sok = Interval(sok_lo, sok_hi) sok = Interval(sok_lo, sok_hi)
c = Calc(mlog, τ_lo, τ_hi) c = Calc(mlog, tau_lo, tau_hi)
s = c._success_rate(fini, init) s = c._success_rate(fini, init)
assert type(s) is Interval assert type(s) is Interval
eps = np.finfo(s['lo'].dtype).eps eps = np.finfo(s['lo'].dtype).eps
...@@ -323,7 +323,7 @@ def test_Calc_success_rate(): ...@@ -323,7 +323,7 @@ def test_Calc_success_rate():
# i₁=8 # i₁=8
# f₁=4 # f₁=4
# ────|──────|─────────────|────────── # ────|──────|─────────────|──────────
# 10 t₁ 20 ←── t₂ ──→ τ_hi # 10 t₁ 20 ←── t₂ ──→ tau_hi
# #
# t with data: t₁ # t with data: t₁
# t with no data: t₂ # t with no data: t₂
...@@ -355,7 +355,7 @@ def test_Calc_success_rate(): ...@@ -355,7 +355,7 @@ def test_Calc_success_rate():
# i₁=8 i₂=50 # i₁=8 i₂=50
# f₁=4 f₂=50 # f₁=4 f₂=50
# ────|──────|──────|───────|──────────────────|────────── # ────|──────|──────|───────|──────────────────|──────────
# 10 t₁ 20 ↑ 30 t₂ 40 ↑ τ_hi # 10 t₁ 20 ↑ 30 t₂ 40 ↑ tau_hi
# │ │ # │ │
# │ │ # │ │
# `────────────────── t₃ # `────────────────── t₃
...@@ -387,18 +387,18 @@ def test_Calc_success_rate(): ...@@ -387,18 +387,18 @@ def test_Calc_success_rate():
_( 0,99, 0.18808777429467083, 0.9860675722744688) # t₃=79 _( 0,99, 0.18808777429467083, 0.9860675722744688) # t₃=79
# Σqci # Sqci
init = "Σqci ERAB.EstabInitAttNbr.QCI" init = "Sqci ERAB.EstabInitAttNbr.QCI"
fini = "Σqci ERAB.EstabInitSuccNbr.QCI" fini = "Sqci ERAB.EstabInitSuccNbr.QCI"
m = M(10,20) m = M(10,20)
m['ERAB.EstabInitAttNbr.sum'] = 10 m['ERAB.EstabInitAttNbr.sum'] = 10
m['ERAB.EstabInitSuccNbr.sum'] = 2 m['ERAB.EstabInitSuccNbr.sum'] = 2
Mlog(m) Mlog(m)
_(10,20, 1/5, 1/5) _(10,20, 1/5, 1/5)
# Σcause # Scause
init = "Σcause RRC.ConnEstabAtt.CAUSE" init = "Scause RRC.ConnEstabAtt.CAUSE"
fini = "Σcause RRC.ConnEstabSucc.CAUSE" fini = "Scause RRC.ConnEstabSucc.CAUSE"
m = M(10,20) m = M(10,20)
m['RRC.ConnEstabSucc.sum'] = 5 m['RRC.ConnEstabSucc.sum'] = 5
m['RRC.ConnEstabAtt.sum'] = 10 m['RRC.ConnEstabAtt.sum'] = 10
...@@ -496,42 +496,42 @@ def test_Calc_eutran_ip_throughput(): ...@@ -496,42 +496,42 @@ def test_Calc_eutran_ip_throughput():
assert thp[qci]['ul'] == I(0) assert thp[qci]['ul'] == I(0)
# verify Σqci. # verify Sqci.
def test_Σqci(): def test_Sqci():
m = Measurement() m = Measurement()
x = 'ERAB.EstabInitAttNbr' x = 'ERAB.EstabInitAttNbr'
def Σ(): def S():
return Σqci(m, x+'.QCI') return Sqci(m, x+'.QCI')
assert isNA(Σ()) assert isNA(S())
m[x+'.sum'] = 123 m[x+'.sum'] = 123
assert Σ() == 123 assert S() == 123
m[x+'.17'] = 17 m[x+'.17'] = 17
m[x+'.23'] = 23 m[x+'.23'] = 23
m[x+'.255'] = 255 m[x+'.255'] = 255
assert Σ() == 123 # from .sum assert S() == 123 # from .sum
m[x+'.sum'] = NA(m[x+'.sum'].dtype) m[x+'.sum'] = NA(m[x+'.sum'].dtype)
assert isNA(Σ()) # from array, but NA values lead to sum being NA assert isNA(S()) # from array, but NA values lead to sum being NA
v = m[x+'.QCI'] v = m[x+'.QCI']
l = len(v) l = len(v)
for i in range(l): for i in range(l):
v[i] = 1 + i v[i] = 1 + i
assert Σ() == 1*l + (l-1)*l/2 assert S() == 1*l + (l-1)*l/2
# verify Σcause. # verify Scause.
def test_Σcause(): def test_Scause():
m = Measurement() m = Measurement()
x = 'RRC.ConnEstabAtt' x = 'RRC.ConnEstabAtt'
def Σ(): def S():
return Σcause(m, x+'.CAUSE') return Scause(m, x+'.CAUSE')
assert isNA(Σ()) assert isNA(S())
m[x+'.sum'] = 123 m[x+'.sum'] = 123
assert Σ() == 123 assert S() == 123
# TODO sum over individual causes (when implemented) # TODO sum over individual causes (when implemented)
......