Diffstat (limited to 'tc')
-rw-r--r--  tc/m_police.c |  5 +++++
-rw-r--r--  tc/q_cbq.c    |  5 +++++
-rw-r--r--  tc/q_htb.c    |  5 +++++
-rw-r--r--  tc/q_tbf.c    |  5 +++++
-rw-r--r--  tc/tc_core.c  | 16 ++++++++++++++++
5 files changed, 36 insertions, 0 deletions
diff --git a/tc/m_police.c b/tc/m_police.c
index 53cbefcf..300287e9 100644
--- a/tc/m_police.c
+++ b/tc/m_police.c
@@ -322,9 +322,11 @@ int
 print_police(struct action_util *a, FILE *f, struct rtattr *arg)
 {
 	SPRINT_BUF(b1);
+	SPRINT_BUF(b2);
 	struct tc_police *p;
 	struct rtattr *tb[TCA_POLICE_MAX+1];
 	unsigned buffer;
+	unsigned int linklayer;
 
 	if (arg == NULL)
 		return 0;
@@ -360,6 +362,9 @@ print_police(struct action_util *a, FILE *f, struct rtattr *arg)
 	} else
 		fprintf(f, " ");
 	fprintf(f, "overhead %ub ", p->rate.overhead);
+	linklayer = (p->rate.linklayer & TC_LINKLAYER_MASK);
+	if (linklayer > TC_LINKLAYER_ETHERNET || show_details)
+		fprintf(f, "linklayer %s ", sprint_linklayer(linklayer, b2));
 	fprintf(f, "\nref %d bind %d\n",p->refcnt, p->bindcnt);
 	return 0;
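
The block added to print_police() above is the same three-line pattern this patch drops into all four printers: mask the linklayer bits out of the rate spec, then only emit a "linklayer ..." token when it names something other than the default Ethernet, or when -details was requested. Below is a minimal sketch of that shared pattern, factored into a hypothetical print_linklayer() helper for readability; the real code repeats the lines inline, and sprint_linklayer() itself is defined in tc_util, outside this diff.

/* Illustrative sketch only -- not part of this commit.  The four printers
 * repeat these three lines inline; print_linklayer() is a hypothetical
 * helper shown here to highlight the shared pattern. */
#include <stdio.h>
#include <linux/pkt_sched.h>	/* TC_LINKLAYER_*, struct tc_ratespec */
#include "utils.h"		/* SPRINT_BUF() */
#include "tc_util.h"		/* sprint_linklayer(); show_details is assumed visible via tc's headers */

static void print_linklayer(FILE *f, const struct tc_ratespec *rate)
{
	SPRINT_BUF(b);
	unsigned int linklayer = rate->linklayer & TC_LINKLAYER_MASK;

	/* Stay quiet for the default ethernet/unaware case unless -details was given */
	if (linklayer > TC_LINKLAYER_ETHERNET || show_details)
		fprintf(f, "linklayer %s ", sprint_linklayer(linklayer, b));
}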
diff --git a/tc/q_cbq.c b/tc/q_cbq.c
index 3c5e72c1..d76600cc 100644
--- a/tc/q_cbq.c
+++ b/tc/q_cbq.c
@@ -442,7 +442,9 @@ static int cbq_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	struct tc_cbq_wrropt *wrr = NULL;
 	struct tc_cbq_fopt *fopt = NULL;
 	struct tc_cbq_ovl *ovl = NULL;
+	unsigned int linklayer;
 	SPRINT_BUF(b1);
+	SPRINT_BUF(b2);
 
 	if (opt == NULL)
 		return 0;
@@ -486,6 +488,9 @@ static int cbq_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 		char buf[64];
 		print_rate(buf, sizeof(buf), r->rate);
 		fprintf(f, "rate %s ", buf);
+		linklayer = (r->linklayer & TC_LINKLAYER_MASK);
+		if (linklayer > TC_LINKLAYER_ETHERNET || show_details)
+			fprintf(f, "linklayer %s ", sprint_linklayer(linklayer, b2));
 		if (show_details) {
 			fprintf(f, "cell %ub ", 1<<r->cell_log);
 			if (r->mpu)
diff --git a/tc/q_htb.c b/tc/q_htb.c
index 9321c0ad..7b6f9082 100644
--- a/tc/q_htb.c
+++ b/tc/q_htb.c
@@ -244,9 +244,11 @@ static int htb_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	struct tc_htb_opt *hopt;
 	struct tc_htb_glob *gopt;
 	double buffer,cbuffer;
+	unsigned int linklayer;
 	SPRINT_BUF(b1);
 	SPRINT_BUF(b2);
 	SPRINT_BUF(b3);
+	SPRINT_BUF(b4);
 
 	if (opt == NULL)
 		return 0;
@@ -268,6 +270,9 @@ static int htb_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 		buffer = tc_calc_xmitsize(hopt->rate.rate, hopt->buffer);
 		fprintf(f, "ceil %s ", sprint_rate(hopt->ceil.rate, b1));
 		cbuffer = tc_calc_xmitsize(hopt->ceil.rate, hopt->cbuffer);
+		linklayer = (hopt->rate.linklayer & TC_LINKLAYER_MASK);
+		if (linklayer > TC_LINKLAYER_ETHERNET || show_details)
+			fprintf(f, "linklayer %s ", sprint_linklayer(linklayer, b4));
 		if (show_details) {
 			fprintf(f, "burst %s/%u mpu %s overhead %s ",
 				sprint_size(buffer, b1),
diff --git a/tc/q_tbf.c b/tc/q_tbf.c
index 72cfff66..34784a41 100644
--- a/tc/q_tbf.c
+++ b/tc/q_tbf.c
@@ -239,10 +239,12 @@ static int tbf_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 {
 	struct rtattr *tb[TCA_TBF_PTAB+1];
 	struct tc_tbf_qopt *qopt;
+	unsigned int linklayer;
 	double buffer, mtu;
 	double latency;
 	SPRINT_BUF(b1);
 	SPRINT_BUF(b2);
+	SPRINT_BUF(b3);
 
 	if (opt == NULL)
 		return 0;
@@ -294,6 +296,9 @@ static int tbf_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	if (qopt->rate.overhead) {
 		fprintf(f, "overhead %d", qopt->rate.overhead);
 	}
+	linklayer = (qopt->rate.linklayer & TC_LINKLAYER_MASK);
+	if (linklayer > TC_LINKLAYER_ETHERNET || show_details)
+		fprintf(f, "linklayer %s ", sprint_linklayer(linklayer, b3));
 	return 0;
 }
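
The guard linklayer > TC_LINKLAYER_ETHERNET || show_details reads naturally against the values the kernel exports for this field. Roughly, the relevant UAPI definitions in <linux/pkt_sched.h> look like the following; this is an approximate excerpt quoted for orientation only, not part of this commit.

/* Approximate excerpt of the kernel UAPI <linux/pkt_sched.h> definitions
 * this guard relies on; comments paraphrased. */
enum {
	TC_LINKLAYER_UNAWARE,	/* 0: old tools/kernels that never set the field */
	TC_LINKLAYER_ETHERNET,	/* 1: default, no cell alignment */
	TC_LINKLAYER_ATM,	/* 2: align to 48-byte ATM cell payloads */
};
#define TC_LINKLAYER_MASK 0x0F	/* limit use to the lower 4 bits */

So the comparison is true only for ATM (or any future non-Ethernet linklayer): the token is printed when it actually carries information, and unconditionally under tc -d.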
diff --git a/tc/tc_core.c b/tc/tc_core.c
index 85b072ef..a5243370 100644
--- a/tc/tc_core.c
+++ b/tc/tc_core.c
@@ -102,6 +102,21 @@ static unsigned tc_adjust_size(unsigned sz, unsigned mpu, enum link_layer linklayer)
 	}
 }
 
+/* Notice: the rate table calculated here has been replaced in the
+ * kernel and is no longer used for lookups.
+ *
+ * This happened in kernel release v3.8, caused by
+ * - commit 56b765b79 ("htb: improved accuracy at high rates").
+ * That change unfortunately broke the tc overhead and
+ * linklayer parameters.
+ *
+ * Kernel overhead handling was fixed in kernel v3.10 by
+ * - commit 01cb71d2d47 (net_sched: restore "overhead xxx" handling)
+ *
+ * Kernel linklayer handling was fixed in kernel v3.11 by
+ * - commit 8a8e3d84b17 (net_sched: restore "linklayer atm" handling)
+ */
+
 /*
    rtab[pkt_len>>cell_log] = pkt_xmit_time
  */
@@ -131,6 +146,7 @@ int tc_calc_rtable(struct tc_ratespec *r, __u32 *rtab,
 	r->cell_align=-1; // Due to the sz calc
 	r->cell_log=cell_log;
+	r->linklayer = (linklayer & TC_LINKLAYER_MASK);
 	return cell_log;
 }
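
Recording the linklayer in tc_calc_rtable() means the value travels to the kernel inside struct tc_ratespec, alongside rate, overhead, mpu and cell_log, which is what lets a v3.11+ kernel redo the ATM cell alignment itself instead of trusting the now-unused user-space rate table. For orientation, the structure looks roughly like this; an approximate UAPI excerpt, where the linklayer byte is assumed to reuse a formerly reserved pad byte and is masked to its low four bits on both the send path above and the print paths earlier in this patch.

/* Approximate layout of struct tc_ratespec from <linux/pkt_sched.h>;
 * shown for orientation, not copied from this commit. */
#include <linux/types.h>	/* __u8, __u32 */

struct tc_ratespec {
	unsigned char	cell_log;
	__u8		linklayer;	/* lower 4 bits hold a TC_LINKLAYER_* value */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
	__u32		rate;		/* bytes per second */
};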