aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJesper Skriver <jesper@FreeBSD.org>2001-08-06 09:20:57 +0000
committerJesper Skriver <jesper@FreeBSD.org>2001-08-06 09:20:57 +0000
commit5af927304292572afaeb9f7b0c9988f0b8fb95cd (patch)
tree42ad05735b4b0d0c3f2dd4a39fd34a55a7bfd92e
parente6f2b760a44dee56c81567834fc2f9d9d624a226 (diff)
downloadsrc-5af927304292572afaeb9f7b0c9988f0b8fb95cd.tar.gz
src-5af927304292572afaeb9f7b0c9988f0b8fb95cd.zip
MFS
src/sys/netinet/ip_input.c rev 1.130.2.22 src/sys/netinet6/frag6.c rev 1.2.2.4 src/sys/netinet6/in6_proto.c rev 1.6.2.4 Prevent denial of service using bogus fragmented IPv4 packets. An attacker sending a lot of bogus fragmented packets to the target (with different IPv4 identification field - ip_id), may be able to put the target machine into an mbuf starvation state. By setting an upper limit on the number of reassembly queues we prevent this situation. This upper limit is controlled by the new sysctl net.inet.ip.maxfragpackets which defaults to nmbclusters/4 If you want old behaviour (no upper limit) set this sysctl to a negative value. If you don't want to accept any fragments (not recommended) set the sysctl to 0 (zero) Obtained from: NetBSD (partially)
Notes
Notes: svn path=/stable/3/; revision=81196
-rw-r--r--sys/netinet/ip_input.c34
1 files changed, 33 insertions, 1 deletions
diff --git a/sys/netinet/ip_input.c b/sys/netinet/ip_input.c
index 29a29fa42eed..af6dab5cfce7 100644
--- a/sys/netinet/ip_input.c
+++ b/sys/netinet/ip_input.c
@@ -175,6 +175,12 @@ int (*fr_checkp) __P((struct ip *, int, struct ifnet *, int, struct mbuf **)) =
#endif
+static int ip_nfragpackets = 0;
+static int ip_maxfragpackets; /* initialized in ip_init() */
+SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
+ &ip_maxfragpackets, 0,
+ "Maximum number of IPv4 fragment reassembly queue entries");
+
/*
* We need to save the IP options in case a protocol wants to respond
* to an incoming packet over the same route if the packet got here
@@ -235,7 +241,8 @@ ip_init()
for (i = 0; i < IPREASS_NHASH; i++)
ipq[i].next = ipq[i].prev = &ipq[i];
- maxnipq = nmbclusters/4;
+ maxnipq = nmbclusters / 4;
+ ip_maxfragpackets = nmbclusters / 4;
ip_id = time_second & 0xffff;
ipintrq.ifq_maxlen = ipqmaxlen;
@@ -766,6 +773,15 @@ ip_reass(m, fp, where)
* If first fragment to arrive, create a reassembly queue.
*/
if (fp == 0) {
+ /*
+ * Enforce upper bound on number of fragmented packets
+ * for which we attempt reassembly;
+ * If maxfrag is 0, never accept fragments.
+ * If maxfrag is -1, accept all fragments without limitation.
+ */
+ if ((ip_maxfragpackets >= 0) && (ip_nfragpackets >= ip_maxfragpackets))
+ goto dropfrag;
+ ip_nfragpackets++;
if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL)
goto dropfrag;
fp = mtod(t, struct ipq *);
@@ -908,6 +924,7 @@ inserted:
remque(fp);
nipq--;
(void) m_free(dtom(fp));
+ ip_nfragpackets--;
m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
/* some debugging cruft by sklower, below, will go away soon */
@@ -948,6 +965,7 @@ ip_freef(fp)
}
remque(fp);
(void) m_free(dtom(fp));
+ ip_nfragpackets--;
nipq--;
}
@@ -976,6 +994,20 @@ ip_slowtimo()
}
}
}
+ /*
+ * If we are over the maximum number of fragments
+ * (due to the limit being lowered), drain off
+ * enough to get down to the new limit.
+ */
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ if (ip_maxfragpackets >= 0) {
+ while ((ip_nfragpackets > ip_maxfragpackets) &&
+ (ipq[i].next != &ipq[i])) {
+ ipstat.ips_fragdropped++;
+ ip_freef(ipq[i].next);
+ }
+ }
+ }
ipflow_slowtimo();
splx(s);
}