path: root/libgo/go/net/http/lex.go
Diffstat (limited to 'libgo/go/net/http/lex.go')
-rw-r--r--  libgo/go/net/http/lex.go  | 73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/libgo/go/net/http/lex.go b/libgo/go/net/http/lex.go
index cb33318f49b..50b14f8b325 100644
--- a/libgo/go/net/http/lex.go
+++ b/libgo/go/net/http/lex.go
@@ -4,6 +4,11 @@
package http
+import (
+ "strings"
+ "unicode/utf8"
+)
+
// This file deals with lexical matters of HTTP
var isTokenTable = [127]bool{
@@ -94,3 +99,71 @@ func isToken(r rune) bool {
func isNotToken(r rune) bool {
return !isToken(r)
}
+
+// headerValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func headerValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
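
For reference, a minimal sketch of how the added helpers behave, written as a hypothetical in-package test (the helpers are unexported; this test is not part of the patch, and the test name and cases are illustrative assumptions only):

func TestHeaderTokenMatching(t *testing.T) {
	// Matching is ASCII case-insensitive and ignores optional whitespace
	// (spaces and tabs) around the comma-separated tokens.
	if !headerValuesContainsToken([]string{"keep-alive, Upgrade"}, "upgrade") {
		t.Error(`expected "upgrade" to be found in "keep-alive, Upgrade"`)
	}
	if !headerValueContainsToken(" CLOSE\t", "close") {
		t.Error(`expected "close" to match " CLOSE\t" after trimming OWS`)
	}
	// Tokens are ASCII-only: any non-ASCII byte makes tokenEqual report false,
	// even for byte-identical strings.
	if tokenEqual("clos\u00e9", "clos\u00e9") {
		t.Error("non-ASCII tokens should never compare equal")
	}
}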