diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 100644 index 0000000..1d6eebc --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,3 @@ +# https://github.com/thecoshman/http/issues/140 +[advisories] +ignore = ["RUSTSEC-2021-0078", "RUSTSEC-2021-0079", "RUSTSEC-2020-0071", "RUSTSEC-2021-0139", "RUSTSEC-2023-0081", "RUSTSEC-2021-0144", "RUSTSEC-2021-0145", "RUSTSEC-2020-0027", "RUSTSEC-2022-0022"] diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..a239d3a --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,4 @@ +# Needed for st_dev/st_ino on windows ("windows_by_handle") +# available since 2019 in 1.38: https://github.com/rust-lang/rust/commit/c69f367bafb3a2f90d44fe54fc20d57996fa294a +[env] +RUSTC_BOOTSTRAP = "1" diff --git a/.gitignore b/.gitignore index bec9a66..69deb53 100644 --- a/.gitignore +++ b/.gitignore @@ -6,10 +6,15 @@ !Cargo.toml !rustfmt.toml !build.rs +!build-ioctl.c !http-manifest.rc !install.* !*.md +!.cargo +!.cargo/** !src !src/** !assets !assets/** +!vendor +!vendor/** diff --git a/Cargo.toml b/Cargo.toml index fe1fe0d..86af081 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,8 +8,8 @@ keywords = ["http", "server", "https", "webdav", "directory"] categories = ["network-programming", "web-programming::http-server"] license = "MIT" build = "build.rs" -# Remember to also update in appveyor.yml -version = "1.13.2" +# Remember to also update in appveyor.yml and the http-crates.io branch +version = "2.0.0" # Remember to also update in http.md authors = ["thecoshman ", "nabijaczleweli ", @@ -24,24 +24,19 @@ authors = ["thecoshman ", [dependencies] hyper-native-tls = "0.3" percent-encoding = "2.1" -lazy_static = "1.4" -serde_json = "0.9" +serde_json = "1.0" mime_guess = "1.8" tabwriter = "1.1" itertools = "0.8" -lazysort = "0.2" -unicase = "2.4" +arrayvec = "0.7" walkdir = "2.2" -base64 = "0.10" blake3 = "1.3" flate2 = "1.0" -rfsapi = "0.1" xml-rs = "0.8" -bzip2 = "0.4" ctrlc = "3.1" -regex = "1.2" -serde = 
"0.9" +serde = "1.0" clap = "2.33" +libc = "0.2" rand = "0.7" time = "0.1" @@ -49,31 +44,32 @@ time = "0.1" version = "0.3" default-features = false +[dependencies.rfsapi] +path = "vendor/rfsapi-0.2.0" + [dependencies.cidr] version = "0.1" default-features = false [dependencies.brotli] -version = "3.3" -default-features = false -features = ["std"] +version = "6.0" +features = ["simd"] [dependencies.iron] -version = "0.6" +path = "vendor/iron-0.6.1" features = ["hyper-native-tls"] +[patch.crates-io.hyper] +path = "vendor/hyper-0.10.16" + [target.'cfg(target_os = "windows")'.dependencies.winapi] version = "0.3" features = ["fileapi"] -[target.'cfg(not(target_os = "windows"))'.dependencies.os-str-generic] -version = "0.2" - -[target.'cfg(not(any(target_os = "windows", target_os = "macos")))'.dependencies.libc] -version = "0.2" [build-dependencies] embed-resource = "1.3" +base64 = "0.10" [target.'cfg(not(any(target_os = "windows", target_os = "macos")))'.build-dependencies.cc] version = "1.0" diff --git a/README.md b/README.md index 59e6f77..b1a3641 100644 --- a/README.md +++ b/README.md @@ -33,8 +33,9 @@ See [the manpage](http.md) for full list. If you have `cargo` installed (you're a Rust developer) all you need to do is: ```sh -cargo install https +cargo install --git https://github.com/thecoshman/http ``` +(the `https` crates.io package *was* http, but [is now unpublishable](//github.com/thecoshman/http/pull/160#issuecomment-2143877822)) Which will install `http` and `httplz` (identical, disable one or another if they clash) in the folder where all other binaries go. 
@@ -80,4 +81,4 @@ The idea is to make a program that can compile down to a simple binary that can * [x] If an index file isn't provided, one will be generated (in memory, no touching the disk, why would you do that you dirty freak you), that will list the current files and folders (and then sub directories will have index files generated as required) * [x] Changes made to files should be reflected instantly, as I don't see why anything would be cached... you request a file, a file will be looked for -It's not going to be a 'production ready' tool, it's a quick and dirty way of hosting a folder, so whilst I'll try to make it secure, it is not going to be a serious goal. +~~It's not going to be a 'production ready' tool, it's a quick and dirty way of hosting a folder, so whilst I'll try to make it secure, it is not going to be a serious goal.~~ diff --git a/appveyor.yml b/appveyor.yml index fc0a70f..f9dded1 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,7 +1,7 @@ image: - Visual Studio 2022 -version: 1.13.2-{build} +version: 2.0.0-{build} skip_tags: false @@ -27,21 +27,21 @@ build: off build_script: - git submodule update --init --recursive - cargo build --verbose --release - - cp target\release\http.exe http-v1.13.2.exe - - strip --strip-all --remove-section=.comment --remove-section=.note http-v1.13.2.exe - - makensis -DHTTP_VERSION=v1.13.2 install.nsi + - cp target\release\http.exe http-v2.0.0.exe + - strip --strip-all --remove-section=.comment --remove-section=.note http-v2.0.0.exe + - makensis -DHTTP_VERSION=v2.0.0 install.nsi test: off test_script: - cargo test --verbose --release artifacts: - - path: http-v1.13.2.exe - - path: http v1.13.2 installer.exe + - path: http-v2.0.0.exe + - path: http v2.0.0 installer.exe deploy: provider: GitHub - artifact: /http.*v1.13.2.*\.exe/ + artifact: /http.*v2.0.0.*\.exe/ auth_token: secure: ZTXvCrv9y01s7Hd60w8W7NaouPnPoaw9YJt9WhWQ2Pep8HLvCikt9Exjkz8SGP9P on: diff --git a/assets/adjust_tz.js b/assets/adjust_tz.js index 
83c1054..62d2bb0 100644 --- a/assets/adjust_tz.js +++ b/assets/adjust_tz.js @@ -1,15 +1,14 @@ "use strict"; -window.addEventListener("load", function() { - const FORMAT = "yyyy-MM-dd HH:mm:ss"; - +window.addEventListener("DOMContentLoaded", function() { let modtime_h = document.getElementsByTagName("th")[2]; if(modtime_h) - modtime_h.innerText = modtime_h.innerText.replace("(UTC)", "").trim(); + modtime_h.innerText = modtime_h.innerText.replace(" (UTC)", ""); - let timestamps = document.getElementsByClassName("datetime"); - Array.from(timestamps).forEach(function(r) { - let dt = r.innerText.replace("UTC", "").trim(); - r.innerText = Date.parseString(dt, FORMAT).format(FORMAT) - }); + let timestamps = document.getElementsByTagName("time"); + for(let r of timestamps) { + let dt = new Date(parseInt(r.getAttribute("ms"))); + dt.setMinutes(dt.getMinutes() - dt.getTimezoneOffset()) + r.innerText = dt.toISOString().slice(0, 19).replace("T", " "); + } }); diff --git a/assets/date.js b/assets/date.js deleted file mode 100644 index a288131..0000000 --- a/assets/date.js +++ /dev/null @@ -1,10 +0,0 @@ -/** - * Copyright (c)2005-2009 Matt Kruse (javascripttoolbox.com) - * - * Dual licensed under the MIT and GPL licenses. - * This basically means you can use this code however you want for - * free, but don't claim to have written it yourself! 
- * Donations always accepted: http://www.JavascriptToolbox.com/donate/ - * - */ -eval(function(p,a,c,k,e,d){e=function(c){return(c9?"":"0")+x};¨.×=¹ É(\'õ\',\'ö\',\'ó\',\'ò\',\'Ý\',\'ï\',\'ð\',\'î\',\'÷\',\'ø\',\'þ\',\'ÿ\');¨.È=¹ É(\'ý\',\'ü\',\'ù\',\'ú\',\'Ý\',\'û\',\'¢¡\',\'æ\',\'â\',\'á\',\'í\',\'ê\');¨.Ñ=¹ É(\'ë\',\'ì\',\'é\',\'è\',\'ä\',\'å\',\'ã\');¨.Ö=¹ É(\'ç\',\'ñ\',\'¢Ã\',\'¢¹\',\'¢º\',\'¢¸\',\'¢·\');¢(!¨.Ó.Ø){¨.Ó.Ø=À(){£ ¸=­.Þ();ª(¸<Ä?¸+Ä:¸)}}¨.¢´=À(¬,®){­.Ù=À(¬){Â(£ i=0;i<¬.¥;i++){¢("¢µ".¢¶(¬.º(i))==-1){ª ¢»}}ª ¢¼};­.³=À(Ü,i,Ï,à){Â(£ x=à;x>=Ï;x--){£ ¡=Ü.»(i,i+x);¢(¡.¥<Ï){ª ©}¢(­.Ù(¡)){ª ¡}}ª ©};¬=¬+"";®=®+"";£ ¦=0;£ ±=0;£ c="";£ ¡="";£ ¢Á="";£ x,y;£ °=¹ ¨().Ø();£ ¯=1;£ ²=1;£ §=0;£ µ=0;£ ¶=0;£ ¾="";Ç(±<®.¥){c=®.º(±);¡="";Ç((®.º(±)==c)&&(±<®.¥)){¡+=®.º(±++)}¢(¡=="Ã"||¡=="¸"||¡=="y"){¢(¡=="Ã"){x=4;y=4}¢(¡=="¸"){x=2;y=2}¢(¡=="y"){x=2;y=4}°=­.³(¬,¦,x,y);¢(°==©){ª ©}¦+=°.¥;¢(°.¥==2){¢(°>¢Â){°=Ä+(°-0)}«{°=¢À+(°-0)}}}« ¢(¡=="Å"||¡=="Ú"){¯=0;£ ¿=(¡=="Å"?(¨.×.¢¿(¨.È)):¨.È);Â(£ i=0;i<¿.¥;i++){£ Ê=¿[i];¢(¬.»(¦,¦+Ê.¥).¼()==Ê.¼()){¯=(i%´)+1;¦+=Ê.¥;ß}}¢((¯<1)||(¯>´)){ª ©}}« ¢(¡=="Õ"||¡=="E"){£ ¿=(¡=="Õ"?¨.Ñ:¨.Ö);Â(£ i=0;i<¿.¥;i++){£ Á=¿[i];¢(¬.»(¦,¦+Á.¥).¼()==Á.¼()){¦+=Á.¥;ß}}}« ¢(¡=="Ì"||¡=="M"){¯=­.³(¬,¦,¡.¥,2);¢(¯==©||(¯<1)||(¯>´)){ª ©}¦+=¯.¥}« ¢(¡=="Ô"||¡=="d"){²=­.³(¬,¦,¡.¥,2);¢(²==©||(²<1)||(²>¢½)){ª ©}¦+=².¥}« ¢(¡=="§"||¡=="h"){§=­.³(¬,¦,¡.¥,2);¢(§==©||(§<1)||(§>´)){ª ©}¦+=§.¥}« ¢(¡=="Î"||¡=="H"){§=­.³(¬,¦,¡.¥,2);¢(§==©||(§<0)||(§>¢¾)){ª ©}¦+=§.¥}« ¢(¡=="Ë"||¡=="K"){§=­.³(¬,¦,¡.¥,2);¢(§==©||(§<0)||(§>Æ)){ª ©}¦+=§.¥;§++}« ¢(¡=="Í"||¡=="k"){§=­.³(¬,¦,¡.¥,2);¢(§==©||(§<1)||(§>¢³)){ª ©}¦+=§.¥;§--}« ¢(¡=="µ"||¡=="m"){µ=­.³(¬,¦,¡.¥,2);¢(µ==©||(µ<0)||(µ>Û)){ª ©}¦+=µ.¥}« ¢(¡=="¶"||¡=="s"){¶=­.³(¬,¦,¡.¥,2);¢(¶==©||(¶<0)||(¶>Û)){ª ©}¦+=¶.¥}« ¢(¡=="a"){¢(¬.»(¦,¦+2).¼()=="¢²"){¾="Ð"}« ¢(¬.»(¦,¦+2).¼()=="¢¢"){¾="Ò"}«{ª ©}¦+=2}«{¢(¬.»(¦,¦+¡.¥)!=¡){ª ©}«{¦+=¡.¥}}}¢(¦!=¬.¥){ª ©}¢(¯==2){¢(((°%4==0)&&(°%¢¨!=0))||(°%¢©==0)){¢(²>¢§){ª ©}}«{¢(²>¢¦){ª 
©}}}¢((¯==4)||(¯==6)||(¯==9)||(¯==Æ)){¢(²>¢£){ª ©}}¢(§<´&&¾=="Ò"){§=§-0+´}« ¢(§>Æ&&¾=="Ð"){§-=´}ª ¹ ¨(¨.¢¤(°,¯-1,²,§,µ,¶))};¨.Ó.®=À(®){®=®+"";£ ½="";£ ±=0;£ c="";£ ¡="";£ y=­.Þ()+"";£ M=­.¢¥()+1;£ d=­.¢ª();£ E=­.¢«();£ H=­.¢°();£ m=­.¢±();£ s=­.¢¯();£ Ã,¸,Å,Ì,Ô,§,h,µ,¶,¾,Î,H,Ë,K,Í,k;£ ¤=¹ ¢®();¢(y.¥<4){y=""+(+y+Ä)}¤["y"]=""+y;¤["Ã"]=y;¤["¸"]=y.»(2,4);¤["M"]=M;¤["Ì"]=¨.·(M);¤["Å"]=¨.×[M-1];¤["Ú"]=¨.È[M-1];¤["d"]=d;¤["Ô"]=¨.·(d);¤["E"]=¨.Ö[E];¤["Õ"]=¨.Ñ[E];¤["H"]=H;¤["Î"]=¨.·(H);¢(H==0){¤["h"]=´}« ¢(H>´){¤["h"]=H-´}«{¤["h"]=H}¤["§"]=¨.·(¤["h"]);¤["K"]=¤["h"]-1;¤["k"]=¤["H"]+1;¤["Ë"]=¨.·(¤["K"]);¤["Í"]=¨.·(¤["k"]);¢(H>Æ){¤["a"]="Ò"}«{¤["a"]="Ð"}¤["m"]=m;¤["µ"]=¨.·(m);¤["s"]=s;¤["¶"]=¨.·(s);Ç(±<®.¥){c=®.º(±);¡="";Ç((®.º(±)==c)&&(±<®.¥)){¡+=®.º(±++)}¢(¢¬(¤[¡])!="¢­"){½=½+¤[¡]}«{½=½+¡}}ª ½};',95,130,'token|if|var|value|length|i_val|hh|Date|null|return|else|val|this|format|month|year|i_format|date|getInt|12|mm|ss|LZ|yy|new|charAt|substring|toLowerCase|result|ampm|names|function|day_name|for|yyyy|1900|MMM|11|while|monthAbbreviations|Array|month_name|KK|MM|kk|HH|minlength|AM|dayNames|PM|prototype|dd|EE|dayAbbreviations|monthNames|getFullYear|isInteger|NNN|59|str|May|getYear|break|maxlength|Oct|Sep|Saturday|Thursday|Friday|Aug|Sun|Wednesday|Tuesday|Dec|Sunday|Monday|Nov|August|June|July|Mon|April|March|02|January|February|September|October|Mar|Apr|Jun|Feb|Jan|November|December|Jul|pm|30|UTC|getMonth|28|29|100|400|getDate|getDay|typeof|undefined|Object|getSeconds|getHours|getMinutes|am|24|parseString|1234567890|indexOf|Sat|Fri|Wed|Thu|false|true|31|23|concat|2000|token2|70|Tue'.split('|'),0,{})); diff --git a/assets/directory_listing.html b/assets/directory_listing.html index de79c5c..58aa583 100644 --- a/assets/directory_listing.html +++ b/assets/directory_listing.html @@ -2,25 +2,19 @@ - - + - - - - {1} + + + {1}{2}{3} Directory listing — {0} - -
-

The requested directory {0} contains the following files:

- - {5} - {2} - {6} - {3} -
Name Last modified (UTC) Size
-
+

The requested directory {0} contains the following files:

+ + {7} + {4} + {8} + {5} +
Name Last modified (UTC) Size

- {4} + {6}

- Host These Things Please — a basic HTTP server for hosting a folder fast and simply + Host These Things Please — a basic HTTP server for hosting a folder fast and simply

diff --git a/assets/directory_listing_mobile.html b/assets/directory_listing_mobile.html index 76a1da7..ff02784 100644 --- a/assets/directory_listing_mobile.html +++ b/assets/directory_listing_mobile.html @@ -2,55 +2,34 @@ - - + - - - - {2} + + + {1}{2}{3} Directory listing — {0} - -
- {0} - {3} - {6} - {4} -
+ {0} + {4} + {7} {5} - - Host These Things Please — a basic HTTP server for hosting a folder fast and simply - + {6} + Host These Things Please — a basic HTTP server for hosting a folder fast and simply diff --git a/assets/encoding_blacklist b/assets/encoding_blacklist index e1fc854..61425da 100644 --- a/assets/encoding_blacklist +++ b/assets/encoding_blacklist @@ -1,9 +1,10 @@ ## Stolen from https://en.wikipedia.org/wiki/List_of_archive_formats # Compression-only files +br bz2 -F gz lz +lz4 lzma lzo rz @@ -11,49 +12,57 @@ sfark sz xz z -Z +zst # Compression and archiving 7z -s7z +aar ace afa alz apk arc +arc arj +ark b1 +b6z ba bh cab car +cdx cfs cpt dar dd +deb dgc dmg ear gca +genozip ha hki ice jar kgb -lzh lha +lzh lzx pak -partimg paq6 paq7 paq8 +partimg pea +phar pim pit qda rar rk +s7z sda sea sen @@ -62,17 +71,18 @@ shk sit sitx sqx -tgz tbz2 +tgz tlz +txz uc uc0 uc2 +uca ucn -ur2 ue2 -uca uha +ur2 war wim xar @@ -87,35 +97,47 @@ zz ## Stolen from https://en.wikipedia.org/wiki/Video_file_format # Compressed videos -webm -mkv -flv -ogv -ogg +3g2 +3gp +amv +asf +avi drc +f4a +f4b +f4p +f4v +flv +gif gifv -wmv -rm -rmvb -asf -amv -mp4 +m2ts +m2v m4p m4v -mpg +mkv +mng +mov mp2 -mpeg +mp4 mpe +mpeg +mpg mpv -m2v -3gp -3g2 +mts mxf -flv -f4v -f4p -f4a -f4b +nsv +ogg +ogv +qt +rm +rmvb +roq +svi +viv +vob +webm +wmv +yuv ## Stolen from https://en.wikipedia.org/wiki/Audio_file_format @@ -135,11 +157,11 @@ ivs m4a m4b mmf +mogg mp3 mpc msv oga -mogg opus ra sln @@ -151,14 +173,17 @@ wv ## Stolen from https://en.wikipedia.org/wiki/Image_file_format # Compressed images -jpeg -jpg +avif +avifs +bpg +gif +heic jfif +jpeg jpeg2000 +jpg pcx +png tif tiff -gif -png webp -bpg diff --git a/assets/error.html b/assets/error.html index 2d7e5d9..237a746 100644 --- a/assets/error.html +++ b/assets/error.html @@ -2,12 +2,11 @@ - - + - + {0} @@ -17,7 +16,7 @@ {2}

- Host These Things Please — a basic HTTP server for hosting a folder fast and simply + Host These Things Please — a basic HTTP server for hosting a folder fast and simply

diff --git a/assets/favicon.ico b/assets/favicon.ico index 0d481a6..09be227 100644 Binary files a/assets/favicon.ico and b/assets/favicon.ico differ diff --git a/assets/favicon.png b/assets/favicon.png index a7e5e38..1bcff62 100644 Binary files a/assets/favicon.png and b/assets/favicon.png differ diff --git a/assets/icons/LICENSE b/assets/icons/LICENSE index 6394969..a4f67d9 100644 --- a/assets/icons/LICENSE +++ b/assets/icons/LICENSE @@ -1,5 +1,5 @@ Unless otherwise specified, all files in this directory come from the Apache Public Domain Icons repository as seen under https://web.archive.org/web/20160303193836/http://www.apache.org/icons/ delete_file.png based on https://commons.wikimedia.org/wiki/File:Ballot_x_no_small.png by Mankash licensed under CC BY-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0/) -new_directory.gif and new_directory_icon.psd based on dir.png and small/burst.png from the Apache Public Domain Icons repository and released into the public domain -confirm.png based on https://commons.wikimedia.org/wiki/File:True-Symbol.gif by Abnormaal and released into the public domain +new_directory.gif and new_directory.psd based on dir.png and small/burst.png from the Apache Public Domain Icons repository and released into the public domain +confirm.gif derived from the public-domain https://commons.wikimedia.org/wiki/File:Green_check.png diff --git a/assets/icons/confirm.gif b/assets/icons/confirm.gif new file mode 100644 index 0000000..3e875a7 Binary files /dev/null and b/assets/icons/confirm.gif differ diff --git a/assets/icons/confirm.png b/assets/icons/confirm.png deleted file mode 100644 index 5037607..0000000 Binary files a/assets/icons/confirm.png and /dev/null differ diff --git a/assets/icons/rename.gif b/assets/icons/rename.gif new file mode 100644 index 0000000..9bfb4e1 Binary files /dev/null and b/assets/icons/rename.gif differ diff --git a/assets/icons/rename.png b/assets/icons/rename.png deleted file mode 100644 index 
51f1dd8..0000000 Binary files a/assets/icons/rename.png and /dev/null differ diff --git a/assets/manage.js b/assets/manage.js index 561dcb6..959bd81 100644 --- a/assets/manage.js +++ b/assets/manage.js @@ -1,67 +1,64 @@ "use strict"; -window.addEventListener("load", function() { - let delete_file_links = document.getElementsByClassName("delete_file_icon"); - let rename_links = document.getElementsByClassName("rename_icon"); +function delete_onclick(ev) { + ev.preventDefault(); + let link = ev.target; + let line = link.parentElement.parentElement; + make_request("DELETE", get_href_for_line(line), link); +} - for(let i = delete_file_links.length - 1; i >= 0; --i) { - let link = delete_file_links[i]; - link.addEventListener("click", function(ev) { - ev.preventDefault(); +function rename_onclick(ev) { + ev.preventDefault(); - let line = link.parentElement.parentElement; - make_request("DELETE", get_href_for_line(line), link); - }); - } + let link = ev.target; + let line = link.parentElement.parentElement; + let filename_cell = get_filename_cell_for_line(line); + let original_name = filename_cell.innerText; - let first_rename_onclick = function(link, first_onclick, ev) { - ev.preventDefault(); - - let line = link.parentElement.parentElement; - let filename_cell = get_filename_cell_for_line(line); - let original_name = filename_cell.innerText; + let submit_callback = function() { + rename(original_name, new_name_input.value, link); + }; + let new_name_input = make_filename_input(filename_cell, original_name, submit_callback); - let submit_callback = function() { - rename(original_name, new_name_input.value, link); - }; - let new_name_input = make_filename_input(filename_cell, original_name, submit_callback); + make_confirm_icon(link, submit_callback); +} - link.removeEventListener("click", first_onclick); - make_confirm_icon(link, submit_callback); +function make_confirm_icon(element, callback) { + element.classList.add("confirm_icon"); + element.innerText = "Confirm"; + 
element.onclick = function(ev) { + ev.preventDefault(); + ev.stopImmediatePropagation(); + callback(); }; - for(let i = rename_links.length - 1; i >= 0; --i) { - let link = rename_links[i]; +} - let first_onclick = function(ev) { - first_rename_onclick(link, first_onclick, ev); - }; - link.addEventListener("click", first_onclick); - } +function rename(fname_from, fname_to, status_out) { + let root_url = window.location.origin + window.location.pathname; + if(!root_url.endsWith("/")) + root_url += "/"; - function rename(fname_from, fname_to, status_out) { - let root_url = window.location.origin + window.location.pathname; - if(!root_url.endsWith("/")) - root_url += "/"; - - if(fname_from.endsWith("/")) - fname_from = fname_from.substr(0, fname_from.length - 1); - if(fname_to.endsWith("/")) - fname_to = fname_to.substr(0, fname_to.length - 1); + if(fname_from.endsWith("/")) + fname_from = fname_from.substr(0, fname_from.length - 1); + if(fname_to.endsWith("/")) + fname_to = fname_to.substr(0, fname_to.length - 1); + if(fname_from == fname_to) // 403 Forbidden nominally + window.location.reload(); + else make_request("MOVE", root_url + encodeURI(fname_from), status_out, function(request) { request.setRequestHeader("Destination", root_url + encodeURI(fname_to)); }); - }; -}); +} function make_filename_input(input_container, initial, callback) { input_container.innerHTML = ""; let input_elem = input_container.children[0]; - input_elem.value = initial; + input_elem.value = initial.endsWith('/') ? 
initial.slice(0, -1) : initial; input_elem.addEventListener("keypress", function(ev) { if(ev.keyCode === 13) { // Enter @@ -74,19 +71,8 @@ function make_filename_input(input_container, initial, callback) { }); input_elem.focus(); - return input_elem; -}; - -function make_confirm_icon(element, callback) { - element.classList.add("confirm_icon"); - element.href = "#confirm"; - element.innerText = "Confirm"; - element.addEventListener("click", function(ev) { - ev.preventDefault(); - callback(); - }); -}; +} function create_new_directory(fname, status_out) { let req_url = window.location.origin + window.location.pathname; @@ -95,8 +81,9 @@ function create_new_directory(fname, status_out) { req_url += encodeURI(fname); make_request("MKCOL", req_url, status_out); -}; +} +let make_request_error = false; function make_request(verb, url, status_out, request_modifier) { let request = new XMLHttpRequest(); request.addEventListener("loadend", function() { @@ -105,10 +92,11 @@ function make_request(verb, url, status_out, request_modifier) { else { status_out.innerHTML = request.status + " " + request.statusText + (request.response ? 
" — " : "") + request.response; status_out.classList.add("has-log"); + make_request_error = true; } }); request.open(verb, url); if(request_modifier) request_modifier(request); request.send(); -}; +} diff --git a/assets/manage_desktop.js b/assets/manage_desktop.js index 65f1f00..8eb9fdc 100644 --- a/assets/manage_desktop.js +++ b/assets/manage_desktop.js @@ -1,33 +1,31 @@ "use strict"; -window.addEventListener("load", function() { - let new_directory_line = document.getElementById("new_directory"); - - if(new_directory_line) { - let new_directory_filename_cell = new_directory_line.children[1]; - let new_directory_status_output = new_directory_line.children[4].children[0]; - let new_directory_filename_input = null; - - new_directory_line.addEventListener("click", function(ev) { - if(new_directory_filename_input === null) - ev.preventDefault(); - else if(ev.target === new_directory_status_output) - ; - else if(ev.target !== new_directory_filename_input) { - ev.preventDefault(); - new_directory_filename_input.focus(); - } - - if(new_directory_filename_input === null) { - let submit_callback = function() { - create_new_directory(new_directory_filename_input.value, new_directory_status_output); - }; - - new_directory_filename_input = make_filename_input(new_directory_filename_cell, "", submit_callback); - make_confirm_icon(new_directory_status_output, submit_callback); - } - }, true); - } +window.addEventListener("DOMContentLoaded", function() { + let new_directory = document.getElementById('new"directory'); + let new_directory_filename_cell = new_directory.children[1]; + let new_directory_status_output = new_directory.children[2].children[0]; + let new_directory_filename_input = null; + + new_directory.addEventListener("click", function(ev) { + if(new_directory_filename_input === null) + ev.preventDefault(); + else if(ev.target === new_directory_status_output) + ; + else if(ev.target !== new_directory_filename_input) { + ev.preventDefault(); + 
new_directory_filename_input.focus(); + } + + if(new_directory_filename_input === null) { + let submit_callback = function() { + create_new_directory(new_directory_filename_input.value, new_directory_status_output); + }; + + ev.stopImmediatePropagation(); + new_directory_filename_input = make_filename_input(new_directory_filename_cell, "", submit_callback); + make_confirm_icon(new_directory_status_output, submit_callback); + } + }, true); }); diff --git a/assets/manage_mobile.js b/assets/manage_mobile.js index cc76513..64603e6 100644 --- a/assets/manage_mobile.js +++ b/assets/manage_mobile.js @@ -1,48 +1,38 @@ "use strict"; -window.addEventListener("load", function() { - let new_directory_line = document.getElementById("new_directory"); - - if(new_directory_line) { - let new_directory_status_output = new_directory_line.children[0]; - let new_directory_filename_input = null; - - new_directory_line.addEventListener("click", function(ev) { - if(new_directory_filename_input === null || ev.target === new_directory_filename_input) - ev.preventDefault(); - else if(ev.target === new_directory_status_output) - ; - else if(ev.target !== new_directory_filename_input) { - ev.preventDefault(); - new_directory_filename_input.focus(); - } - - if(new_directory_filename_input === null) { - let new_directory_filename_cell = document.createElement("span"); - new_directory_filename_cell.id = "newdir_input"; - new_directory_line.append(new_directory_filename_cell); - - let first_onclick = true; - let submit_callback = function() { - if(first_onclick) { - first_onclick = false; - return; - } - create_new_directory(new_directory_filename_input.value, new_directory_status_output); - }; - - new_directory_filename_input = make_filename_input(new_directory_filename_cell, "", submit_callback); - make_confirm_icon(new_directory_status_output, submit_callback); - } - }, true); - } +window.addEventListener("DOMContentLoaded", function() { + let new_directory = 
document.getElementById('new"directory'); + + let first_onclick = true, input; + let submit_callback = function() { + if(make_request_error) { + first_onclick = true; + make_request_error = false; + } + if(first_onclick) { + first_onclick = false; + create_new_directory(input.value, new_directory.firstChild); + } + }; + + new_directory.onclick = function(ev) { + ev.preventDefault(); + + if(!input) { + make_confirm_icon(new_directory.firstChild, submit_callback); + let c = document.createElement("span"); + new_directory.appendChild(c); + input = make_filename_input(c, "", submit_callback); + } else + input.focus(); + }; }); function get_href_for_line(line) { - return line.href; + return line.parentElement.href; } function get_filename_cell_for_line(line) { - return line.children[0]; + return line.firstChild; } diff --git a/assets/upload.js b/assets/upload.js index 7650fe9..37d1de1 100644 --- a/assets/upload.js +++ b/assets/upload.js @@ -1,10 +1,10 @@ "use strict"; -window.addEventListener("load", function() { +window.addEventListener("DOMContentLoaded", function() { const SUPPORTED_TYPES = ["Files", "application/x-moz-file"]; let body = document.getElementsByTagName("body")[0]; - let file_upload = document.getElementById("file_upload"); + let file_upload_text = null; let remaining_files = 0; let url = document.location.pathname; if(!url.endsWith("/")) @@ -23,13 +23,6 @@ window.addEventListener("load", function() { })) { ev.preventDefault(); - for(let i = ev.dataTransfer.files.length - 1; i >= 0; --i) { - if(!ev.dataTransfer.items[i].webkitGetAsEntry) - ++remaining_files; - else - recurse_count(ev.dataTransfer.items[i].webkitGetAsEntry()); - } - for(let i = ev.dataTransfer.files.length - 1; i >= 0; --i) { if(!ev.dataTransfer.items[i].webkitGetAsEntry) { let file = ev.dataTransfer.files[i]; @@ -40,9 +33,8 @@ window.addEventListener("load", function() { } }); + let file_upload = document.querySelector("input[type=file]"); file_upload.addEventListener("change", 
function() { - remaining_files += file_upload.files.length; - for(let i = file_upload.files.length - 1; i >= 0; --i) { let file = file_upload.files[i]; upload_file(url + encodeURIComponent(file.name), file); @@ -50,12 +42,25 @@ window.addEventListener("load", function() { }); function upload_file(req_url, file) { + ++remaining_files; + if(!file_upload_text) { + file_upload_text = document.createTextNode(1); + file_upload.parentNode.insertBefore(file_upload_text, file_upload.nextSibling); // insertafter + } else + file_upload_text.data = remaining_files; + let request = new XMLHttpRequest(); request.addEventListener("loadend", function(e) { - if(--remaining_files === 0) - window.location.reload(); + if(request.status >= 200 && request.status < 300) { + if(!--remaining_files) + window.location.reload(); + file_upload_text.data = remaining_files; + } else + file_upload.outerHTML = req_url + "
" + request.response; }); request.open("PUT", req_url); + if(file.lastModified) + request.setRequestHeader("X-Last-Modified", file.lastModified); request.send(file); } @@ -67,20 +72,22 @@ window.addEventListener("load", function() { }); else upload_file(base_url + entry.fullPath.split("/").filter(function(seg) { return seg; }).map(encodeURIComponent).join("/"), entry.getFile()); - } else - entry.createReader().readEntries(function(e) { - e.forEach(function(f) { - recurse_upload(f, base_url) - }); + } else // https://developer.mozilla.org/en-US/docs/Web/API/DataTransferItem/webkitGetAsEntry#javascript: + // Note: To read all files in a directory, readEntries needs to be + // called repeatedly until it returns an empty array. In + // Chromium-based browsers, the following example will only return a + // max of 100 entries. + // This is actually true. + all_in_reader(entry.createReader(), function(f) { + recurse_upload(f, base_url) }); } - function recurse_count(entry) { - if(entry.isFile) { - ++remaining_files; - } else - entry.createReader().readEntries(function(e) { - e.forEach(recurse_count); - }); + function all_in_reader(reader, f) { + reader.readEntries(function(e) { + e.forEach(f); + if(e.length) + all_in_reader(reader, f); + }); } }); diff --git a/build-ioctl.c b/build-ioctl.c new file mode 100644 index 0000000..3b7cf61 --- /dev/null +++ b/build-ioctl.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: 0BSD +// Derived from https://git.sr.ht/~nabijaczleweli/voreutils/tree/02bcd701febb555147b67e0fa7fdc1504fe3cca2/item/cmd/wc.cpp#L155-177 + +#include +#include +#include +#if __linux__ +#include +#elif __OpenBSD__ +#include +#include +#elif __has_include() // NetBSD, FreeBSD +#include +#include +#include +#elif __has_include() // illumos +#include +#include +#include +#endif + + +extern uint64_t http_blkgetsize(int fd); +uint64_t http_blkgetsize(int fd) { + int ret = -1; + +#ifdef BLKGETSIZE64 // Linux + uint64_t sz; + ret = ioctl(fd, BLKGETSIZE64, &sz); +#elif 
defined(DIOCGMEDIASIZE) // NetBSD disk(9), FreeBSD disk(4) + off_t sz; + ret = ioctl(fd, DIOCGMEDIASIZE, &sz); +#elif defined(DIOCGDINFO) // OpenBSD + struct disklabel dl; + ret = ioctl(fd, DIOCGDINFO, &dl); + uint64_t sz = DL_GETDSIZE(&dl); + if(__builtin_mul_overflow(sz, dl.d_secsize, &sz)) + ret = -1; +#elif defined(DKIOCGMEDIAINFO) // illumos + struct dk_minfo mi; + ret = ioctl(fd, DKIOCGMEDIAINFO, &mi); + uint64_t sz = mi.dki_capacity; + if(__builtin_mul_overflow(sz, mi.dki_lbsize, &sz)) + ret = -1; +#endif + + if(ret == -1) + return -1; + else + return sz; +} diff --git a/build.rs b/build.rs index 4f46851..846cbf9 100644 --- a/build.rs +++ b/build.rs @@ -1,64 +1,195 @@ extern crate embed_resource; +extern crate base64; #[cfg(not(any(target_os = "windows", target_os = "macos")))] extern crate cc; -#[cfg(not(any(target_os = "windows", target_os = "macos")))] use std::env; -#[cfg(not(any(target_os = "windows", target_os = "macos")))] -use std::io::Write; -#[cfg(not(any(target_os = "windows", target_os = "macos")))] use std::path::Path; -#[cfg(not(any(target_os = "windows", target_os = "macos")))] use std::fs::{self, File}; +use base64::display::Base64Display; +use std::io::{BufReader, BufRead, Write}; +use std::collections::{BTreeMap, BTreeSet}; -/// The last line of this, after running it through a preprocessor, will expand to the value of `BLKGETSIZE` -#[cfg(not(any(target_os = "windows", target_os = "macos")))] -static IOCTL_CHECK_SOURCE: &str = r#" -#include -BLKGETSIZE -"#; +fn main() { + htmls(); + extensions(); + + embed_resource::compile("http-manifest.rc"); -/// Replace `{}` with the `BLKGETSIZE` expression from `IOCTL_CHECK_SOURCE` #[cfg(not(any(target_os = "windows", target_os = "macos")))] -static IOCTL_INCLUDE_SKELETON: &str = r#" -/// Return `device size / 512` (`long *` arg) -static BLKGETSIZE: {type} = {expr} as {type}; -"#; + cc::Build::new().file("build-ioctl.c").define("_GNU_SOURCE", "1").compile("http-ioctl"); +} -fn main() { - 
embed_resources(); - get_ioctl_data(); +fn assets() -> Vec<(&'static str, String)> { + let mut assets = Vec::new(); + { + println!("cargo:rerun-if-changed=Cargo.toml"); + assets.push(("generator", + format!("http {}", + BufReader::new(File::open("Cargo.toml").unwrap()).lines().flatten().find(|l| l.starts_with("version = ")).unwrap() + ["version = ".len()..] + .trim_matches('"')))); + } + for (key, mime, file) in + [("favicon", "image/png", "assets/favicon.png"), + ("dir_icon", "image/gif", "assets/icons/directory.gif"), + ("file_icon", "image/gif", "assets/icons/file.gif"), + ("file_binary_icon", "image/gif", "assets/icons/file_binary.gif"), + ("file_image_icon", "image/gif", "assets/icons/file_image.gif"), + ("file_text_icon", "image/gif", "assets/icons/file_text.gif"), + ("back_arrow_icon", "image/gif", "assets/icons/back_arrow.gif"), + ("new_dir_icon", "image/gif", "assets/icons/new_directory.gif"), + ("delete_file_icon", "image/png", "assets/icons/delete_file.png"), + ("rename_icon", "image/gif", "assets/icons/rename.gif"), + ("confirm_icon", "image/gif", "assets/icons/confirm.gif")] { + println!("cargo:rerun-if-changed={}", file); + assets.push((key, + format!("data:{};base64,{}", + mime, + Base64Display::with_config(&fs::read(file).unwrap()[..], base64::STANDARD)))); + } + let outd = env::var("OUT_DIR").unwrap(); + fs::create_dir_all(format!("{}/{}", outd, "assets")).unwrap(); + for (key, file) in [("manage", "assets/manage.js"), + ("manage_mobile", "assets/manage_mobile.js"), + ("manage_desktop", "assets/manage_desktop.js"), + ("upload", "assets/upload.js"), + ("adjust_tz", "assets/adjust_tz.js")] { + println!("cargo:rerun-if-changed={}", file); + let data = fs::read_to_string(file).unwrap(); + fs::write(format!("{}/{}", outd, file), + data.lines().flat_map(|l| [l.trim(), "\n"]).collect::().as_bytes()) + .unwrap(); + assets.push((key, data)); + } + assets } -fn embed_resources() { - embed_resource::compile("http-manifest.rc"); +fn htmls() { + let assets = 
assets(); + for html in ["error.html", "directory_listing.html", "directory_listing_mobile.html"] { + println!("cargo:rerun-if-changed=assets/{}", html); + + let with_assets = assets.iter() + .fold(fs::read_to_string(format!("assets/{}", html)).unwrap(), + |d, (k, v)| d.replace(&format!("{{{}}}", k), v)) + .lines() + .flat_map(|l| [l.trim(), "\n"]) + .collect::(); + + let mut arguments = BTreeMap::new(); + for i in 0.. { + let len_pre = arguments.len(); + arguments.extend(with_assets.match_indices(&format!("{{{}}}", i)).map(|(start, s)| (start, (s.len(), i)))); + if arguments.len() == len_pre { + break; + } + } + + let mut data = Vec::new(); + let mut argsused = BTreeMap::::new(); + let mut idx = 0; + for (start, (len, argi)) in arguments { + if with_assets[idx..start].len() != 0 { + data.push(Ok(&with_assets[idx..start])); + } + data.push(Err(argi)); + *argsused.entry(argi).or_default() += 1; + idx = start + len; + } + + + let mut out = File::create(Path::new(&env::var("OUT_DIR").unwrap()).join(format!("{}.rs", html))).unwrap(); + write!(&mut out, "pub fn {}<", html.replace('.', "_")).unwrap(); + for (arg, nused) in &argsused { + if *nused == 1 { + write!(&mut out, "T{}: HtmlResponseElement, ", arg).unwrap(); + } else { + write!(&mut out, "T{}: HtmlResponseElement + Copy, ", arg).unwrap(); + } + } + write!(&mut out, ">(").unwrap(); + for (arg, _) in &argsused { + write!(&mut out, "a{}: T{}, ", arg, arg).unwrap(); + } + let raw_bytes = data.iter().fold(0, |sz, dt| match dt { + Ok(s) => sz + s.len(), + Err(_) => sz, + }); + writeln!(&mut out, + r#") -> String {{ + let mut ret = Vec::with_capacity({}); // {}"#, + if html == "error.html" { + raw_bytes.next_power_of_two() + } else { + 32 * 1024 + }, + raw_bytes) + .unwrap(); + for dt in data { + match dt { + Ok(s) => writeln!(&mut out, " ret.extend({:?}.as_bytes());", s).unwrap(), + Err(i) => writeln!(&mut out, " a{}.commit(&mut ret);", i).unwrap(), + } + } + writeln!(&mut out, " ret.extend({:?}.as_bytes());", 
&with_assets[idx..]).unwrap(); + + writeln!(&mut out, + r#" + ret.shrink_to_fit(); + unsafe {{ String::from_utf8_unchecked(ret) }} +}}"#) + .unwrap(); + } } -#[cfg(any(target_os = "windows", target_os = "macos"))] -fn get_ioctl_data() {} -#[cfg(not(any(target_os = "windows", target_os = "macos")))] -fn get_ioctl_data() { - let ioctl_dir = Path::new(&env::var("OUT_DIR").unwrap()).join("ioctl-data"); - fs::create_dir_all(&ioctl_dir).unwrap(); +fn extensions() { + println!("cargo:rerun-if-changed={}", "assets/encoding_blacklist"); + let mut out = File::create(Path::new(&env::var("OUT_DIR").unwrap()).join("extensions.rs")).unwrap(); - let ioctl_source = ioctl_dir.join("ioctl.c"); - File::create(&ioctl_source).unwrap().write_all(IOCTL_CHECK_SOURCE.as_bytes()).unwrap(); + let raw = fs::read_to_string("assets/encoding_blacklist").unwrap(); + let mut exts = BTreeMap::new(); + for ext in raw.split('\n').map(str::trim).filter(|s| !s.is_empty() && !s.starts_with('#')) { + exts.entry(ext.len()).or_insert(BTreeSet::new()).insert(ext); + } + writeln!(out, "pub fn extension_is_blacklisted(ext: &OsStr) -> bool {{").unwrap(); + writeln!(out, "#[cfg(not(target_os = \"windows\"))] use std::os::unix::ffi::OsStrExt;").unwrap(); - let ioctl_preprocessed = String::from_utf8(cc::Build::new().file(ioctl_source).expand()).unwrap(); - let blkgetsize_expr = ioctl_preprocessed.lines().next_back().unwrap().replace("U", ""); - let ioctl_request_type = match &env::var("CARGO_CFG_TARGET_ENV").expect("CARGO_CFG_TARGET_ENV")[..] 
{ - "musl" => "libc::c_int", - _ => "libc::c_ulong", - }; + write!(out, "if !matches!(ext.len(),").unwrap(); + for (i, len) in exts.keys().enumerate() { + write!(out, " {} {}", if i == 0 { "" } else { "|" }, len).unwrap(); + } + writeln!(out, ") {{ return false; }}").unwrap(); - let ioctl_include = ioctl_dir.join("ioctl.rs"); - File::create(&ioctl_include) - .unwrap() - .write_all(IOCTL_INCLUDE_SKELETON.replace("{type}", ioctl_request_type).replace("{expr}", &blkgetsize_expr).as_bytes()) + let maxlen = exts.keys().max().unwrap(); + writeln!(out, + r#" +let mut buf = [0u8; {}]; +#[cfg(not(target_os = "windows"))] +let bytes = ext.as_bytes(); +#[cfg(target_os = "windows")] +let bytes = ext.as_encoded_bytes(); +for (i, b) in bytes.iter().enumerate() {{ +if !b.is_ascii_alphanumeric() {{ + return false; +}} +buf[i] = b.to_ascii_lowercase(); +}} +let lcase = &buf[0..ext.len()]; +"#, + maxlen) .unwrap(); + + write!(out, "matches!(lcase,").unwrap(); + for (i, ext) in exts.values().flatten().enumerate() { + write!(out, " {} b{:?}", if i == 0 { "" } else { "|" }, ext).unwrap(); + } + writeln!(out, ")").unwrap(); + + writeln!(out, "}}").unwrap(); } diff --git a/http.md b/http.md index fcd5fd0..3cc202e 100644 --- a/http.md +++ b/http.md @@ -182,6 +182,9 @@ pass parameters like what port to use. Currently supported write operations: PUT and DELETE. + With PUTs, the X-Last-Modified/X-OC-MTime headers can be specified to + milliseconds/seconds since epoch, and this will be set on the file's st_mtim. + This is false by default because it's most likely not something you want to do. @@ -214,6 +217,25 @@ pass parameters like what port to use. This is false by default because it's useful for reducing bandwidth usage. + --encoded-filesystem FS_LIMIT + --encoded-generated GEN_LIMIT + + Consume at most FS_LIMIT space for encoded filesystem files (in TEMP) and + at most GEN_LIMIT memory for encoded generated responses. 
+ + The arguments are an integer, optionally followed by case-insensitive + k (kilobyte), m (megabyte), g (gigabyte), t (terabyte), or p (petabyte), + optionally followed by case-insensitive b. + + This quota may be exceeded temporarily while servicing a request. + + --encoded-prune MAX_AGE + + Prune cached encoded data older than MAX_AGE. + + The argument is given in seconds, optionally followed by case-insensitive + s (seconds), m (minutes), h (hours), or d (days). + -x --strip-extensions Allow stripping index extensions from served paths: @@ -231,6 +253,10 @@ pass parameters like what port to use. N >= 2 – suppress startup except for auth data, if present N >= 3 – suppress all startup messages + -Q --quiet-time + + Don't prepend log lines with the timestamp. + -c --no-colour Don't colourise log output. @@ -241,6 +267,24 @@ pass parameters like what port to use. False by default. +## NOTES + +When returning files from the filesystem, the `ETag` returned +is based on the filesystem, i-node, and precise modification time. + +Naturally, this means that when serving files from filesystems with coarse +timestamps (like FAT with its 1s-resolution), changes may be hidden from +`ETag`-using user agents (if a file was modified at +2023-02-12T01:00:00.100000000, a UA requested and cached a response for +2023-02-12T01:00:00.000000000, then the file was modified again at +2023-02-12T01:00:00.900000000, subsequent requests with +`If-None-Match: 2023-02-12T01:00:00.000000000` will all return +304 Not Modified). + +This isn't really much of an issue, +don't use FAT as a High-Performance File System (or reload w/o cache), +and `If-Modified-Since` is affected with this by design, on all back-ends. + ## EXAMPLES `http` diff --git a/src/error.rs b/src/error.rs deleted file mode 100644 index 2e6e439..0000000 --- a/src/error.rs +++ /dev/null @@ -1,42 +0,0 @@ -use self::super::util::uppercase_first; -use std::borrow::Cow; -use std::fmt; - - -/// An application failure. 
-/// -/// # Examples -/// -/// ``` -/// # use https::Error; -/// assert_eq!(Error { -/// desc: "network", -/// op: "write", -/// more: "full buffer".into(), -/// }.to_string(), -/// "Writing network failed: full buffer."); -/// ``` -#[derive(Debug, Clone, Hash, PartialEq, Eq)] -pub struct Error { - /// The file the I/O operation regards. - pub desc: &'static str, - /// The failed operation. - /// - /// This should be lowercase and imperative ("create", "open"). - pub op: &'static str, - /// Additional data. - pub more: Cow<'static, str>, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Strip the last 'e', if any, so we get correct inflection for continuous times - let op = uppercase_first(if self.op.ends_with('e') { - &self.op[..self.op.len() - 1] - } else { - self.op - }); - - write!(f, "{}ing {} failed: {}.", op, self.desc, self.more) - } -} diff --git a/src/main.rs b/src/main.rs index 9f5e7c4..ef4bb97 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,66 +1,59 @@ +#![cfg_attr(target_os = "windows", feature(windows_by_handle))] + extern crate hyper_native_tls; extern crate percent_encoding; extern crate trivial_colours; -#[cfg(not(target_os = "windows"))] -extern crate os_str_generic; -#[macro_use] -extern crate lazy_static; extern crate serde_json; extern crate mime_guess; extern crate itertools; extern crate tabwriter; -extern crate lazysort; -extern crate unicase; +extern crate arrayvec; extern crate walkdir; -extern crate base64; extern crate blake3; extern crate brotli; extern crate flate2; extern crate rfsapi; #[cfg(target_os = "windows")] extern crate winapi; -extern crate bzip2; extern crate ctrlc; extern crate serde; -extern crate regex; extern crate cidr; #[macro_use] extern crate clap; extern crate iron; -#[cfg(not(any(target_os = "windows", target_os = "macos")))] extern crate libc; extern crate rand; extern crate time; extern crate xml; -mod error; mod options; pub mod ops; pub mod util; -pub use 
error::Error; +pub struct Error(pub String); pub use options::{LogLevel, Options}; use std::mem; +use libc::exit; use iron::Iron; use std::net::IpAddr; -use std::process::exit; +use std::time::Duration; use tabwriter::TabWriter; use std::io::{Write, stdout}; use std::collections::BTreeSet; -use std::sync::{Arc, Mutex, Condvar}; +use std::sync::{Mutex, Condvar}; use hyper_native_tls::NativeTlsServer; fn main() { let result = actual_main(); - exit(result); + unsafe { exit(result) } } fn actual_main() -> i32 { if let Err(err) = result_main() { - eprintln!("{}", err); + eprintln!("{}", err.0); 1 } else { 0 @@ -76,30 +69,18 @@ fn result_main() -> Result<(), Error> { opts.path_auth_data.insert(path, Some(ops::generate_auth_data())); } - let handler = ops::SimpleChain { - handler: ops::HttpHandler::new(&opts), + let handler: &_ = Box::leak(Box::new(ops::SimpleChain:: { + handler: ops::PruneChain::new(&opts), after: opts.request_bandwidth.map(ops::LimitBandwidthMiddleware::new), - }; + })); let mut responder = if let Some(p) = opts.port { if let Some(&((_, ref id), ref pw)) = opts.tls_data.as_ref() { Iron::new(handler).https((opts.bind_address, p), - NativeTlsServer::new(id, pw).map_err(|err| { - Error { - desc: "TLS certificate", - op: "open", - more: err.to_string().into(), - } - })?) + NativeTlsServer::new(id, pw).map_err(|err| Error(format!("Opening TLS certificate: {}", err)))?) 
} else { Iron::new(handler).http((opts.bind_address, p)) } - .map_err(|_| { - Error { - desc: "server", - op: "start", - more: "port taken".into(), - } - }) + .map_err(|_| Error(format!("Starting server: port taken"))) } else { ops::try_ports(handler, opts.bind_address, util::PORT_SCAN_LOWEST, util::PORT_SCAN_HIGHEST, &opts.tls_data) }?; @@ -108,9 +89,7 @@ fn result_main() -> Result<(), Error> { if opts.log_colour { print!("{}", trivial_colours::Reset); } - print!("Hosting \"{}\" on port {}", - opts.hosted_directory.0, - responder.socket.port()); + print!("Hosting \"{}\" on port {}", opts.hosted_directory.0, responder.socket.port()); if responder.socket.ip() != IpAddr::from([0, 0, 0, 0]) { print!(" under address {}", responder.socket.ip()); } @@ -132,9 +111,9 @@ fn result_main() -> Result<(), Error> { } for (ext, mime_type) in opts.mime_type_overrides { - match &ext[..] { + match &ext.to_string_lossy()[..] { "" => println!("Serving files with no extension as {}.", mime_type), - _ => println!("Serving files with .{} extension as {}.", ext, mime_type), + ext => println!("Serving files with .{} extension as {}.", ext, mime_type), } } @@ -178,18 +157,23 @@ fn result_main() -> Result<(), Error> { println!("Ctrl-C to stop."); println!(); } + let Options { encoded_prune: opts_encoded_prune, temp_directory: opts_temp_directory, generate_tls: opts_generate_tls, .. 
} = opts; + + static END_HANDLER: Condvar = Condvar::new(); + ctrlc::set_handler(|| END_HANDLER.notify_one()).unwrap(); + if opts_encoded_prune.is_some() { + loop { + if !END_HANDLER.wait_timeout(Mutex::new(()).lock().unwrap(), Duration::from_secs(handler.handler.prune_interval)).unwrap().1.timed_out() { + break; + } - let end_handler = Arc::new(Condvar::new()); - ctrlc::set_handler({ - let r = end_handler.clone(); - move || r.notify_one() - }) - .unwrap(); - drop(end_handler.wait(Mutex::new(()).lock().unwrap()).unwrap()); - responder.close().unwrap(); - - // This is necessary because the server isn't Drop::drop()ped when the responder is - ops::HttpHandler::clean_temp_dirs(&opts.temp_directory, opts.loglevel, opts.log_colour); + handler.handler.prune(); + } + } else { + drop(END_HANDLER.wait(Mutex::new(()).lock().unwrap()).unwrap()); + } + responder.close().unwrap(); + handler.handler.handler.clean_temp_dirs(&opts_temp_directory, opts_generate_tls); Ok(()) } diff --git a/src/ops/bandwidth.rs b/src/ops/bandwidth.rs index c82117e..d82b6a0 100644 --- a/src/ops/bandwidth.rs +++ b/src/ops/bandwidth.rs @@ -9,15 +9,19 @@ use std::thread; pub const DEFAULT_SLEEP: Duration = Duration::from_millis(1); -#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct SimpleChain { +#[derive(Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct SimpleChain + where &'static H: Handler +{ pub handler: H, pub after: Option, } -impl Handler for SimpleChain { +impl Handler for &'static SimpleChain + where &'static H: Handler +{ fn handle(&self, req: &mut Request) -> IronResult { - let resp = self.handler.handle(req)?; + let resp = (&self.handler).handle(req)?; match self.after.as_ref() { Some(am) => am.after(req, resp), None => Ok(resp), diff --git a/src/ops/mod.rs b/src/ops/mod.rs index 1669890..13a526b 100644 --- a/src/ops/mod.rs +++ b/src/ops/mod.rs @@ -1,19 +1,20 @@ use blake3; use serde_json; -use std::{fmt, str}; -use std::ffi::OsStr; -use std::borrow::Cow; use 
std::net::IpAddr; use serde::Serialize; -use unicase::UniCase; use std::sync::RwLock; -use lazysort::SortedBy; +use std::{fmt, str, mem}; use cidr::{Cidr, IpCidr}; +use time::precise_time_ns; +use arrayvec::ArrayString; use std::fs::{self, File}; use std::default::Default; use rand::{Rng, thread_rng}; use iron::modifiers::Header; use std::path::{PathBuf, Path}; +use std::ffi::{OsString, OsStr}; +use std::fmt::Write as FmtWrite; +use iron::headers::EncodingType; use iron::url::Url as GenericUrl; use mime_guess::get_mime_type_opt; use hyper_native_tls::NativeTlsServer; @@ -21,18 +22,18 @@ use std::collections::{BTreeMap, HashMap}; use self::super::{LogLevel, Options, Error}; use std::process::{ExitStatus, Command, Child, Stdio}; use rfsapi::{RawFsApiHeader, FilesetData, RawFileData}; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use rand::distributions::uniform::Uniform as UniformDistribution; use rand::distributions::Alphanumeric as AlphanumericDistribution; -use iron::mime::{Mime, SubLevel as MimeSubLevel, TopLevel as MimeTopLevel}; -use std::io::{self, ErrorKind as IoErrorKind, SeekFrom, Write, Error as IoError, Read, Seek}; -use iron::{headers, status, method, mime, IronResult, Listening, Response, TypeMap, Request, Handler, Iron}; -use self::super::util::{WwwAuthenticate, DisplayThree, CommaList, Spaces, Dav, url_path, file_hash, is_symlink, encode_str, encode_file, file_length, - html_response, file_binary, client_mobile, percent_decode, escape_specials, file_icon_suffix, is_actually_file, is_descendant_of, - response_encoding, detect_file_as_dir, encoding_extension, file_time_modified, file_time_modified_p, get_raw_fs_metadata, - human_readable_size, encode_tail_if_trimmed, is_nonexistent_descendant_of, USER_AGENT, ERROR_HTML, MAX_SYMLINKS, INDEX_EXTENSIONS, - MIN_ENCODING_GAIN, MAX_ENCODING_SIZE, MIN_ENCODING_SIZE, DAV_LEVEL_1_METHODS, DIRECTORY_LISTING_HTML, MOBILE_DIRECTORY_LISTING_HTML, - BLACKLISTED_ENCODING_EXTENSIONS}; - +use 
iron::{headers, status, method, IronResult, Listening, Response, Headers, Request, Handler, Iron}; +use std::io::{self, ErrorKind as IoErrorKind, BufReader, SeekFrom, Write, Error as IoError, Read, Seek}; +use iron::mime::{Mime, Attr as MimeAttr, Value as MimeAttrValue, SubLevel as MimeSubLevel, TopLevel as MimeTopLevel}; +use self::super::util::{HumanReadableSize, WwwAuthenticate, NoDoubleQuotes, NoHtmlLiteral, XLastModified, DisplayThree, CommaList, XOcMTime, MsAsS, Maybe, Dav, + url_path, file_etag, file_hash, set_mtime_f, is_symlink, encode_str, error_html, encode_file, file_length, file_binary, client_mobile, + percent_decode, escape_specials, file_icon_suffix, is_actually_file, is_descendant_of, response_encoding, detect_file_as_dir, + encoding_extension, file_time_modified, file_time_modified_p, dav_level_1_methods, get_raw_fs_metadata, encode_tail_if_trimmed, + extension_is_blacklisted, directory_listing_html, directory_listing_mobile_html, is_nonexistent_descendant_of, USER_AGENT, MAX_SYMLINKS, + INDEX_EXTENSIONS, MIN_ENCODING_GAIN, MAX_ENCODING_SIZE, MIN_ENCODING_SIZE}; macro_rules! log { ($logcfg:expr, $fmt:expr) => { @@ -40,8 +41,10 @@ macro_rules! log { use trivial_colours::{Reset as CReset, Colour as C}; if $logcfg.0 { - if $logcfg.1 { - print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset); + if $logcfg.2 { + if $logcfg.1 { + print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset); + } println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"), black = C::Black, red = C::Red, @@ -53,7 +56,9 @@ macro_rules! log { white = C::White, reset = CReset); } else { - print!("[{}] ", now().strftime("%F %T").unwrap()); + if $logcfg.1 { + print!("[{}] ", now().strftime("%F %T").unwrap()); + } println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"), black = "", red = "", @@ -72,8 +77,10 @@ macro_rules! 
log { use trivial_colours::{Reset as CReset, Colour as C}; if $logcfg.0 { - if $logcfg.1 { - print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset); + if $logcfg.2 { + if $logcfg.1 { + print!("{}[{}]{} ", C::Cyan, now().strftime("%F %T").unwrap(), CReset); + } println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"), $($arg)*, black = C::Black, @@ -86,7 +93,9 @@ macro_rules! log { white = C::White, reset = CReset); } else { - print!("[{}] ", now().strftime("%F %T").unwrap()); + if $logcfg.1 { + print!("[{}] ", now().strftime("%F %T").unwrap()); + } println!(concat!($fmt, "{black:.0}{red:.0}{green:.0}{yellow:.0}{blue:.0}{magenta:.0}{cyan:.0}{white:.0}{reset:.0}"), $($arg)*, black = "", @@ -103,14 +112,15 @@ macro_rules! log { }; } +mod prune; mod webdav; mod bandwidth; +pub use self::prune::PruneChain; pub use self::bandwidth::{LimitBandwidthMiddleware, SimpleChain}; -// TODO: ideally this String here would be Encoding instead but hyper is bad -type CacheT = HashMap<(blake3::Hash, String), Cnt>; +type CacheT = HashMap<(blake3::Hash, EncodingType), (Cnt, AtomicU64)>; pub struct HttpHandler { pub hosted_directory: (String, PathBuf), @@ -119,8 +129,8 @@ pub struct HttpHandler { pub generate_listings: bool, pub check_indices: bool, pub strip_extensions: bool, - /// (at all, log_colour) - pub log: (bool, bool), + /// (at all, log_time, log_colour) + pub log: (bool, bool, bool), pub webdav: bool, pub global_auth_data: Option<(String, Option)>, pub path_auth_data: BTreeMap)>>, @@ -128,10 +138,18 @@ pub struct HttpHandler { pub encoded_temp_dir: Option<(String, PathBuf)>, pub proxies: BTreeMap, pub proxy_redirs: BTreeMap, - pub mime_type_overrides: BTreeMap, + pub mime_type_overrides: BTreeMap, pub additional_headers: Vec<(String, Vec)>, - cache_gen: RwLock>>, - cache_fs: RwLock>, + + pub cache_gen: RwLock>>, + pub cache_fs_files: RwLock>, // etag -> cache key + pub cache_fs: RwLock>, + pub 
cache_gen_size: AtomicU64, + pub cache_fs_size: AtomicU64, + pub encoded_filesystem_limit: u64, + pub encoded_generated_limit: u64, + + pub allowed_methods: &'static [method::Method], } impl HttpHandler { @@ -153,6 +171,14 @@ impl HttpHandler { } } + let allowed_methods = [method::Options, method::Get, method::Head, method::Trace] + .iter() + .chain(dav_level_1_methods(opts.allow_writes).iter().filter(|_| opts.webdav)) + .chain([method::Put, method::Delete].iter().filter(|_| opts.allow_writes)) + .cloned() + .collect::>() + .leak(); + HttpHandler { hosted_directory: opts.hosted_directory.clone(), follow_symlinks: opts.follow_symlinks, @@ -160,7 +186,7 @@ impl HttpHandler { generate_listings: opts.generate_listings, check_indices: opts.check_indices, strip_extensions: opts.strip_extensions, - log: (opts.loglevel < LogLevel::NoServeStatus, opts.log_colour), + log: (opts.loglevel < LogLevel::NoServeStatus, opts.log_time, opts.log_colour), webdav: opts.webdav, global_auth_data: global_auth_data, path_auth_data: path_auth_data, @@ -168,21 +194,32 @@ impl HttpHandler { encoded_temp_dir: HttpHandler::temp_subdir(&opts.temp_directory, opts.encode_fs, "encoded"), cache_gen: Default::default(), cache_fs: Default::default(), + cache_fs_files: Default::default(), + cache_gen_size: Default::default(), + cache_fs_size: Default::default(), + encoded_filesystem_limit: opts.encoded_filesystem_limit.unwrap_or(u64::MAX), + encoded_generated_limit: opts.encoded_generated_limit.unwrap_or(u64::MAX), proxies: opts.proxies.clone(), proxy_redirs: opts.proxy_redirs.clone(), mime_type_overrides: opts.mime_type_overrides.clone(), additional_headers: opts.additional_headers.clone(), + allowed_methods: allowed_methods, } } - pub fn clean_temp_dirs(temp_dir: &(String, PathBuf), loglevel: LogLevel, log_colour: bool) { - for (temp_name, temp_dir) in ["writes", "encoded", "tls"].iter().flat_map(|tn| HttpHandler::temp_subdir(temp_dir, true, tn)) { - if temp_dir.exists() && 
fs::remove_dir_all(&temp_dir).is_ok() { - log!((loglevel < LogLevel::NoServeStatus, log_colour), - "Deleted temp dir {magenta}{}{reset}", - temp_name); + pub fn clean_temp_dirs(&self, temp_directory: &(String, PathBuf), generate_tls: bool) { + mem::forget(self.cache_fs_files.write()); + mem::forget(self.cache_fs.write()); + + let tls = HttpHandler::temp_subdir(temp_directory, generate_tls, "tls"); + for (temp_name, temp_dir) in [self.writes_temp_dir.as_ref(), self.encoded_temp_dir.as_ref(), tls.as_ref()].iter().flatten() { + if fs::remove_dir_all(&temp_dir).is_ok() { + log!(self.log, "Deleted temp dir {magenta}{}{reset}", temp_name); } } + if fs::remove_dir(&temp_directory.1).is_ok() { + log!(self.log, "Deleted temp dir {magenta}{}{reset}", temp_directory.0); + } } fn temp_subdir(&(ref temp_name, ref temp_dir): &(String, PathBuf), flag: bool, name: &str) -> Option<(String, PathBuf)> { @@ -202,7 +239,7 @@ impl HttpHandler { } } -impl Handler for HttpHandler { +impl Handler for &'static HttpHandler { fn handle(&self, req: &mut Request) -> IronResult { if self.global_auth_data.is_some() || !self.path_auth_data.is_empty() { if let Some(resp) = self.verify_auth(req)? { @@ -222,28 +259,20 @@ impl Handler for HttpHandler { }) } method::Trace => self.handle_trace(req), - method::Extension(ref ext) => { - if self.webdav { - match &ext[..] 
{ - "COPY" => self.handle_webdav_copy(req), - "MKCOL" => self.handle_webdav_mkcol(req), - "MOVE" => self.handle_webdav_move(req), - "PROPFIND" => self.handle_webdav_propfind(req), - "PROPPATCH" => self.handle_webdav_proppatch(req), - - _ => self.handle_bad_method(req), - } - } else { - self.handle_bad_method(req) - } - } + + method::DavCopy if self.webdav => self.handle_webdav_copy(req), + method::DavMkcol if self.webdav => self.handle_webdav_mkcol(req), + method::DavMove if self.webdav => self.handle_webdav_move(req), + method::DavPropfind if self.webdav => self.handle_webdav_propfind(req), + method::DavProppatch if self.webdav => self.handle_webdav_proppatch(req), + _ => self.handle_bad_method(req), }?; if self.webdav { resp.headers.set(Dav::LEVEL_1); } for (h, v) in &self.additional_headers { - resp.headers.append_raw(h.clone(), v.clone()); + resp.headers.append_raw(&h[..], v[..].into()); } Ok(resp) } @@ -321,19 +350,7 @@ impl HttpHandler { fn handle_options(&self, req: &mut Request) -> IronResult { log!(self.log, "{} asked for {red}OPTIONS{reset}", self.remote_addresses(&req)); - - let mut allowed_methods = Vec::with_capacity(6 + - if self.webdav { - DAV_LEVEL_1_METHODS.len() - } else { - 0 - }); - allowed_methods.extend_from_slice(&[method::Options, method::Get, method::Put, method::Delete, method::Head, method::Trace]); - if self.webdav { - allowed_methods.extend_from_slice(&DAV_LEVEL_1_METHODS); - } - - Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.to_string())), Header(headers::Allow(allowed_methods))))) + Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.into())), Header(headers::Allow(self.allowed_methods.into()))))) } fn handle_get(&self, req: &mut Request) -> IronResult { @@ -355,13 +372,13 @@ impl HttpHandler { } let is_file = is_actually_file(&req_p.metadata().expect("Failed to get file metadata").file_type(), &req_p); - let range = req.headers.get().map(|r: &headers::Range| (*r).clone()); + let range = 
req.headers.get_mut().map(|r: &mut headers::Range| mem::replace(r, headers::Range::Bytes(vec![]))); let raw_fs = req.headers.get().map(|r: &RawFsApiHeader| r.0).unwrap_or(false); if is_file { if raw_fs { self.handle_get_raw_fs_file(req, req_p) - } else if range.is_some() { - self.handle_get_file_range(req, req_p, range.unwrap()) + } else if let Some(range) = range { + self.handle_get_file_range(req, req_p, range) } else { self.handle_get_file(req, req_p) } @@ -382,9 +399,7 @@ impl HttpHandler { req.url, &cause[3..cause.len() - 4]); // Strip

tags - self.handle_generated_response_encoding(req, - status::BadRequest, - html_response(ERROR_HTML, &["400 Bad Request", "The request URL was invalid.", cause])) + self.handle_generated_response_encoding(req, status::BadRequest, error_html("400 Bad Request", "The request URL was invalid.", cause)) } #[inline(always)] @@ -402,8 +417,9 @@ impl HttpHandler { let url_p = url_path(&req.url); self.handle_generated_response_encoding(req, status, - html_response(ERROR_HTML, - &[&status.to_string()[..], &format!("The requested entity \"{}\" doesn't exist.", url_p), ""])) + error_html(&status.canonical_reason().unwrap()[..], + format_args!("The requested entity \"{}\" doesn't exist.", url_p), + "")) } fn handle_get_raw_fs_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult { @@ -420,26 +436,59 @@ impl HttpHandler { }) } + fn etag_match(req_tags: &[headers::EntityTag], etag: &str) -> bool { + req_tags.iter().any(|retag| retag.tag() == etag) + } + + fn should_304_path(req: &mut Request, req_p: &Path, etag: &str) -> bool { + if let Some(headers::IfNoneMatch::Items(inm)) = req.headers.get::() { + if HttpHandler::etag_match(inm, &etag) { + return true; + } + } else if let Some(headers::IfModifiedSince(since)) = req.headers.get::() { + // unavoidable truncation, the timestamp format is second-resolution; to_timespec() is what does + if file_time_modified_p(req_p).to_timespec().sec <= since.0.to_timespec().sec { + return true; + } + } + + return false; + } + fn handle_get_file_range(&self, req: &mut Request, req_p: PathBuf, range: headers::Range) -> IronResult { match range { headers::Range::Bytes(ref brs) => { if brs.len() == 1 { - let flen = file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p); + let metadata = req_p.metadata().expect("Failed to get requested file metadata"); + let flen = file_length(&metadata, &req_p); + + let mut etag = file_etag(&metadata).into_bytes(); // normaletag+123-41231 + let _ = write!(&mut etag, "+{}", 
brs[0]); + let etag = unsafe { String::from_utf8_unchecked(etag) }; + if HttpHandler::should_304_path(req, &req_p, &etag) { + log!(self.log, "{:w$} Not Modified", "", w = self.remote_addresses(req).width()); + return Ok(Response::with((status::NotModified, + (Header(headers::Server(USER_AGENT.into())), + Header(headers::LastModified(headers::HttpDate(file_time_modified_p(&req_p)))), + Header(headers::AcceptRanges(headers::RangeUnit::Bytes))), + Header(headers::ETag(headers::EntityTag::strong(etag)))))); + } + match brs[0] { // Cases where from is bigger than to are filtered out by iron so can never happen - headers::ByteRangeSpec::FromTo(from, to) => self.handle_get_file_closed_range(req, req_p, from, to), + headers::ByteRangeSpec::FromTo(from, to) => self.handle_get_file_closed_range(req, req_p, from, to, etag), headers::ByteRangeSpec::AllFrom(from) => { if flen < from { - self.handle_get_file_empty_range(req, req_p, from, flen) + self.handle_get_file_empty_range(req, req_p, from, flen, etag) } else { - self.handle_get_file_right_opened_range(req, req_p, from) + self.handle_get_file_right_opened_range(req, req_p, from, etag) } } headers::ByteRangeSpec::Last(from) => { if flen < from { - self.handle_get_file_empty_range(req, req_p, from, flen) + self.handle_get_file_empty_range(req, req_p, from, flen, etag) } else { - self.handle_get_file_left_opened_range(req, req_p, from) + self.handle_get_file_left_opened_range(req, req_p, from, etag) } } } @@ -451,7 +500,7 @@ impl HttpHandler { } } - fn handle_get_file_closed_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult { + fn handle_get_file_closed_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64, etag: String) -> IronResult { let mime_type = self.guess_mime_type(&req_p); log!(self.log, "{} was served byte range {}-{} of file {magenta}{}{reset} as {blue}{}{reset}", @@ -461,24 +510,24 @@ impl HttpHandler { req_p.display(), mime_type); - let mut buf = vec![0; (to + 1 - from) 
as usize]; let mut f = File::open(&req_p).expect("Failed to open requested file"); f.seek(SeekFrom::Start(from)).expect("Failed to seek requested file"); - f.read_exact(&mut buf).expect("Failed to read requested file"); Ok(Response::with((status::PartialContent, - (Header(headers::Server(USER_AGENT.to_string())), + (Header(headers::Server(USER_AGENT.into())), Header(headers::LastModified(headers::HttpDate(file_time_modified_p(&req_p)))), Header(headers::ContentRange(headers::ContentRangeSpec::Bytes { range: Some((from, to)), instance_length: Some(file_length(&f.metadata().expect("Failed to get requested file metadata"), &req_p)), })), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))), - buf, - mime_type))) + Header(headers::ETag(headers::EntityTag::strong(etag))), + Header(headers::AcceptRanges(headers::RangeUnit::Bytes))), + f, + mime_type, + Header(headers::ContentLength(to + 1 - from))))) } - fn handle_get_file_right_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult { + fn handle_get_file_right_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64, etag: String) -> IronResult { let mime_type = self.guess_mime_type(&req_p); log!(self.log, "{} was served file {magenta}{}{reset} from byte {} as {blue}{}{reset}", @@ -487,11 +536,10 @@ impl HttpHandler { from, mime_type); - let flen = file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p); - self.handle_get_file_opened_range(req_p, SeekFrom::Start(from), from, flen - from, mime_type) + self.handle_get_file_opened_range(req_p, |flen| (SeekFrom::Start(from), from, flen - from), mime_type, etag) } - fn handle_get_file_left_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64) -> IronResult { + fn handle_get_file_left_opened_range(&self, req: &mut Request, req_p: PathBuf, from: u64, etag: String) -> IronResult { let mime_type = self.guess_mime_type(&req_p); log!(self.log, "{} was served last {} bytes of file 
{magenta}{}{reset} as {blue}{}{reset}", @@ -500,41 +548,41 @@ impl HttpHandler { req_p.display(), mime_type); - let flen = file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p); - self.handle_get_file_opened_range(req_p, SeekFrom::End(-(from as i64)), flen - from, from, mime_type) + self.handle_get_file_opened_range(req_p, |flen| (SeekFrom::End(-(from as i64)), flen - from, from), mime_type, etag) } - fn handle_get_file_opened_range(&self, req_p: PathBuf, s: SeekFrom, b_from: u64, clen: u64, mt: Mime) -> IronResult { + fn handle_get_file_opened_range (SeekFrom, u64, u64)>(&self, req_p: PathBuf, cb: F, mt: Mime, etag: String) -> IronResult { let mut f = File::open(&req_p).expect("Failed to open requested file"); let fmeta = f.metadata().expect("Failed to get requested file metadata"); let flen = file_length(&fmeta, &req_p); + let (s, b_from, clen) = cb(flen); f.seek(s).expect("Failed to seek requested file"); Ok(Response::with((status::PartialContent, f, - (Header(headers::Server(USER_AGENT.to_string())), + (Header(headers::Server(USER_AGENT.into())), Header(headers::LastModified(headers::HttpDate(file_time_modified(&fmeta)))), Header(headers::ContentRange(headers::ContentRangeSpec::Bytes { range: Some((b_from, flen - 1)), instance_length: Some(flen), })), + Header(headers::ETag(headers::EntityTag::strong(etag))), Header(headers::ContentLength(clen)), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))), + Header(headers::AcceptRanges(headers::RangeUnit::Bytes))), mt))) } fn handle_invalid_range(&self, req: &mut Request, req_p: PathBuf, range: &headers::Range, reason: &str) -> IronResult { self.handle_generated_response_encoding(req, status::RangeNotSatisfiable, - html_response(ERROR_HTML, - &["416 Range Not Satisfiable", - &format!("Requested range {} could not be fulfilled for file {}.", - range, - req_p.display()), - reason])) + error_html("416 Range Not Satisfiable", + format_args!("Requested range {} could not be 
fulfilled for file {}.", + range, + req_p.display()), + reason)) } - fn handle_get_file_empty_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64) -> IronResult { + fn handle_get_file_empty_range(&self, req: &mut Request, req_p: PathBuf, from: u64, to: u64, etag: String) -> IronResult { let mime_type = self.guess_mime_type(&req_p); log!(self.log, "{} was served an empty range from file {magenta}{}{reset} as {blue}{}{reset}", @@ -543,13 +591,14 @@ impl HttpHandler { mime_type); Ok(Response::with((status::NoContent, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::LastModified(headers::HttpDate(file_time_modified_p(&req_p)))), - Header(headers::ContentRange(headers::ContentRangeSpec::Bytes { - range: Some((from, to)), - instance_length: Some(file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p)), - })), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])), + (Header(headers::Server(USER_AGENT.into())), + Header(headers::LastModified(headers::HttpDate(file_time_modified_p(&req_p)))), + Header(headers::ContentRange(headers::ContentRangeSpec::Bytes { + range: Some((from, to)), + instance_length: Some(file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p)), + }))), + Header(headers::ETag(headers::EntityTag::strong(etag))), + Header(headers::AcceptRanges(headers::RangeUnit::Bytes)), mime_type))) } @@ -562,110 +611,155 @@ impl HttpHandler { mime_type); let metadata = req_p.metadata().expect("Failed to get requested file metadata"); + let etag = file_etag(&metadata); + let headers = (Header(headers::Server(USER_AGENT.into())), + Header(headers::LastModified(headers::HttpDate(file_time_modified(&metadata)))), + Header(headers::AcceptRanges(headers::RangeUnit::Bytes))); + if HttpHandler::should_304_path(req, &req_p, &etag) { + log!(self.log, "{:w$} Not Modified", "", w = self.remote_addresses(req).width()); + return Ok(Response::with((status::NotModified, headers, 
Header(headers::ETag(headers::EntityTag::strong(etag)))))); + } + let flen = file_length(&metadata, &req_p); if self.encoded_temp_dir.is_some() && flen > MIN_ENCODING_SIZE && flen < MAX_ENCODING_SIZE && - req_p.extension().and_then(|s| s.to_str()).map(|s| !BLACKLISTED_ENCODING_EXTENSIONS.contains(&UniCase::new(s))).unwrap_or(true) { - self.handle_get_file_encoded(req, req_p, mime_type) + req_p.extension().map(|s| !extension_is_blacklisted(s)).unwrap_or(true) { + self.handle_get_file_encoded(req, req_p, mime_type, headers, etag) } else { let file = match File::open(&req_p) { Ok(file) => file, Err(err) => return self.handle_requested_entity_unopenable(req, err, "file"), }; Ok(Response::with((status::Ok, - (Header(headers::Server(USER_AGENT.to_string())), - Header(headers::LastModified(headers::HttpDate(file_time_modified(&metadata)))), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))), + headers, + Header(headers::ETag(headers::EntityTag::strong(etag))), file, - Header(headers::ContentLength(file_length(&metadata, &req_p))), - mime_type))) + mime_type, + Header(headers::ContentLength(file_length(&metadata, &req_p)))))) } } - fn handle_get_file_encoded(&self, req: &mut Request, req_p: PathBuf, mt: Mime) -> IronResult { + fn handle_get_file_encoded(&self, req: &mut Request, req_p: PathBuf, mt: Mime, + headers: (Header, Header, Header), etag: String) + -> IronResult { if let Some(encoding) = req.headers.get_mut::().and_then(|es| response_encoding(&mut **es)) { self.create_temp_dir(&self.encoded_temp_dir); - let cache_key = match file_hash(&req_p) { - Ok(h) => (h, encoding.to_string()), - Err(err) => return self.handle_requested_entity_unopenable(req, err, "file"), + let hash = self.cache_fs_files.read().expect("Filesystem file cache read lock poisoned").get(&etag).cloned(); + let hash = match hash { + Some(hash) => hash, + None => { + match file_hash(&req_p) { + Ok(h) => { + self.cache_fs_files.write().expect("Filesystem file cache write lock 
poisoned").insert(etag.clone(), h); + h + } + Err(err) => return self.handle_requested_entity_unopenable(req, err, "file"), + } + } }; + let cache_key = (hash, encoding.0); - { + let forgor = { match self.cache_fs.read().expect("Filesystem cache read lock poisoned").get(&cache_key) { - Some(&(ref resp_p, true)) => { - log!(self.log, - "{} encoded as {} for {:.1}% ratio (cached)", - Spaces(self.remote_addresses(req).to_string().len()), - encoding, - ((file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p) as f64) / - (file_length(&resp_p.metadata().expect("Failed to get encoded file metadata"), &resp_p) as f64)) * - 100f64); - - return Ok(Response::with((status::Ok, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::ContentEncoding(vec![encoding])), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])), - resp_p.as_path(), - mt))); + Some(&((ref resp_p, true, _), ref atime)) => { + match File::open(resp_p) { + Ok(resp) => { + atime.store(precise_time_ns(), AtomicOrdering::Relaxed); + log!(self.log, + "{:w$} encoded as {} for {:.1}% ratio (cached)", + "", + encoding, + ((file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p) as f64) / + (file_length(&resp.metadata().expect("Failed to get encoded file metadata"), &resp_p) as f64)) * + 100f64, + w = self.remote_addresses(req).width()); + + return Ok(Response::with((status::Ok, + headers, + Header(headers::ETag(headers::EntityTag::strong(etag))), + Header(headers::ContentEncoding([encoding].into())), + resp, + mt))); + }, + Err(err) if err.kind() == IoErrorKind::NotFound => true, + e @ Err(_) => { + e.expect("Failed to get encoded file metadata"); + unsafe { std::hint::unreachable_unchecked() } + }, + } } - Some(&(ref resp_p, false)) => { - return Ok(Response::with((status::Ok, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::LastModified(headers::HttpDate(file_time_modified_p(resp_p)))), - 
Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])), - resp_p.as_path(), - mt))); + Some(&((_, false, _), _)) => { + let file = match File::open(&req_p) { + Ok(file) => file, + Err(err) => return self.handle_requested_entity_unopenable(req, err, "file"), + }; + return Ok(Response::with((status::Ok, headers, Header(headers::ETag(headers::EntityTag::strong(etag))), file, mt))); } - None => (), + None => false, } + }; + if forgor { + self.cache_fs_files.write().expect("Filesystem file cache write lock poisoned").retain(|_, v| *v == hash); + self.cache_fs.write().expect("Filesystem cache write lock poisoned").remove(&cache_key); + return self.handle_get_file_encoded(req, req_p, mt, headers, etag) } let mut resp_p = self.encoded_temp_dir.as_ref().unwrap().1.join(cache_key.0.to_hex().as_str()); match (req_p.extension(), encoding_extension(&encoding)) { - (Some(ext), Some(enc)) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), enc)), - (Some(ext), None) => resp_p.set_extension(format!("{}.{}", ext.to_str().unwrap_or("ext"), encoding)), + (Some(ext), Some(enc)) => { + let mut new_ext = ext.as_encoded_bytes().to_vec(); + new_ext.push(b'.'); + new_ext.extend_from_slice(enc.as_bytes()); + resp_p.set_extension(unsafe { OsStr::from_encoded_bytes_unchecked(&new_ext) }) + } (None, Some(enc)) => resp_p.set_extension(enc), - (None, None) => resp_p.set_extension(format!("{}", encoding)), + (_, None) => unsafe { std::hint::unreachable_unchecked() }, }; if encode_file(&req_p, &resp_p, &encoding) { - let gain = (file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p) as f64) / - (file_length(&resp_p.metadata().expect("Failed to get encoded file metadata"), &resp_p) as f64); - if gain < MIN_ENCODING_GAIN { + let resp_p_len = file_length(&resp_p.metadata().expect("Failed to get encoded file metadata"), &resp_p); + let gain = (file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p) as f64) / 
(resp_p_len as f64); + if gain < MIN_ENCODING_GAIN || resp_p_len > self.encoded_filesystem_limit { let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned"); - cache.insert(cache_key, (req_p.clone(), false)); + cache.insert(cache_key, ((PathBuf::new(), false, 0), AtomicU64::new(u64::MAX))); fs::remove_file(resp_p).expect("Failed to remove too big encoded file"); } else { log!(self.log, - "{} encoded as {} for {:.1}% ratio", - Spaces(self.remote_addresses(req).to_string().len()), + "{:w$} encoded as {} for {:.1}% ratio", + "", encoding, - gain * 100f64); + gain * 100f64, + w = self.remote_addresses(req).width()); let mut cache = self.cache_fs.write().expect("Filesystem cache write lock poisoned"); - cache.insert(cache_key, (resp_p.clone(), true)); + self.cache_fs_size.fetch_add(resp_p_len, AtomicOrdering::Relaxed); + cache.insert(cache_key, ((resp_p.clone(), true, resp_p_len), AtomicU64::new(precise_time_ns()))); return Ok(Response::with((status::Ok, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::ContentEncoding(vec![encoding])), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes])), + headers, + Header(headers::ETag(headers::EntityTag::strong(etag))), + Header(headers::ContentEncoding([encoding].into())), resp_p.as_path(), mt))); } } else { log!(self.log, - "{} failed to encode as {}, sending identity", - Spaces(self.remote_addresses(req).to_string().len()), - encoding); + "{:w$} failed to encode as {}, sending identity", + "", + encoding, + w = self.remote_addresses(req).width()); } } + let file = match File::open(&req_p) { + Ok(file) => file, + Err(err) => return self.handle_requested_entity_unopenable(req, err, "file"), + }; Ok(Response::with((status::Ok, - (Header(headers::Server(USER_AGENT.to_string())), - Header(headers::LastModified(headers::HttpDate(file_time_modified_p(&req_p)))), - Header(headers::AcceptRanges(vec![headers::RangeUnit::Bytes]))), - req_p.as_path(), - 
Header(headers::ContentLength(file_length(&req_p.metadata().expect("Failed to get requested file metadata"), &req_p))), + headers, + Header(headers::ETag(headers::EntityTag::strong(etag))), + Header(headers::ContentLength(file_length(&file.metadata().expect("Failed to get requested file metadata"), &req_p))), + file, mt))) } @@ -698,7 +792,7 @@ impl HttpHandler { get_raw_fs_metadata(f.path()) } else { RawFileData { - mime_type: "text/directory".parse().unwrap(), + mime_type: Mime(MimeTopLevel::Text, MimeSubLevel::Ext("directory".to_string()), Default::default()), // text/directory name: f.file_name().into_string().expect("Failed to get file name"), last_modified: file_time_modified_p(&f.path()), size: 0, @@ -723,9 +817,10 @@ impl HttpHandler { if req.url.as_ref().path_segments().unwrap().next_back() == Some("") { let r = self.handle_get_file(req, idx); log!(self.log, - "{} found index file for directory {magenta}{}{reset}", - Spaces(self.remote_addresses(req).to_string().len()), - req_p.display()); + "{:w$} found index file for directory {magenta}{}{reset}", + "", + req_p.display(), + w = self.remote_addresses(req).width()); return r; } else { return self.handle_get_dir_index_no_slash(req, e); @@ -781,233 +876,279 @@ impl HttpHandler { // https://cloud.githubusercontent.com/assets/6709544/21442017/9eb20d64-c89b-11e6-8c7b-888b5f70a403.png // - With following slash: // https://cloud.githubusercontent.com/assets/6709544/21442028/a50918c4-c89b-11e6-8936-c29896947f6a.png - Ok(Response::with((status::SeeOther, Header(headers::Server(USER_AGENT.to_string())), Header(headers::Location(new_url))))) + Ok(Response::with((status::SeeOther, Header(headers::Server(USER_AGENT.into())), Header(headers::Location(new_url))))) } fn handle_get_mobile_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult { - let relpath = (url_path(&req.url) + "/").replace("//", "/"); - let is_root = req.url.as_ref().path_segments().unwrap().count() + 
!req.url.as_ref().as_str().ends_with('/') as usize == 1; + let relpath = url_path(&req.url); + let is_root = relpath == "/"; + let mut relpath_escaped = escape_specials(&relpath); + if relpath_escaped.as_bytes().last() != Some(&b'/') { + relpath_escaped.to_mut().push('/'); + } let show_file_management_controls = self.writes_temp_dir.is_some(); log!(self.log, "{} was served mobile directory listing for {magenta}{}{reset}", self.remote_addresses(&req), req_p.display()); - let parent_s = if is_root { - String::new() - } else { - let rel_noslash = &relpath[0..relpath.len() - 1]; - let slash_idx = rel_noslash.rfind('/'); - format!("Parent directory \ - @\ - {} UTC", - file_time_modified_p(req_p.parent().unwrap_or(&req_p)) - .strftime("%F %T") - .unwrap(), - up_path = escape_specials(slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or("")), - up_path_slash = if slash_idx.is_some() { "/" } else { "" }) + let parent_f = |out: &mut Vec| if !is_root { + let mut parentpath = relpath_escaped.as_bytes(); + while parentpath.last() == Some(&b'/') { + parentpath = &parentpath[0..parentpath.len() - 1]; + } + while parentpath.last() != Some(&b'/') { + parentpath = &parentpath[0..parentpath.len() - 1]; + } + let modified = file_time_modified_p(req_p.parent().unwrap_or(&req_p)); + let modified_ts = modified.to_timespec(); + let _ = write!(out, + r#"

"#, + modified_ts.sec, + modified_ts.nsec / 1000_000, + modified.strftime("%F %T").unwrap(), + up_path = unsafe { str::from_utf8_unchecked(parentpath) }); }; - let list_s = req_p.read_dir() - .expect("Failed to read requested directory") - .map(|p| p.expect("Failed to iterate over requested directory")) - .filter(|f| { - let fp = f.path(); - let mut symlink = false; - !((!self.follow_symlinks && - { - symlink = is_symlink(&fp); - symlink - }) || (self.follow_symlinks && self.sandbox_symlinks && symlink && !is_descendant_of(fp, &self.hosted_directory.1))) - }) - .sorted_by(|lhs, rhs| { + let list_f = |out: &mut Vec| { + let mut list = req_p.read_dir() + .expect("Failed to read requested directory") + .map(|p| p.expect("Failed to iterate over requested directory")) + .filter(|f| { + let fp = f.path(); + let mut symlink = false; + !((!self.follow_symlinks && + { + symlink = is_symlink(&fp); + symlink + }) || (self.follow_symlinks && self.sandbox_symlinks && symlink && !is_descendant_of(fp, &self.hosted_directory.1))) + }) + .collect::>(); + list.sort_by(|lhs, rhs| { (is_actually_file(&lhs.file_type().expect("Failed to get file type"), &lhs.path()), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase()) .cmp(&(is_actually_file(&rhs.file_type().expect("Failed to get file type"), &rhs.path()), rhs.file_name().to_str().expect("Failed to get file name").to_lowercase())) - }) - .fold("".to_string(), |cur, f| { + }); + for f in list { let is_file = is_actually_file(&f.file_type().expect("Failed to get file type"), &f.path()); let fmeta = f.metadata().expect("Failed to get requested file metadata"); let fname = f.file_name().into_string().expect("Failed to get file name"); let path = f.path(); - - format!("{}{}{}{} \ - @{} UTC{}\n", - cur, - if is_file { "file" } else { "dir" }, - file_icon_suffix(&path, is_file), - path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname), - fname.replace('&', 
"&").replace('<', "<"), - if is_file { "" } else { "/" }, - if show_file_management_controls { - DisplayThree("Delete", - if self.webdav { - " Rename" - } else { - "" - }, - "") - } else { - DisplayThree("", "", "") - }, - file_time_modified(&fmeta).strftime("%F %T").unwrap(), - if is_file { - DisplayThree("", human_readable_size(file_length(&fmeta, &path)), "") - } else { - DisplayThree("", String::new(), "") - }, - path = escape_specials(format!("/{}", relpath).replace("//", "/")), - fname = encode_tail_if_trimmed(escape_specials(&fname))) - }); + let modified = file_time_modified(&fmeta); + let modified_ts = modified.to_timespec(); + + let _ = writeln!(out, + concat!(r#"
{}{}{}
"#, + r#"
{}
"#), + NoDoubleQuotes(&fname), + if is_file { "file" } else { "dir" }, + file_icon_suffix(&path, is_file), + NoHtmlLiteral(&fname), + if is_file { "" } else { "/" }, + if show_file_management_controls { + DisplayThree(r#"Delete"#, + if self.webdav { + r#" Rename"# + } else { + "" + }, + "") + } else { + DisplayThree("", "", "") + }, + modified_ts.sec, + modified_ts.nsec / 1000_000, + modified.strftime("%F %T").unwrap(), + if is_file { + DisplayThree("", Maybe(Some(HumanReadableSize(file_length(&fmeta, &path)))), "") + } else { + DisplayThree("", Maybe(None), "") + }, + path = relpath_escaped, + fname = encode_tail_if_trimmed(escape_specials(&fname))); + } + }; self.handle_generated_response_encoding(req, status::Ok, - html_response(MOBILE_DIRECTORY_LISTING_HTML, - &[&relpath[..], - if is_root { "" } else { "/" }, - if show_file_management_controls { - r#""# - } else { - "" - }, - &parent_s[..], - &list_s[..], - if show_file_management_controls { - " \ - Upload files: \ - " - } else { - "" - }, - if show_file_management_controls && self.webdav { - " - Create directory" - } else { - "" - }])) + directory_listing_mobile_html(&relpath_escaped[!is_root as usize..], + if show_file_management_controls { + concat!(r#""#) + } else { + "" + }, + parent_f, + list_f, + if show_file_management_controls { + concat!(r#"Upload files: "#, + r#""#) + } else { + "" + }, + if show_file_management_controls && self.webdav { + r#"Create directory"# + } else { + "" + })) } fn handle_get_dir_listing(&self, req: &mut Request, req_p: PathBuf) -> IronResult { - let relpath = (url_path(&req.url) + "/").replace("//", "/"); - let is_root = req.url.as_ref().path_segments().unwrap().count() + !req.url.as_ref().as_str().ends_with('/') as usize == 1; + let relpath = url_path(&req.url); + let is_root = relpath == "/"; + let mut relpath_escaped = escape_specials(&relpath); + if relpath_escaped.as_bytes().last() != Some(&b'/') { + relpath_escaped.to_mut().push('/'); + } let 
show_file_management_controls = self.writes_temp_dir.is_some(); log!(self.log, "{} was served directory listing for {magenta}{}{reset}", self.remote_addresses(&req), req_p.display()); - let parent_s = if is_root { - String::new() - } else { - let rel_noslash = &relpath[0..relpath.len() - 1]; - let slash_idx = rel_noslash.rfind('/'); - format!(" \ - Parent directory \ - {} \ -   \ -  ", - file_time_modified_p(req_p.parent().unwrap_or(&req_p)).strftime("%F %T").unwrap(), - up_path = escape_specials(slash_idx.map(|i| &rel_noslash[0..i]).unwrap_or("")), - up_path_slash = if slash_idx.is_some() { "/" } else { "" }) + let parent_f = |out: &mut Vec| if !is_root { + let mut parentpath = relpath_escaped.as_bytes(); + while parentpath.last() == Some(&b'/') { + parentpath = &parentpath[0..parentpath.len() - 1]; + } + while parentpath.last() != Some(&b'/') { + parentpath = &parentpath[0..parentpath.len() - 1]; + } + let modified = file_time_modified_p(req_p.parent().unwrap_or(&req_p)); + let modified_ts = modified.to_timespec(); + let _ = write!(out, + " Parent directory \ +    ", + modified_ts.sec, + modified_ts.nsec / 1000_000, + modified.strftime("%F %T").unwrap(), + up_path = unsafe { str::from_utf8_unchecked(parentpath) }); }; - let rd = match req_p.read_dir() { Ok(rd) => rd, Err(err) => return self.handle_requested_entity_unopenable(req, err, "directory"), }; - let list_s = rd.map(|p| p.expect("Failed to iterate over requested directory")) - .filter(|f| { - let fp = f.path(); - let mut symlink = false; - !((!self.follow_symlinks && - { - symlink = is_symlink(&fp); - symlink - }) || (self.follow_symlinks && self.sandbox_symlinks && symlink && !is_descendant_of(fp, &self.hosted_directory.1))) - }) - .sorted_by(|lhs, rhs| { + let list_f = |out: &mut Vec| { + let mut list = rd.map(|p| p.expect("Failed to iterate over requested directory")) + .filter(|f| { + let fp = f.path(); + let mut symlink = false; + !((!self.follow_symlinks && + { + symlink = is_symlink(&fp); + symlink 
+ }) || (self.follow_symlinks && self.sandbox_symlinks && symlink && !is_descendant_of(fp, &self.hosted_directory.1))) + }) + .collect::>(); + list.sort_by(|lhs, rhs| { (is_actually_file(&lhs.file_type().expect("Failed to get file type"), &lhs.path()), lhs.file_name().to_str().expect("Failed to get file name").to_lowercase()) .cmp(&(is_actually_file(&rhs.file_type().expect("Failed to get file type"), &rhs.path()), rhs.file_name().to_str().expect("Failed to get file name").to_lowercase())) - }) - .fold("".to_string(), |cur, f| { - let is_file = is_actually_file(&f.file_type().expect("Failed to get file type"), &f.path()); + }); + for f in list { + let path = f.path(); + let is_file = is_actually_file(&f.file_type().expect("Failed to get file type"), &path); let fmeta = f.metadata().expect("Failed to get requested file metadata"); let fname = f.file_name().into_string().expect("Failed to get file name"); - let path = f.path(); let len = file_length(&fmeta, &path); - - format!("{} \ - {}{} {} \ - {}{}{} {}\n", - cur, - path.file_name().map(|p| p.to_str().expect("Filename not UTF-8").replace('.', "_")).as_ref().unwrap_or(&fname), - if is_file { "file" } else { "dir" }, - file_icon_suffix(&path, is_file), - fname.replace('&', "&").replace('<', "<"), - if is_file { "" } else { "/" }, - file_time_modified(&fmeta).strftime("%F %T").unwrap(), - if is_file { - DisplayThree("") - } else { - DisplayThree(" ", String::new(), "") - }, - if is_file { - human_readable_size(len) - } else { - String::new() - }, - if is_file { "" } else { "" }, - if show_file_management_controls { - DisplayThree("Delete", - if self.webdav { - " Rename" - } else { - "" - }, - "") + let modified = file_time_modified(&fmeta); + let modified_ts = modified.to_timespec(); + struct FileSizeDisplay(bool, u64); + impl fmt::Display for FileSizeDisplay { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.0 { + write!(f, "", self.1) } else { - DisplayThree("", "", "") - }, - path = 
escape_specials(format!("/{}", relpath).replace("//", "/")), - fname = encode_tail_if_trimmed(escape_specials(&fname))) - }); + f.write_str(" ") + } + } + } + + let _ = write!(out, + " {}{} \ + {}{}{} {}\n", + NoDoubleQuotes(&fname), + if is_file { "file" } else { "dir" }, + file_icon_suffix(&path, is_file), + NoHtmlLiteral(&fname), + if is_file { "" } else { "/" }, + modified_ts.sec, + modified_ts.nsec / 1000_000, + modified.strftime("%F %T").unwrap(), + FileSizeDisplay(is_file, len), + if is_file { + Maybe(Some(HumanReadableSize(len))) + } else { + Maybe(None) + }, + if is_file { "" } else { "" }, + if show_file_management_controls { + DisplayThree("Delete", + if self.webdav { + " Rename" + } else { + "" + }, + "") + } else { + DisplayThree("", "", "") + }, + path = relpath_escaped, + fname = encode_tail_if_trimmed(escape_specials(&fname))); + } + }; self.handle_generated_response_encoding(req, status::Ok, - html_response(DIRECTORY_LISTING_HTML, - &[&relpath[..], - if show_file_management_controls { - r#""# - } else { - "" - }, - &parent_s[..], - &list_s[..], - if show_file_management_controls { - "
\ -

\ - Drag&Drop to upload or . \ -

" - } else { - "" - }, - if show_file_management_controls { - "Manage" - } else { - "" - }, - if show_file_management_controls && self.webdav { - " \ - Create directory \ -   \ -   \ -  " - } else { - "" - }])) + directory_listing_html(&relpath_escaped[!is_root as usize..], + if show_file_management_controls { + concat!(r#""#) + } else { + "" + }, + parent_f, + list_f, + if show_file_management_controls { + "
\ +

Drag&Drop to upload or .

" + } else { + "" + }, + if show_file_management_controls { + "Manage" + } else { + "" + }, + if show_file_management_controls && self.webdav { + "\ + Create directory\ +  " + } else { + "" + })) } fn handle_put(&self, req: &mut Request) -> IronResult { @@ -1020,58 +1161,36 @@ impl HttpHandler { if url_err { self.handle_invalid_url(req, "

Percent-encoding decoded to invalid UTF-8.

") } else if req_p.is_dir() { - self.handle_disallowed_method(req, - &[&[method::Options, method::Get, method::Delete, method::Head, method::Trace], - if self.webdav { - &DAV_LEVEL_1_METHODS[..] - } else { - &[] - }], - "directory") + self.handle_disallowed_method(req, "directory") } else if detect_file_as_dir(&req_p) { self.handle_invalid_url(req, "

Attempted to use file as directory.

") } else if req.headers.has::() { self.handle_put_partial_content(req) - } else if (symlink && !self.follow_symlinks) || - (symlink && self.follow_symlinks && self.sandbox_symlinks && !is_nonexistent_descendant_of(&req_p, &self.hosted_directory.1)) { - self.create_temp_dir(&self.writes_temp_dir); - self.handle_put_file(req, req_p, false) } else { - self.create_temp_dir(&self.writes_temp_dir); - self.handle_put_file(req, req_p, true) + let illegal = (symlink && !self.follow_symlinks) || + (symlink && self.follow_symlinks && self.sandbox_symlinks && !is_nonexistent_descendant_of(&req_p, &self.hosted_directory.1)); + if illegal { + return self.handle_nonexistent(req, req_p); + } + self.handle_put_file(req, req_p) } } - fn handle_disallowed_method(&self, req: &mut Request, allowed: &[&[method::Method]], tpe: &str) -> IronResult { - let allowed_s = allowed.iter() - .flat_map(|mms| mms.iter()) - .enumerate() - .fold("".to_string(), |cur, (i, m)| { - cur + &m.to_string() + - if i == allowed.len() - 2 { - ", and " - } else if i == allowed.len() - 1 { - "" - } else { - ", " - } - }) - .to_string(); - + fn handle_disallowed_method(&self, req: &mut Request, tpe: &str) -> IronResult { log!(self.log, "{} tried to {red}{}{reset} on {magenta}{}{reset} ({blue}{}{reset}) but only {red}{}{reset} are allowed", self.remote_addresses(&req), req.method, url_path(&req.url), tpe, - allowed_s); + CommaList(self.allowed_methods.iter())); - let resp_text = - html_response(ERROR_HTML, - &["405 Method Not Allowed", &format!("Can't {} on a {}.", req.method, tpe), &format!("

Allowed methods: {}

", allowed_s)]); + let resp_text = error_html("405 Method Not Allowed", + format_args!("Can't {} on a {}.", req.method, tpe), + format_args!("

Allowed methods: {}

", CommaList(self.allowed_methods.iter()))); self.handle_generated_response_encoding(req, status::MethodNotAllowed, resp_text) .map(|mut r| { - r.headers.set(headers::Allow(allowed.iter().flat_map(|mms| mms.iter()).cloned().collect())); + r.headers.set(headers::Allow(self.allowed_methods.into())); r }) } @@ -1084,44 +1203,89 @@ impl HttpHandler { self.handle_generated_response_encoding(req, status::BadRequest, - html_response(ERROR_HTML, - &["400 Bad Request", - "RFC7231 forbids \ - partial-content PUT requests.", - ""])) + error_html("400 Bad Request", + "RFC7231 forbids partial-content \ + PUT requests.", + "")) } - fn handle_put_file(&self, req: &mut Request, req_p: PathBuf, legal: bool) -> IronResult { - let existent = !legal || req_p.exists(); + fn handle_put_file(&self, req: &mut Request, req_p: PathBuf) -> IronResult { + let _ = fs::create_dir_all(req_p.parent().expect("Failed to get requested file's parent directory")); + let direct_output = File::create_new(&req_p); + + let existent = direct_output.is_err(); + let mtime = req.headers.get::().map(|xlm| xlm.0).or_else(|| req.headers.get::().map(|xocmt| xocmt.0 * 1000)); log!(self.log, - "{} {} {magenta}{}{reset}, size: {}B", + "{} {} {magenta}{}{reset}, size: {}B{}{}", self.remote_addresses(&req), - if !legal { - "tried to illegally create" - } else if existent { - "replaced" - } else { - "created" - }, + if existent { "replaced" } else { "created" }, req_p.display(), - *req.headers.get::().expect("No Content-Length header")); + *req.headers.get::().expect("No Content-Length header"), + mtime.map_or("", |_| ". 
modified: "), + Maybe(mtime.map(MsAsS))); + + let mut ibuf = BufReader::with_capacity(1024 * 1024, &mut req.body); + let file = match direct_output { + Ok(mut file) => { + if let Err(err) = io::copy(&mut ibuf, &mut file) { + drop(file); + fs::remove_file(&req_p).expect("Failed to remove requested file after failure"); + let _ = io::copy(&mut ibuf, &mut io::sink()); + return self.handle_put_error(req, "File not created.", err); + } + + file + } + Err(_) => { + self.create_temp_dir(&self.writes_temp_dir); + let &(_, ref temp_dir) = self.writes_temp_dir.as_ref().unwrap(); + let temp_file_p = temp_dir.join(req_p.file_name().expect("Failed to get requested file's filename")); + struct DropDelete<'a>(&'a Path); + impl<'a> Drop for DropDelete<'a> { + fn drop(&mut self) { + let _ = fs::remove_file(self.0); + } + } + + let mut temp_file = File::options().read(true).write(true).create(true).truncate(true).open(&temp_file_p).expect("Failed to create temp file"); + let _temp_file_p_destroyer = DropDelete(&temp_file_p); + if let Err(err) = io::copy(&mut ibuf, &mut temp_file) { + let _ = io::copy(&mut ibuf, &mut io::sink()); + return self.handle_put_error(req, "File not created.", err); + } - let &(_, ref temp_dir) = self.writes_temp_dir.as_ref().unwrap(); - let temp_file_p = temp_dir.join(req_p.file_name().expect("Failed to get requested file's filename")); + let _temp_file_p_destroyer = DropDelete(&temp_file_p); + temp_file.rewind().expect("Failed to rewind temp file"); + let mut file = File::create(&req_p).expect("Failed to open requested file"); + #[cfg(any(target_os = "linux", target_os = "android"))] // matches std::io::copy() #[cfg] + let err = io::copy(&mut temp_file, &mut file); + #[cfg(not(any(target_os = "linux", target_os = "android")))] + let err = io::copy(&mut BufReader::with_capacity(1024 * 1024, &mut temp_file), &mut file); + if let Err(err) = err { + return self.handle_put_error(req, "File truncated.", err); + } - io::copy(&mut req.body, &mut 
File::create(&temp_file_p).expect("Failed to create temp file")) - .expect("Failed to write requested data to requested file"); - if legal { - let _ = fs::create_dir_all(req_p.parent().expect("Failed to get requested file's parent directory")); - fs::copy(&temp_file_p, req_p).expect("Failed to copy temp file to requested file"); + file + } + }; + + if let Some(ms) = mtime { + set_mtime_f(&file, ms); } - Ok(Response::with((if !legal || !existent { - status::Created - } else { + Ok(Response::with((if existent { status::NoContent + } else { + status::Created }, - Header(headers::Server(USER_AGENT.to_string()))))) + Header(headers::Server(USER_AGENT.into()))))) + } + + fn handle_put_error(&self, req: &mut Request, res: &str, err: IoError) -> IronResult { + log!(self.log, "{:w$} {} {}", "", res, err, w = self.remote_addresses(req).width()); + return self.handle_generated_response_encoding(req, + status::ServiceUnavailable, + error_html("503 Service Unavailable", res, format_args!("{}", err))); } fn handle_delete(&self, req: &mut Request) -> IronResult { @@ -1166,7 +1330,7 @@ impl HttpHandler { }); } - Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.to_string()))))) + Ok(Response::with((status::NoContent, Header(headers::Server(USER_AGENT.into()))))) } fn handle_trace(&self, req: &mut Request) -> IronResult { @@ -1175,13 +1339,12 @@ impl HttpHandler { self.remote_addresses(&req), url_path(&req.url)); - let mut hdr = req.headers.clone(); - hdr.set(headers::ContentType("message/http".parse().unwrap())); + let mut hdr = mem::replace(&mut req.headers, Headers::new()); + hdr.set(headers::ContentType(Mime(MimeTopLevel::Message, MimeSubLevel::Ext("http".to_string()), Default::default()))); // message/http Ok(Response { status: Some(status::Ok), headers: hdr, - extensions: TypeMap::new(), body: None, }) } @@ -1195,13 +1358,12 @@ impl HttpHandler { self.handle_generated_response_encoding(req, status::Forbidden, - html_response(ERROR_HTML, - &["403 
Forbidden", - "This feature is currently disabled.", - &format!("

Ask the server administrator to pass {} to the executable to \ - enable support for {}.

", - switch, - desc)])) + error_html("403 Forbidden", + "This feature is currently disabled.", + format_args!("

Ask the server administrator to pass {} to the executable to \ + enable support for {}.

", + switch, + desc))) } fn handle_bad_method(&self, req: &mut Request) -> IronResult { @@ -1210,71 +1372,101 @@ impl HttpHandler { self.remote_addresses(&req), req.method); - let last_p = format!("

Unsupported request method: {}.
\nSupported methods: {}{}OPTIONS, GET, PUT, DELETE, HEAD, and TRACE.

", - req.method, - CommaList(if self.webdav { - &DAV_LEVEL_1_METHODS[..] - } else { - &[][..] - } - .iter()), - if self.webdav { ", " } else { "" }); self.handle_generated_response_encoding(req, status::NotImplemented, - html_response(ERROR_HTML, &["501 Not Implemented", "This operation was not implemented.", &last_p])) + error_html("501 Not Implemented", + "This operation was not implemented.", + format_args!("

Unsupported request method: {}.
\nSupported methods: {}.

", + req.method, + CommaList(self.allowed_methods.iter())))) } fn handle_generated_response_encoding(&self, req: &mut Request, st: status::Status, resp: String) -> IronResult { + let hash = blake3::hash(resp.as_bytes()); + let etag = hash.to_string(); + + if st == status::Ok && (req.method == method::Get || req.method == method::Head) { + if let Some(headers::IfNoneMatch::Items(inm)) = req.headers.get::() { + if HttpHandler::etag_match(inm, &etag) { + log!(self.log, "{:w$} Not Modified", "", w = self.remote_addresses(req).width()); + return Ok(Response::with((status::NotModified, + Header(headers::Server(USER_AGENT.into())), + Header(headers::ETag(headers::EntityTag::strong(etag))), + text_html_charset_utf8()))); + } + } + } + if let Some(encoding) = req.headers.get_mut::().and_then(|es| response_encoding(&mut **es)) { - let cache_key = (blake3::hash(resp.as_bytes()), encoding.to_string()); + let cache_key = (hash, encoding.0); { if let Some(enc_resp) = self.cache_gen.read().expect("Generated file cache read lock poisoned").get(&cache_key) { + enc_resp.1.store(precise_time_ns(), AtomicOrdering::Relaxed); log!(self.log, - "{} encoded as {} for {:.1}% ratio (cached)", - Spaces(self.remote_addresses(req).to_string().len()), + "{:w$} encoded as {} for {:.1}% ratio (cached)", + "", encoding, - ((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64); + ((resp.len() as f64) / (enc_resp.0.len() as f64)) * 100f64, + w = self.remote_addresses(req).width()); return Ok(Response::with((st, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::ContentEncoding(vec![encoding])), - "text/html;charset=utf-8".parse::().unwrap(), - &enc_resp[..]))); + Header(headers::Server(USER_AGENT.into())), + Header(headers::ContentEncoding([encoding].into())), + Header(headers::ETag(headers::EntityTag::strong(etag))), + text_html_charset_utf8(), + &enc_resp.0[..]))); } } if let Some(enc_resp) = encode_str(&resp, &encoding) { log!(self.log, - "{} encoded as {} for {:.1}% ratio", - 
Spaces(self.remote_addresses(req).to_string().len()), + "{:w$} encoded as {} for {:.1}% ratio", + "", encoding, - ((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64); + ((resp.len() as f64) / (enc_resp.len() as f64)) * 100f64, + w = self.remote_addresses(req).width()); - let mut cache = self.cache_gen.write().expect("Generated file cache read lock poisoned"); - cache.insert(cache_key.clone(), enc_resp); + if enc_resp.len() as u64 <= self.encoded_generated_limit { + let mut cache = self.cache_gen.write().expect("Generated file cache write lock poisoned"); + self.cache_gen_size.fetch_add(enc_resp.len() as u64, AtomicOrdering::Relaxed); + cache.insert(cache_key.clone(), (enc_resp, AtomicU64::new(precise_time_ns()))); - return Ok(Response::with((st, - Header(headers::Server(USER_AGENT.to_string())), - Header(headers::ContentEncoding(vec![encoding])), - "text/html;charset=utf-8".parse::().unwrap(), - &cache[&cache_key][..]))); + return Ok(Response::with((st, + Header(headers::Server(USER_AGENT.into())), + Header(headers::ContentEncoding([encoding].into())), + Header(headers::ETag(headers::EntityTag::strong(etag))), + text_html_charset_utf8(), + &cache[&cache_key].0[..]))); + } else { + return Ok(Response::with((st, + Header(headers::Server(USER_AGENT.into())), + Header(headers::ContentEncoding([encoding].into())), + Header(headers::ETag(headers::EntityTag::strong(etag))), + text_html_charset_utf8(), + enc_resp))); + } } else { log!(self.log, - "{} failed to encode as {}, sending identity", - Spaces(self.remote_addresses(req).to_string().len()), - encoding); + "{:w$} failed to encode as {}, sending identity", + "", + encoding, + w = self.remote_addresses(req).width()); } } - Ok(Response::with((st, Header(headers::Server(USER_AGENT.to_string())), "text/html;charset=utf-8".parse::().unwrap(), resp))) + Ok(Response::with((st, + Header(headers::Server(USER_AGENT.into())), + Header(headers::ETag(headers::EntityTag::strong(etag))), + text_html_charset_utf8(), + resp))) } 
fn handle_requested_entity_unopenable(&self, req: &mut Request, e: IoError, entity_type: &str) -> IronResult { if e.kind() == IoErrorKind::PermissionDenied { self.handle_generated_response_encoding(req, status::Forbidden, - html_response(ERROR_HTML, &["403 Forbidden", &format!("Can't access {}.", url_path(&req.url)), ""])) + error_html("403 Forbidden", format_args!("Can't access {}.", url_path(&req.url)), "")) } else { // The ops that get here (File::open(), fs::read_dir()) can't return any other errors by the time they're run // (and even if it could, there isn't much we can do about them) @@ -1284,9 +1476,10 @@ impl HttpHandler { fn handle_raw_fs_api_response(&self, st: status::Status, resp: &R) -> IronResult { Ok(Response::with((st, - Header(headers::Server(USER_AGENT.to_string())), + Header(headers::Server(USER_AGENT.into())), Header(RawFsApiHeader(true)), - "application/json;charset=utf-8".parse::().unwrap(), + // application/json; charset=utf-8 + Mime(MimeTopLevel::Application, MimeSubLevel::Json, vec![(MimeAttr::Charset, MimeAttrValue::Utf8)]), serde_json::to_string(&resp).unwrap()))) } @@ -1351,10 +1544,10 @@ impl HttpHandler { fn guess_mime_type(&self, req_p: &Path) -> Mime { // Based on mime_guess::guess_mime_type_opt(); that one does to_str() instead of to_string_lossy() - let ext = req_p.extension().map(OsStr::to_string_lossy).unwrap_or("".into()); + let ext = req_p.extension().unwrap_or(OsStr::new("")); (self.mime_type_overrides.get(&*ext).cloned()) - .or_else(|| get_mime_type_opt(&*ext)) + .or_else(|| ext.to_str().and_then(get_mime_type_opt)) .unwrap_or_else(|| if file_binary(req_p) { Mime(MimeTopLevel::Application, MimeSubLevel::OctetStream, Default::default()) // "application/octet-stream" } else { @@ -1363,44 +1556,24 @@ impl HttpHandler { } } -impl Clone for HttpHandler { - fn clone(&self) -> HttpHandler { - HttpHandler { - hosted_directory: self.hosted_directory.clone(), - follow_symlinks: self.follow_symlinks, - sandbox_symlinks: 
self.sandbox_symlinks, - generate_listings: self.generate_listings, - check_indices: self.check_indices, - strip_extensions: self.strip_extensions, - log: self.log, - webdav: self.webdav, - global_auth_data: self.global_auth_data.clone(), - path_auth_data: self.path_auth_data.clone(), - writes_temp_dir: self.writes_temp_dir.clone(), - encoded_temp_dir: self.encoded_temp_dir.clone(), - proxies: self.proxies.clone(), - proxy_redirs: self.proxy_redirs.clone(), - mime_type_overrides: self.mime_type_overrides.clone(), - additional_headers: self.additional_headers.clone(), - cache_gen: Default::default(), - cache_fs: Default::default(), - } - } +/// text/html; charset=utf-8 +fn text_html_charset_utf8() -> Mime { + Mime(MimeTopLevel::Text, MimeSubLevel::Html, vec![(MimeAttr::Charset, MimeAttrValue::Utf8)]) } pub struct AddressWriter<'r, 'p, 'ra, 'rb: 'ra> { pub request: &'r Request<'ra, 'rb>, pub proxies: &'p BTreeMap, - /// (at all, log_colour) - pub log: (bool, bool), + /// (at all, log_time, log_colour) + pub log: (bool, bool, bool), } impl<'r, 'p, 'ra, 'rb: 'ra> fmt::Display for AddressWriter<'r, 'p, 'ra, 'rb> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use trivial_colours::{Reset as CReset, Colour as C}; - if self.log.1 { + if self.log.2 { write!(f, "{green}{}{reset}", self.request.remote_addr, green = C::Green, reset = CReset)?; } else { write!(f, "{}", self.request.remote_addr)?; @@ -1424,6 +1597,27 @@ impl<'r, 'p, 'ra, 'rb: 'ra> fmt::Display for AddressWriter<'r, 'p, 'ra, 'rb> { } } +impl<'r, 'p, 'ra, 'rb: 'ra> AddressWriter<'r, 'p, 'ra, 'rb> { + fn width(&self) -> usize { + // per http://192.168.1.109:8000/target/doc/rust/src/core/net/socket_addr.rs.html#571 + const LONGEST_IPV6_SOCKET_ADDR: &str = "[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%4294967296]:65536"; + let mut widthbuf = ArrayString::<{ LONGEST_IPV6_SOCKET_ADDR.len() }>::new(); + write!(&mut widthbuf, "{}", self.request.remote_addr).unwrap(); + let mut len = widthbuf.len(); + for 
(network, header) in self.proxies { + if network.contains(&self.request.remote_addr.ip()) { + if let Some(saddrs) = self.request.headers.get_raw(header) { + for saddr in saddrs { + len += " for ".len(); + len += saddr.len(); + } + } + } + } + return len; + } +} + /// Attempt to start a server on ports from `from` to `up_to`, inclusive, with the specified handler. /// @@ -1440,42 +1634,22 @@ impl<'r, 'p, 'ra, 'rb: 'ra> fmt::Display for AddressWriter<'r, 'p, 'ra, 'rb> { /// # use iron::{status, Response}; /// let server = try_ports(|req| Ok(Response::with((status::Ok, "Abolish the burgeoisie!"))), 8000, 8100, None).unwrap(); /// ``` -pub fn try_ports(hndlr: H, addr: IpAddr, from: u16, up_to: u16, tls_data: &Option<((String, PathBuf), String)>) - -> Result { - let hndlr = hndlr; - for port in from..up_to + 1 { - let ir = Iron::new(hndlr.clone()); +pub fn try_ports(hndlr: H, addr: IpAddr, from: u16, up_to: u16, tls_data: &Option<((String, PathBuf), String)>) -> Result { + for port in from..=up_to { + let ir = Iron::new(hndlr); match if let Some(&((_, ref id), ref pw)) = tls_data.as_ref() { ir.https((addr, port), - NativeTlsServer::new(id, pw).map_err(|err| { - Error { - desc: "TLS certificate", - op: "open", - more: err.to_string().into(), - } - })?) + NativeTlsServer::new(id, pw).map_err(|err| Error(format!("Opening TLS certificate: {}", err)))?) 
} else { ir.http((addr, port)) } { Ok(server) => return Ok(server), - Err(error) => { - let error_s = error.to_string(); - if !error_s.contains("port") && !error_s.contains("in use") { - return Err(Error { - desc: "server", - op: "start", - more: error_s.into(), - }); - } - } + Err(iron::error::HttpError::Io(ioe)) if ioe.kind() == IoErrorKind::AddrInUse => { /* next */ } + Err(error) => return Err(Error(format!("Starting server: {}", error))), } } - Err(Error { - desc: "server", - op: "start", - more: "no free ports".into(), - }) + Err(Error(format!("Starting server: no free ports"))) } /// Generate a passwordless self-signed certificate in the `"tls"` subdirectory of the specified directory @@ -1491,16 +1665,12 @@ pub fn try_ports(hndlr: H, addr: IpAddr, from: u16, up_to: u /// assert_eq!(pass, ""); /// ``` pub fn generate_tls_data(temp_dir: &(String, PathBuf)) -> Result<((String, PathBuf), String), Error> { - fn err>>(which: bool, op: &'static str, more: M) -> Error { - Error { - desc: if which { + fn err(which: bool, op: &'static str, more: M) -> Error { + Error(format!("{} {}: {}", op, if which { "TLS key generation process" } else { "TLS identity generation process" - }, - op: op, - more: more.into(), - } + }, more)) } fn exit_err(which: bool, process: &mut Child, exitc: &ExitStatus) -> Error { let mut stdout = String::new(); @@ -1512,19 +1682,11 @@ pub fn generate_tls_data(temp_dir: &(String, PathBuf)) -> Result<((String, PathB stderr = " Result<((String, PathB .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .map_err(|error| err(true, "spawn", error.to_string()))?; + .map_err(|error| err(true, "Spawning", error))?; child.stdin .as_mut() .unwrap() @@ -1543,8 +1705,8 @@ pub fn generate_tls_data(temp_dir: &(String, PathBuf)) -> Result<((String, PathB env!("CARGO_PKG_VERSION"), "\nnabijaczleweli@gmail.com\n") .as_bytes()) - .map_err(|error| err(true, "pipe", error.to_string()))?; - let es = child.wait().map_err(|error| err(true, "wait", 
error.to_string()))?; + .map_err(|error| err(true, "Piping", error))?; + let es = child.wait().map_err(|error| err(true, "Waiting", error))?; if !es.success() { return Err(exit_err(true, &mut child, &es)); } @@ -1570,8 +1732,8 @@ pub fn generate_tls_data(temp_dir: &(String, PathBuf)) -> Result<((String, PathB .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn() - .map_err(|error| err(false, "spawn", error.to_string()))?; - let es = child.wait().map_err(|error| err(false, "wait", error.to_string()))?; + .map_err(|error| err(false, "Spawning", error))?; + let es = child.wait().map_err(|error| err(false, "Waiting", error))?; if !es.success() { return Err(exit_err(false, &mut child, &es)); } diff --git a/src/ops/prune.rs b/src/ops/prune.rs new file mode 100644 index 0000000..5409ca9 --- /dev/null +++ b/src/ops/prune.rs @@ -0,0 +1,149 @@ +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use iron::{IronResult, Response, Handler, Request}; +use self::super::super::util::HumanReadableSize; +use self::super::super::Options; +use std::collections::HashSet; +use self::super::HttpHandler; +use time::precise_time_ns; +use std::fs; + + +pub struct PruneChain { + pub handler: HttpHandler, + pub encoded_filesystem_limit: Option, + pub encoded_generated_limit: Option, + pub encoded_prune: Option, + + pub prune_interval: u64, // s + last_prune: AtomicU64, // ns +} + +impl PruneChain { + pub fn new(opts: &Options) -> PruneChain { + PruneChain { + handler: HttpHandler::new(opts), + encoded_filesystem_limit: opts.encoded_filesystem_limit, + encoded_generated_limit: opts.encoded_generated_limit, + encoded_prune: opts.encoded_prune, + + prune_interval: (opts.encoded_prune.unwrap_or(0) / 6).max(10), + last_prune: AtomicU64::new(0), + } + } + + pub fn prune(&self) { + let mut start = 0u64; + let mut freed_fs = 0u64; + let mut freed_gen = 0u64; + + + if let Some(limit) = self.encoded_filesystem_limit { + if self.handler.cache_fs_size.load(AtomicOrdering::Relaxed) > limit { 
+ start = precise_time_ns(); + + let mut cache_files = self.handler.cache_fs_files.write().expect("Filesystem files cache write lock poisoned"); + let mut removed_file_hashes = HashSet::new(); + let mut cache = self.handler.cache_fs.write().expect("Filesystem cache write lock poisoned"); + let size = self.handler.cache_fs_size.load(AtomicOrdering::Relaxed); + while size - freed_fs > limit { + let key = match cache.iter().min_by_key(|i| (i.1).1.load(AtomicOrdering::Relaxed)) { + Some((key, ((path, _, _), _))) => { + match fs::remove_file(path) { + Ok(()) => key.clone(), + Err(_) => break, + } + } + None => break, + }; + let ((_, _, sz), _) = cache.remove(&key).unwrap(); + freed_fs += sz; + removed_file_hashes.insert(key.0); + } + self.handler.cache_fs_size.fetch_sub(freed_fs, AtomicOrdering::Relaxed); + cache_files.retain(|_, v| !removed_file_hashes.contains(v)); + } + } + + if let Some(limit) = self.encoded_generated_limit { + if self.handler.cache_gen_size.load(AtomicOrdering::Relaxed) > limit { + if start == 0 { + start = precise_time_ns(); + } + + let mut cache = self.handler.cache_gen.write().expect("Generated file cache write lock poisoned"); + let size = self.handler.cache_gen_size.load(AtomicOrdering::Relaxed); + while size - freed_gen > limit { + let key = match cache.iter().min_by_key(|i| (i.1).1.load(AtomicOrdering::Relaxed)) { + Some((key, _)) => key.clone(), + None => break, + }; + let (data, _) = cache.remove(&key).unwrap(); + freed_gen += data.len() as u64; + } + self.handler.cache_gen_size.fetch_sub(freed_gen, AtomicOrdering::Relaxed); + } + } + + if let Some(limit) = self.encoded_prune { + if start == 0 { + start = precise_time_ns(); + } + + let last = self.last_prune.swap(start, AtomicOrdering::Relaxed); + if last < start && (start - last) / 1000 / 1000 / 1000 >= self.prune_interval { + { + let mut cache_files = self.handler.cache_fs_files.write().expect("Filesystem files cache write lock poisoned"); + let mut removed_file_hashes = HashSet::new(); 
+ let mut cache = self.handler.cache_fs.write().expect("Filesystem cache write lock poisoned"); + cache.retain(|(hash, _), ((path, _, sz), atime)| { + let atime = atime.load(AtomicOrdering::Relaxed); + if atime > start || (start - atime) / 1000 / 1000 / 1000 <= limit { + return true; + } + + if fs::remove_file(path).is_err() { + return true; + } + freed_fs += *sz; + self.handler.cache_fs_size.fetch_sub(*sz, AtomicOrdering::Relaxed); + removed_file_hashes.insert(*hash); + false + }); + cache_files.retain(|_, v| !removed_file_hashes.contains(v)); + } + { + let mut cache = self.handler.cache_gen.write().expect("Generated file cache write lock poisoned"); + cache.retain(|_, (data, atime)| { + let atime = atime.load(AtomicOrdering::Relaxed); + if atime > start || (start - atime) / 1000 / 1000 / 1000 <= limit { + return true; + } + + freed_gen += data.len() as u64; + self.handler.cache_gen_size.fetch_sub(data.len() as u64, AtomicOrdering::Relaxed); + false + }); + } + } + } + + if freed_fs != 0 || freed_gen != 0 { + let end = precise_time_ns(); + log!(self.handler.log, + "Pruned {} + {} in {}ns; used: {} + {}", + HumanReadableSize(freed_fs), + HumanReadableSize(freed_gen), + end - start, + HumanReadableSize(self.handler.cache_fs_size.load(AtomicOrdering::Relaxed)), + HumanReadableSize(self.handler.cache_gen_size.load(AtomicOrdering::Relaxed))); + } + } +} + +impl Handler for &'static PruneChain { + fn handle(&self, req: &mut Request) -> IronResult { + let resp = (&self.handler).handle(req); + self.prune(); + resp + } +} diff --git a/src/ops/webdav.rs b/src/ops/webdav.rs index 6b8c24b..7afc261 100644 --- a/src/ops/webdav.rs +++ b/src/ops/webdav.rs @@ -6,11 +6,12 @@ //! 
https://tools.ietf.org/html/rfc2518 -use self::super::super::util::{BorrowXmlName, Destination, CommaList, Overwrite, Depth, win32_file_attributes, file_time_accessed, file_time_modified, - file_time_created, client_microsoft, is_actually_file, is_descendant_of, file_executable, html_response, file_length, copy_dir, - WEBDAV_ALLPROP_PROPERTIES_NON_WINDOWS, WEBDAV_ALLPROP_PROPERTIES_WINDOWS, WEBDAV_XML_NAMESPACE_MICROSOFT, - WEBDAV_XML_NAMESPACE_APACHE, WEBDAV_PROPNAME_PROPERTIES, WEBDAV_XML_NAMESPACE_DAV, WEBDAV_XML_NAMESPACES, MAX_SYMLINKS, - ERROR_HTML}; +use self::super::super::util::{BorrowXmlName, Destination, DisplayThree, CommaList, Overwrite, Depth, win32_file_attributes, file_time_accessed, + file_time_modified, file_time_created, client_microsoft, is_actually_file, is_descendant_of, file_executable, set_executable, + error_html, file_length, set_times, copy_dir, WEBDAV_ALLPROP_PROPERTIES_NON_WINDOWS, WEBDAV_ALLPROP_PROPERTIES_WINDOWS, + WEBDAV_XML_NAMESPACE_MICROSOFT, WEBDAV_XML_NAMESPACE_APACHE, WEBDAV_PROPNAME_PROPERTIES, WEBDAV_XML_NAMESPACE_DAV, + WEBDAV_XML_NAMESPACES, MAX_SYMLINKS}; +use iron::mime::{Mime, Attr as MimeAttr, Value as MimeAttrValue, SubLevel as MimeSubLevel, TopLevel as MimeTopLevel}; use std::io::{ErrorKind as IoErrorKind, Result as IoResult, Error as IoError, Write, Read}; use xml::reader::{EventReader as XmlReader, XmlEvent as XmlREvent, Error as XmlRError}; use xml::writer::{EventWriter as XmlWriter, XmlEvent as XmlWEvent, Error as XmlWError}; @@ -24,14 +25,21 @@ use std::path::{PathBuf, Path}; use std::fs::{self, Metadata}; use self::super::HttpHandler; use itertools::Itertools; -use std::borrow::Borrow; -use iron::mime::Mime; -use std::fmt; +use std::{fmt, mem}; +use time::strptime; -lazy_static! 
{ - static ref DEFAULT_XML_PARSER_CONFIG: XmlParserConfig = XmlParserConfig { trim_whitespace: true, ..Default::default() }; - static ref DEFAULT_XML_EMITTER_CONFIG: XmlEmitterConfig = XmlEmitterConfig { perform_indent: cfg!(debug_assertions), ..Default::default() }; +/// This should be a pub const but the default/new function isn't const +fn default_xml_parser_config() -> XmlParserConfig { + XmlParserConfig { + trim_whitespace: true, + whitespace_to_characters: true, + ..Default::default() + } +} +/// This should be a pub const but the default/new function isn't const +fn default_xml_emitter_config() -> XmlEmitterConfig { + XmlEmitterConfig { perform_indent: cfg!(debug_assertions), ..Default::default() } } @@ -70,8 +78,8 @@ impl HttpHandler { self.remote_addresses(&req), req_p.display()); return self.handle_generated_response_encoding(req, - status::BadRequest, - html_response(ERROR_HTML, &["400 Bad Request", &format!("Invalid XML: {}", e), ""])); + status::BadRequest, + error_html("400 Bad Request", format_args!("Invalid XML: {}", e), "")); } Err(props) => props, } @@ -104,15 +112,15 @@ impl HttpHandler { }; match resp.expect("Couldn't write PROPFIND XML") { - Ok(xml_resp) => Ok(Response::with((status::MultiStatus, xml_resp, "text/xml;charset=utf-8".parse::().unwrap()))), + Ok(xml_resp) => Ok(Response::with((status::MultiStatus, xml_resp, text_xml_charset_utf8()))), Err(resp) => resp, } } /// Adapted from /// https://github.com/tylerwhall/hyperdav-server/blob/415f512ac030478593ad389a3267aeed7441d826/src/lib.rs#L459 - fn handle_webdav_propfind_write_output<'n, N: BorrowXmlName<'n>>(&self, req: &mut Request, url: String, path: &Path, props: &[&'n [N]], just_names: bool, - depth: Depth) + fn handle_webdav_propfind_write_output<'n, N: BorrowXmlName<'n>>(&self, req: &mut Request, mut url: String, path: &Path, props: &[&'n [N]], + just_names: bool, depth: Depth) -> Result, IronResult>, XmlWError> { let mut out = intialise_xml_output()?; 
out.write(namespaces_for_props("D:multistatus", props.iter().flat_map(|pp| pp.iter())))?; @@ -121,7 +129,7 @@ impl HttpHandler { self.handle_propfind_path(&mut out, &url, &path, &meta, props, just_names)?; if meta.is_dir() { - if let Some(ir) = self.handle_webdav_propfind_path_recursive(req, &mut out, url, &path, props, just_names, depth)? { + if let Some(ir) = self.handle_webdav_propfind_path_recursive(req, &mut out, &mut url, &path, props, just_names, depth)? { return Ok(Err(ir)); } } @@ -131,17 +139,19 @@ impl HttpHandler { Ok(Ok(out.into_inner())) } - fn handle_webdav_propfind_path_recursive<'n, W: Write, N: BorrowXmlName<'n>>(&self, req: &mut Request, out: &mut XmlWriter, root_url: String, + fn handle_webdav_propfind_path_recursive<'n, W: Write, N: BorrowXmlName<'n>>(&self, req: &mut Request, out: &mut XmlWriter, root_url: &mut String, root_path: &Path, props: &[&'n [N]], just_names: bool, depth: Depth) -> Result>, XmlWError> { + if !root_url.ends_with('/') { + root_url.push('/'); + } + let root_url_orig_len = root_url.len(); + let mut links_left = MAX_SYMLINKS; if let Some(next_depth) = depth.lower() { for f in root_path.read_dir().expect("Failed to read requested directory").map(|p| p.expect("Failed to iterate over requested directory")) { - let mut url = root_url.clone(); - if !url.ends_with('/') { - url.push('/'); - } - url.push_str(f.file_name().to_str().expect("Filename not UTF-8")); + root_url.truncate(root_url_orig_len); + root_url.push_str(&f.file_name().to_string_lossy()[..]); let mut path = f.path(); let mut symlink = false; @@ -162,13 +172,11 @@ impl HttpHandler { if !(!path.exists() || (symlink && !self.follow_symlinks) || (symlink && self.follow_symlinks && self.sandbox_symlinks && !is_descendant_of(&path, &self.hosted_directory.1))) { - self.handle_propfind_path(out, - &url, - &path, - &path.metadata().expect("Failed to get requested file metadata"), - props, - just_names)?; - self.handle_webdav_propfind_path_recursive(req, out, url, &path, 
props, just_names, next_depth)?; + let metadata = path.metadata().expect("Failed to get requested file metadata"); + self.handle_propfind_path(out, &root_url, &path, &metadata, props, just_names)?; + if metadata.is_dir() { + self.handle_webdav_propfind_path_recursive(req, out, root_url, &path, props, just_names, next_depth)?; + } } } } @@ -193,27 +201,37 @@ impl HttpHandler { return self.handle_nonexistent(req, req_p); } - let props = match parse_proppatch(req) { - Ok(props) => props, + let (props, actionables) = match parse_proppatch(req) { + Ok(pp) => pp, Err(e) => { log!(self.log, "{} tried to {red}PROPPATCH{reset} {yellow}{}{reset} with invalid XML", self.remote_addresses(&req), req_p.display()); - return self.handle_generated_response_encoding(req, - status::BadRequest, - html_response(ERROR_HTML, &["400 Bad Request", &format!("Invalid XML: {}", e), ""])); + return self.handle_generated_response_encoding(req, status::BadRequest, error_html("400 Bad Request", format_args!("Invalid XML: {}", e), "")); } }; log!(self.log, "{} requested {red}PROPPATCH{reset} of {} on {yellow}{}{reset}", self.remote_addresses(&req), - CommaList(props.iter().map(|p| &p.0.local_name)), + CommaList(props.iter().map(|p| if p.1.is_empty() { + DisplayThree(&p.0.local_name, "", "") + } else { + DisplayThree(&p.0.local_name, "=", &p.1[..]) + })), req_p.display()); + set_times(&req_p, + actionables.Win32LastModifiedTime, + actionables.Win32LastAccessTime, + actionables.Win32CreationTime); + if let Some(ex) = actionables.executable { + set_executable(&req_p, ex); + } + match write_proppatch_output(&props, req.url.as_ref()).expect("Couldn't write PROPPATCH XML") { - Ok(xml_resp) => Ok(Response::with((status::MultiStatus, xml_resp, "text/xml;charset=utf-8".parse::().unwrap()))), + Ok(xml_resp) => Ok(Response::with((status::MultiStatus, xml_resp, text_xml_charset_utf8()))), Err(resp) => resp, } } @@ -371,7 +389,7 @@ impl HttpHandler { _ => { self.handle_generated_response_encoding(req, 
status::BadRequest, - html_response(ERROR_HTML, &["400 Bad Request", &format!("Invalid depth: {}", depth), ""])) + error_html("400 Bad Request", format_args!("Invalid depth: {}", depth), "")) } } } @@ -493,7 +511,7 @@ impl HttpHandler { } "Win32LastAccessTime" => { - out.write(XmlWEvent::start_element((WEBDAV_XML_NAMESPACE_MICROSOFT.0, "Win32FileAttributes")))?; + out.write(XmlWEvent::start_element((WEBDAV_XML_NAMESPACE_MICROSOFT.0, "Win32LastAccessTime")))?; out.write(XmlWEvent::characters(&file_time_accessed(meta).rfc3339().to_string()))?; } @@ -568,7 +586,7 @@ fn parse_propfind(req: &mut Request) -> Result(out: &mut XmlWriter, prop: XmlName) -> out.write(XmlWEvent::start_element(prop)) } +// +// +// +// +// Sat, 30 Dec 2017 17:50:04 GMT +// Wed, 08 May 2024 13:50:28 GMT +// Sat, 30 Dec 2017 17:50:04 GMT +// 00000000 +// +// +// Sat, 30 Dec 2017 17:50:04 GMT +// 00000020 +// +// +// +// +// +// T +#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] +#[allow(non_snake_case)] +struct ProppatchActionables { + Win32CreationTime: Option, // ms since epoch + Win32LastAccessTime: Option, // ms since epoch + Win32LastModifiedTime: Option, // ms since epoch + executable: Option, +} + +impl ProppatchActionables { + fn new() -> ProppatchActionables { + ProppatchActionables { + Win32CreationTime: None, + Win32LastAccessTime: None, + Win32LastModifiedTime: None, + executable: None, + } + } +} + +fn win32time(t: &str) -> Option { + let tm = strptime(&t, "%a, %d %b %Y %T %Z").ok()?.to_timespec(); + Some(tm.sec as u64 * 1000 + (tm.nsec / 1000 / 1000) as u64) +} + /// https://tools.ietf.org/html/rfc2518#section-12.13 -fn parse_proppatch(req: &mut Request) -> Result, String> { +fn parse_proppatch(req: &mut Request) -> Result<(Vec<(OwnedXmlName, String)>, ProppatchActionables), String> { #[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] enum State { Start, @@ -623,12 +684,13 @@ fn parse_proppatch(req: &mut Request) -> Result, Strin InProp, } - - 
let mut xml = XmlReader::new_with_config(&mut req.body, DEFAULT_XML_PARSER_CONFIG.clone()); + let mut xml = XmlReader::new_with_config(&mut req.body, default_xml_parser_config()); let mut state = State::Start; let mut props = vec![]; let mut propname = None; let mut is_remove = false; + let mut actionables = ProppatchActionables::new(); + let mut propdata = String::new(); loop { let event = xml.next().map_err(|e| e.to_string())?; @@ -645,23 +707,33 @@ fn parse_proppatch(req: &mut Request) -> Result, Strin state = State::Action; is_remove = true; } - (State::PropertyUpdate, XmlREvent::EndElement { .. }) => return Ok(props), + (State::PropertyUpdate, XmlREvent::EndElement { .. }) => return Ok((props, actionables)), (State::Action, XmlREvent::StartElement { ref name, .. }) if name.local_name == "prop" => state = State::Prop, (State::Action, XmlREvent::EndElement { .. }) => state = State::PropertyUpdate, (State::Prop, XmlREvent::StartElement { name, .. }) => { state = State::InProp; - propname = Some(name.clone()); - props.push((name, is_remove)); + propname = Some(name); } (State::Prop, XmlREvent::EndElement { .. }) => state = State::Action, (State::InProp, XmlREvent::EndElement { name, .. }) => { - if Some(name) == propname { + if Some(&name) == propname.as_ref() { + props.push((name, mem::replace(&mut propdata, String::new()))); state = State::Prop; } } + (State::InProp, XmlREvent::Characters(data)) if !is_remove => { + propdata = data; + match &propname.as_ref().unwrap().local_name[..] 
{ + "Win32CreationTime" => actionables.Win32CreationTime = win32time(&propdata), + "Win32LastAccessTime" => actionables.Win32LastAccessTime = win32time(&propdata), + "Win32LastModifiedTime" => actionables.Win32LastModifiedTime = win32time(&propdata), + "executable" => actionables.executable = Some(propdata == "T"), + _ => propdata = String::new(), + } + } (State::InProp, _) => {} (st, ev) => return Err(format!("Unexpected event {:?} during state {:?}", ev, st)), @@ -669,9 +741,9 @@ fn parse_proppatch(req: &mut Request) -> Result, Strin } } -fn write_proppatch_output(props: &[(OwnedXmlName, bool)], req_url: &GenericUrl) -> Result, IronResult>, XmlWError> { +fn write_proppatch_output(props: &[(OwnedXmlName, String)], req_url: &GenericUrl) -> Result, IronResult>, XmlWError> { let mut out = intialise_xml_output()?; - out.write(namespaces_for_props("D:multistatus", props.iter().map(|pp| &pp.0)))?; + out.write(namespaces_for_props("D:multistatus", props.iter().map(|p| &p.0)))?; out.write(XmlWEvent::start_element("D:href"))?; out.write(XmlWEvent::characters(req_url.as_str()))?; @@ -735,7 +807,7 @@ fn copy_response_multierror(errors: &[(IoError, String)], req_url: &GenericUrl) } fn intialise_xml_output() -> Result>, XmlWError> { - let mut out = XmlWriter::new_with_config(vec![], DEFAULT_XML_EMITTER_CONFIG.clone()); + let mut out = XmlWriter::new_with_config(vec![], default_xml_emitter_config()); out.write(XmlWEvent::StartDocument { version: XmlVersion::Version10, @@ -749,7 +821,7 @@ fn intialise_xml_output() -> Result>, XmlWError> { fn namespaces_for_props<'n, N: 'n + BorrowXmlName<'n>, Ni: Iterator>(elem_name: &str, props: Ni) -> XmlWEventStartElementBuilder { let mut bldr = XmlWEvent::start_element(elem_name).ns(WEBDAV_XML_NAMESPACES[0].0, WEBDAV_XML_NAMESPACES[0].1); - for prop_namespace in props.map(|p| p.borrow().borrow_xml_name()).flat_map(|p| p.namespace).unique() { + for prop_namespace in props.map(|p| p.borrow_xml_name()).flat_map(|p| p.namespace).unique() { if 
let Some((prefix, namespace)) = WEBDAV_XML_NAMESPACES[1..].iter().find(|(_, ns)| *ns == prop_namespace) { bldr = bldr.ns(*prefix, *namespace); } @@ -757,3 +829,8 @@ fn namespaces_for_props<'n, N: 'n + BorrowXmlName<'n>, Ni: Iterator Mime { + Mime(MimeTopLevel::Text, MimeSubLevel::Xml, vec![(MimeAttr::Charset, MimeAttrValue::Utf8)]) +} diff --git a/src/options.rs b/src/options.rs index 60559fd..8f5440f 100644 --- a/src/options.rs +++ b/src/options.rs @@ -13,6 +13,7 @@ use clap::{AppSettings, ErrorKind as ClapErrorKind, Error as ClapError, Arg, App}; use std::collections::btree_map::{BTreeMap, Entry as BTreeMapEntry}; +use std::ffi::{OsString, OsStr}; use std::collections::BTreeSet; use std::env::{self, temp_dir}; use std::num::NonZeroU64; @@ -21,16 +22,9 @@ use std::str::FromStr; use std::borrow::Cow; use iron::mime::Mime; use std::net::IpAddr; -use regex::Regex; +use std::{str, fs}; use cidr::IpCidr; -use std::fs; - - -lazy_static! { - static ref CREDENTIALS_REGEX: Regex = Regex::new("^[^:]+(?::[^:]+)?$").unwrap(); - static ref PATH_CREDENTIALS_REGEX: Regex = Regex::new("^(.+)=([^:]+(?::[^:]+)?)?$").unwrap(); - static ref HEADER_REGEX: Regex = Regex::new("^([^:]+):[[:space:]]*(.+)$").unwrap(); -} +use blake3; #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] @@ -87,12 +81,20 @@ pub struct Options { pub allow_writes: bool, /// Whether to encode filesystem files. Default: true pub encode_fs: bool, + /// Consume at most this much space for encoded filesystem files. + pub encoded_filesystem_limit: Option, + /// Consume at most this much memory for encoded generated responses. + pub encoded_generated_limit: Option, + /// Prune cached encoded data older than this many seconds. 
+ pub encoded_prune: Option, /// How much to suppress output /// /// * >= 1 – suppress serving status lines ("IP was served something") /// * >= 2 – suppress startup except for auth data, if present /// * >= 3 – suppress all startup messages pub loglevel: LogLevel, + /// Whether to include the time in the log output. Default: `true` + pub log_time: bool, /// Whether to colourise the log output. Default: `true` pub log_colour: bool, /// Whether to handle WebDAV requests. Default: false @@ -110,7 +112,7 @@ pub struct Options { /// Header names and who we trust them from in `HEADER-NAME:CIDR` format pub proxy_redirs: BTreeMap, /// Extension -> MIME type mapping overrides; empty string for no extension - pub mime_type_overrides: BTreeMap, + pub mime_type_overrides: BTreeMap, /// Max amount of data per second each request is allowed to return. Default: `None` pub request_bandwidth: Option, /// Additional headers to add to every response @@ -138,8 +140,15 @@ impl Options { .arg(Arg::from_usage("-l --no-listings 'Never generate dir listings. Default: false'")) .arg(Arg::from_usage("-i --no-indices 'Do not automatically use index files. Default: false'")) .arg(Arg::from_usage("-e --no-encode 'Do not encode filesystem files. Default: false'")) + .arg(Arg::from_usage("--encoded-filesystem [FS_LIMIT] 'Consume at most FS_LIMIT space for encoded filesystem files.'") + .validator(|s| Options::size_parse(s.into()).map(|_| ()))) + .arg(Arg::from_usage("--encoded-generated [GEN_LIMIT] 'Consume at most GEN_LIMIT memory for encoded generated responses.'") + .validator(|s| Options::size_parse(s.into()).map(|_| ()))) + .arg(Arg::from_usage("--encoded-prune [MAX_AGE] 'Prune cached encoded data older than MAX_AGE.'") + .validator(|s| Options::age_parse(s.into()).map(|_| ()))) .arg(Arg::from_usage("-x --strip-extensions 'Allow stripping index extensions from served paths. Default: false'")) .arg(Arg::from_usage("-q --quiet... 
'Suppress increasing amounts of output'")) + .arg(Arg::from_usage("-Q --quiet-time 'Don't prefix logs with the timestamp'")) .arg(Arg::from_usage("-c --no-colour 'Don't colourise the log output'")) .arg(Arg::from_usage("-d --webdav 'Handle WebDAV requests. Default: false'")) .arg(Arg::from_usage("--ssl [TLS_IDENTITY] 'Data for HTTPS, identity file. Password in HTTP_SSL_PASS env var, otherwise empty'") @@ -166,7 +175,7 @@ impl Options { .arg(Arg::from_usage("-m --mime-type [EXTENSION:MIME-TYPE]... 'Always return MIME-TYPE for files with EXTENSION'") .number_of_values(1) .use_delimiter(false) - .validator(|s| Options::mime_type_override_parse(s.into()).map(|_| ()))) + .validator_os(|s| Options::mime_type_override_parse(s.into()).map(|_| ()))) .arg(Arg::from_usage("--request-bandwidth [BYTES] 'Limit each request to returning BYTES per second, or 0 for unlimited. Default: 0'") .validator(|s| Options::bandwidth_parse(s.into()).map(|_| ()))) .arg(Arg::from_usage("-H --header [NAME: VALUE]... 'Headers to add to every response'") @@ -223,7 +232,11 @@ impl Options { ("$TEMP".to_string(), temp_dir()) }; let suffix = dir_pb.into_os_string().to_str().unwrap().replace(r"\\?\", "").replace(':', "").replace('\\', "/").replace('/', "-"); - let suffix = format!("http{}{}", if suffix.starts_with('-') { "" } else { "-" }, suffix); + let suffix = if suffix.len() >= 255 - (4 + 1) { + format!("http-{}", blake3::hash(suffix.as_bytes()).to_hex()) // avoid NAME_MAX + } else { + format!("http{}{}", if suffix.starts_with('-') { "" } else { "-" }, suffix) + }; (format!("{}{}{}", temp_s, @@ -240,7 +253,11 @@ impl Options { strip_extensions: matches.is_present("strip-extensions"), allow_writes: matches.is_present("allow-write"), encode_fs: !matches.is_present("no-encode"), + encoded_filesystem_limit: matches.value_of("encoded-filesystem").and_then(|s| Options::size_parse(s.into()).ok()), + encoded_generated_limit: matches.value_of("encoded-generated").and_then(|s| 
Options::size_parse(s.into()).ok()), + encoded_prune: matches.value_of("encoded-prune").and_then(|s| Options::age_parse(s.into()).ok()), loglevel: matches.occurrences_of("quiet").into(), + log_time: !matches.is_present("quiet-time"), log_colour: !matches.is_present("no-colour"), webdav: matches.is_present("webdav"), tls_data: matches.value_of("ssl").map(|id| ((id.to_string(), fs::canonicalize(id).unwrap()), env::var("HTTP_SSL_PASS").unwrap_or_default())), @@ -249,7 +266,7 @@ impl Options { generate_path_auth: generate_path_auth, proxies: matches.values_of("proxy").unwrap_or_default().map(Cow::from).map(Options::proxy_parse).map(Result::unwrap).collect(), proxy_redirs: matches.values_of("proxy-redir").unwrap_or_default().map(Cow::from).map(Options::proxy_parse).map(Result::unwrap).collect(), - mime_type_overrides: matches.values_of("mime-type") + mime_type_overrides: matches.values_of_os("mime-type") .unwrap_or_default() .map(Cow::from) .map(Options::mime_type_override_parse) @@ -281,7 +298,10 @@ impl Options { } fn credentials_validator(s: String) -> Result<(), String> { - if CREDENTIALS_REGEX.is_match(&s) { + if match s.split_once(':') { + Some((u, p)) => !u.is_empty() && !p.contains(':'), + None => !s.is_empty(), + } { Ok(()) } else { Err(format!("Global authentication credentials \"{}\" need be in format \"username[:password]\"", s)) @@ -289,7 +309,7 @@ impl Options { } fn path_credentials_validator(s: String) -> Result<(), String> { - if PATH_CREDENTIALS_REGEX.is_match(&s) { + if Options::parse_path_credentials(&s).is_some() { Ok(()) } else { Err(format!("Per-path authentication credentials \"{}\" need be in format \"path=[username[:password]]\"", s)) @@ -297,9 +317,24 @@ impl Options { } fn decode_path_credentials(s: &str) -> (String, Option<&str>) { - let creds = PATH_CREDENTIALS_REGEX.captures(s).unwrap(); + Options::parse_path_credentials(s).unwrap() + } - (Options::normalise_path(&creds[1]), creds.get(2).map(|m| m.as_str())) + fn parse_path_credentials(s: 
&str) -> Option<(String, Option<&str>)> { + let (path, creds) = s.split_once('=')?; + + Some((Options::normalise_path(path), + if creds.is_empty() { + None + } else { + if match creds.split_once(':') { + Some((u, p)) => u.is_empty() || p.contains(':'), + None => false, + } { + return None; + } + Some(creds) + })) } fn path_credentials_dupe(path: &str) -> ! { @@ -349,6 +384,40 @@ impl Options { u16::from_str(&s).map(|_| ()).map_err(|_| format!("{} is not a valid port number", s)) } + fn size_parse<'s>(s: Cow<'s, str>) -> Result { + let mut s = &s[..]; + if matches!(s.as_bytes().last(), Some(b'b' | b'B')) { + s = &s[..s.len() - 1]; + } + let mul: u64 = match s.as_bytes().last() { + Some(b'k' | b'K') => 1024u64, + Some(b'm' | b'M') => 1024u64 * 1024u64, + Some(b'g' | b'G') => 1024u64 * 1024u64 * 1024u64, + Some(b't' | b'T') => 1024u64 * 1024u64 * 1024u64 * 1024u64, + Some(b'p' | b'P') => 1024u64 * 1024u64 * 1024u64 * 1024u64 * 1024u64, + _ => 1, + }; + if mul != 1 { + s = &s[..s.len() - 1]; + } + s.parse().map(|size: u64| size * mul).map_err(|e| format!("{} not a valid (optionally-K/M/G/T/P[B]-suffixed) number: {}", s, e)) + } + + fn age_parse<'s>(s: Cow<'s, str>) -> Result { + let mut s = &s[..]; + let mul: u64 = match s.as_bytes().last() { + Some(b's') => 1, + Some(b'm') => 60, + Some(b'h') => 60 * 60, + Some(b'd') => 60 * 60 * 24, + _ => 1, + }; + if mul != 1 { + s = &s[..s.len() - 1]; + } + s.parse().map(|age: u64| age * mul).map_err(|e| format!("{} not a valid (optionally-s/m/h/d-suffixed) number: {}", s, e)) + } + fn proxy_parse<'s>(s: Cow<'s, str>) -> Result<(IpCidr, String), String> { match s.find(":") { None => Err(format!("{} not in HEADER-NAME:CIDR format", s)), @@ -391,20 +460,32 @@ impl Options { Ok(NonZeroU64::new(number.checked_mul(multiplier).ok_or_else(|| format!("{} * {} too big", number, multiplier))?)) } - fn mime_type_override_parse<'s>(s: Cow<'s, str>) -> Result<(String, Mime), String> { - match s.find(":") { - None => Err(format!("{} not in 
EXTENSION:MIME-TYPE format", s)), + fn mime_type_override_parse<'s>(s: Cow<'s, OsStr>) -> Result<(OsString, Mime), OsString> { + let b = s.as_encoded_bytes(); + match b.iter().position(|&b| b == b':') { + None => Err(format!("{} not in EXTENSION:MIME-TYPE format", s.to_string_lossy()).into()), Some(col_idx) => { - let mt = s[col_idx + 1..].parse().map_err(|()| format!("{} not a valid MIME type", &s[col_idx + 1..]))?; + let mime_s = str::from_utf8(&b[col_idx + 1..]).map_err(|e| format!("{} {}", s.to_string_lossy(), e))?; + let mt = mime_s.parse().map_err(|()| format!("{} not a valid MIME type", mime_s))?; - let mut s = s.into_owned(); + let mut s = s.into_owned().into_encoded_bytes(); s.truncate(col_idx); - Ok((s, mt)) + Ok((unsafe { OsString::from_encoded_bytes_unchecked(s) }, mt)) } } } fn header_parse(s: &str) -> Result<(String, Vec), String> { - HEADER_REGEX.captures(s).map(|hdr| (hdr[1].to_string(), hdr[2].as_bytes().to_vec())).ok_or_else(|| format!("\"{}\" invalid format", s)) + s.split_once(':') + .and_then(|(hn, mut hd)| { + hd = hd.trim_start(); + if !hn.is_empty() && !hd.is_empty() { + Some((hn, hd)) + } else { + None + } + }) + .map(|(hn, hd)| (hn.to_string(), hd.as_bytes().to_vec())) + .ok_or_else(|| format!("\"{}\" invalid format", s)) } } diff --git a/src/util/content_encoding.rs b/src/util/content_encoding.rs index 8e3087b..80f540a 100644 --- a/src/util/content_encoding.rs +++ b/src/util/content_encoding.rs @@ -1,36 +1,15 @@ use brotli::enc::backward_references::{BrotliEncoderParams, BrotliEncoderMode}; +use std::io::{self, BufReader, BufWriter, Error as IoError, Write}; +use iron::headers::{QualityItem, EncodingType, Encoding}; use brotli::enc::BrotliCompress as brotli_compress; use flate2::write::{DeflateEncoder, GzEncoder}; use flate2::Compression as Flate2Compression; -use std::io::{self, Error as IoError, Write}; -use iron::headers::{QualityItem, Encoding}; -use std::collections::BTreeSet; -use bzip2::write::BzEncoder; -use unicase::UniCase; use 
std::path::Path; +use std::ffi::OsStr; use std::fs::File; use blake3; -lazy_static! { - /// The list of content encodings we handle. - pub static ref SUPPORTED_ENCODINGS: Vec = { - let es = vec![Encoding::Gzip, Encoding::Deflate, Encoding::EncodingExt("br".to_string()), Encoding::EncodingExt("bzip2".to_string())]; - [es.clone(), es.into_iter().map(|e| Encoding::EncodingExt(format!("x-{}", e))).collect()].iter().flat_map(|e| e.clone()).collect() - }; - - /// The list of extensions not to encode. - pub static ref BLACKLISTED_ENCODING_EXTENSIONS: BTreeSet> = { - let raw = include_str!("../../assets/encoding_blacklist"); - raw.split('\n').map(str::trim).filter(|s| !s.is_empty() && !s.starts_with('#')).map(UniCase::new).collect() - }; - - pub static ref BROTLI_PARAMS: BrotliEncoderParams = BrotliEncoderParams { - mode: BrotliEncoderMode::BROTLI_MODE_TEXT, - ..Default::default() - }; -} - /// The minimal size at which to encode filesystem files. pub const MIN_ENCODING_SIZE: u64 = 1024; @@ -41,16 +20,21 @@ pub const MAX_ENCODING_SIZE: u64 = 100 * 1024 * 1024; pub const MIN_ENCODING_GAIN: f64 = 1.1; +// `true` if we know not to encode the given extension +// pub fn extension_is_blacklisted(ext: &str) -> bool { +include!(concat!(env!("OUT_DIR"), "/extensions.rs")); + + /// Find best supported encoding to use, or `None` for identity. pub fn response_encoding(requested: &mut [QualityItem]) -> Option { requested.sort_by_key(|e| e.quality); - requested.iter().filter(|e| e.quality.0 != 0).find(|e| SUPPORTED_ENCODINGS.contains(&e.item)).map(|e| e.item.clone()) + requested.iter().filter(|e| e.quality.0 != 0).map(|e| &e.item).find(|e| encoding_idx(e).is_some()).cloned() } /// Encode a string slice using a specified encoding or `None` if encoding failed or is not recognised. 
pub fn encode_str(dt: &str, enc: &Encoding) -> Option> { type EncodeT = fn(&str) -> Option>; - const STR_ENCODING_FNS: &[EncodeT] = &[encode_str_gzip, encode_str_deflate, encode_str_brotli, encode_str_bzip2]; + const STR_ENCODING_FNS: &[EncodeT] = &[encode_str_gzip, encode_str_deflate, encode_str_brotli]; encoding_idx(enc).and_then(|fi| STR_ENCODING_FNS[fi](dt)) } @@ -59,7 +43,7 @@ pub fn encode_str(dt: &str, enc: &Encoding) -> Option> { /// `false` if encoding failed, is not recognised or an I/O error occurred. pub fn encode_file(p: &Path, op: &Path, enc: &Encoding) -> bool { type EncodeT = fn(File, File) -> bool; - const FILE_ENCODING_FNS: &[EncodeT] = &[encode_file_gzip, encode_file_deflate, encode_file_brotli, encode_file_bzip2]; + const FILE_ENCODING_FNS: &[EncodeT] = &[encode_file_gzip, encode_file_deflate, encode_file_brotli]; encoding_idx(enc) .map(|fi| { @@ -73,7 +57,7 @@ pub fn encode_file(p: &Path, op: &Path, enc: &Encoding) -> bool { /// Encoding extension to use for encoded files, for example "gz" for gzip, or `None` if the encoding is not recognised. pub fn encoding_extension(enc: &Encoding) -> Option<&'static str> { - const ENCODING_EXTS: &[&str] = &["gz", "dflt", "br", "bz2"]; + const ENCODING_EXTS: &[&str] = &["gz", "dflt", "br"]; encoding_idx(enc).map(|ei| ENCODING_EXTS[ei]) } @@ -81,24 +65,16 @@ pub fn encoding_extension(enc: &Encoding) -> Option<&'static str> { /// Return the 256-bit BLAKE3 hash of the file denoted by the specified path. pub fn file_hash(p: &Path) -> Result { let mut ctx = blake3::Hasher::new(); - io::copy(&mut File::open(p)?, &mut ctx)?; + io::copy(&mut BufReader::with_capacity(1024 * 1024, File::open(p)?), &mut ctx)?; Ok(ctx.finalize()) } fn encoding_idx(enc: &Encoding) -> Option { - match *enc { - Encoding::Gzip => Some(0), - Encoding::Deflate => Some(1), - Encoding::EncodingExt(ref e) => { - match &e[..] 
{ - "x-gzip" => Some(0), - "x-deflate" => Some(1), - "br" | "x-br" => Some(2), - "bzip2" | "x-bzip2" => Some(3), - _ => None, - } - } + match enc.0 { + EncodingType::Gzip => Some(0), + EncodingType::Deflate => Some(1), + EncodingType::Brotli => Some(2), _ => None, } } @@ -110,9 +86,9 @@ macro_rules! encode_fn { cmp.write_all(dt.as_bytes()).ok().and_then(|_| cmp.finish().ok()) } - fn $file_fn_name(mut inf: File, outf: File) -> bool { - let mut cmp = $constructor(outf); - io::copy(&mut inf, &mut cmp).and_then(|_| cmp.finish()).is_ok() + fn $file_fn_name(inf: File, outf: File) -> bool { + let mut cmp = $constructor(BufWriter::with_capacity(1024 * 1024, outf)); + io::copy(&mut BufReader::with_capacity(1024 * 1024, inf), &mut cmp).and_then(|_| cmp.finish()).is_ok() } }; @@ -123,13 +99,22 @@ macro_rules! encode_fn { encode_fn!(encode_str_gzip, encode_file_gzip, GzEncoder, Flate2Compression::default()); encode_fn!(encode_str_deflate, encode_file_deflate, DeflateEncoder, Flate2Compression::default()); -encode_fn!(encode_str_bzip2, encode_file_bzip2, BzEncoder, Default::default()); +/// This should just be a pub const, but the new and default functions aren't const +pub fn brotli_params() -> BrotliEncoderParams { + BrotliEncoderParams { + mode: BrotliEncoderMode::BROTLI_MODE_TEXT, + quality: 9, + ..Default::default() + } +} fn encode_str_brotli(dt: &str) -> Option> { let mut ret = Vec::new(); - brotli_compress(&mut dt.as_bytes(), &mut ret, &BROTLI_PARAMS).ok().map(|_| ret) + brotli_compress(&mut dt.as_bytes(), &mut ret, &brotli_params()).ok().map(|_| ret) } - -fn encode_file_brotli(mut inf: File, mut outf: File) -> bool { - brotli_compress(&mut inf, &mut outf, &BROTLI_PARAMS).is_ok() +fn encode_file_brotli(inf: File, outf: File) -> bool { + brotli_compress(&mut BufReader::with_capacity(1024 * 1024, inf), + &mut BufWriter::with_capacity(1024 * 1024, outf), + &brotli_params()) + .is_ok() } diff --git a/src/util/mod.rs b/src/util/mod.rs index 574d9ab..c318b25 100644 --- 
a/src/util/mod.rs +++ b/src/util/mod.rs @@ -5,97 +5,61 @@ mod os; mod webdav; mod content_encoding; -use base64; use std::path::Path; use percent_encoding; use walkdir::WalkDir; use std::borrow::Cow; use rfsapi::RawFileData; -use std::{cmp, f64, str}; use std::time::SystemTime; -use std::collections::HashMap; -use time::{self, Duration, Tm}; use iron::{mime, Headers, Url}; -use base64::display::Base64Display; -use std::fmt::{self, Write as FmtWrite}; -use iron::error::HttpResult as HyperResult; +use time::{self, Duration, Tm}; +use std::{cmp, fmt, f64, mem, str}; +use mime_guess::guess_mime_type_opt; use std::fs::{self, FileType, Metadata, File}; use iron::headers::{HeaderFormat, UserAgent, Header}; -use mime_guess::{guess_mime_type_opt, get_mime_type_str}; use xml::name::{OwnedName as OwnedXmlName, Name as XmlName}; -use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError}; +use iron::error::{HttpResult as HyperResult, HttpError as HyperError}; +use iron::mime::{Mime, SubLevel as MimeSubLevel, TopLevel as MimeTopLevel}; +use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError, Write}; pub use self::os::*; pub use self::webdav::*; pub use self::content_encoding::*; -/// The generic HTML page to use as response to errors. -pub const ERROR_HTML: &str = include_str!("../../assets/error.html"); - -/// The HTML page to use as template for a requested directory's listing. -pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html"); - -/// The HTML page to use as template for a requested directory's listing for mobile devices. -pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html"); - -lazy_static! { - /// Collection of data to be injected into generated responses. 
- pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = { - let mut ass = HashMap::with_capacity(10); - ass.insert("favicon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("ico").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD)))); - ass.insert("dir_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD)))); - ass.insert("file_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD)))); - ass.insert("file_binary_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD)))); - ass.insert("file_image_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD)))); - ass.insert("file_text_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD)))); - ass.insert("back_arrow_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD)))); - ass.insert("new_dir_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("gif").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD)))); - ass.insert("delete_file_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("png").unwrap(), - 
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD)))); - ass.insert("rename_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("png").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD)))); - ass.insert("confirm_icon", - Cow::Owned(format!("data:{};base64,{}", - get_mime_type_str("png").unwrap(), - Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD)))); - ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js"))); - ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js"))); - ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js"))); - ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js"))); - ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js"))); - ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js"))); - ass - }; +pub trait HtmlResponseElement { + fn commit(self, data: &mut Vec); } +impl<'s> HtmlResponseElement for &'s str { + fn commit(self, data: &mut Vec) { + data.extend(self.as_bytes()); + } +} +impl<'s> HtmlResponseElement for fmt::Arguments<'s> { + fn commit(self, data: &mut Vec) { + let mut orig = unsafe { String::from_utf8_unchecked(mem::replace(data, Vec::new())) }; + let _ = fmt::write(&mut orig, self); + let _ = mem::replace(data, orig.into_bytes()); + } +} +impl)> HtmlResponseElement for F { + fn commit(self, data: &mut Vec) { + self(data) + } +} + +// The generic HTML page to use as response to errors. +// pub fn error_html(a0: ...) -> String +include!(concat!(env!("OUT_DIR"), "/error.html.rs")); + +// The HTML page to use as template for a requested directory's listing. +// pub fn directory_listing_html(a0: ...) 
-> String +include!(concat!(env!("OUT_DIR"), "/directory_listing.html.rs")); + +// The HTML page to use as template for a requested directory's listing for mobile devices. +// pub fn directory_listing_mobile_html(a0: ...) -> String +include!(concat!(env!("OUT_DIR"), "/directory_listing_mobile.html.rs")); + /// The port to start scanning from if no ports were given. pub const PORT_SCAN_LOWEST: u16 = 8000; @@ -126,9 +90,9 @@ impl Header for WwwAuthenticate { "WWW-Authenticate" } - /// Dummy impl returning an empty value, since we're only ever sending these - fn parse_header(_: &[Vec]) -> HyperResult { - Ok(WwwAuthenticate("".into())) + /// We only ever send these + fn parse_header>(_: &[T]) -> HyperResult { + unreachable!() } } @@ -138,21 +102,77 @@ impl HeaderFormat for WwwAuthenticate { } } +/// The `X-Last-Modified` header: milliseconds since epoch for PUTs. +/// +/// Required since XMLHttpRequests can't set `Date:`. +/// +/// No formatting, we only receive. +#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] +pub struct XLastModified(pub u64); + +impl Header for XLastModified { + fn header_name() -> &'static str { + "X-Last-Modified" + } + + fn parse_header>(data: &[T]) -> HyperResult { + if data.len() != 1 { + return Err(HyperError::Header); + } + Ok(XLastModified(str::from_utf8(data.last().ok_or(HyperError::Header).map(|d| d.as_ref())?).map_err(|_| HyperError::Header)? + .parse() + .map_err(|_| HyperError::Header)?)) + } +} + +/// We only ever receive these +impl HeaderFormat for XLastModified { + fn fmt_header(&self, _: &mut fmt::Formatter) -> fmt::Result { + unreachable!() + } +} + +/// The `X-OC-MTIME` header: seconds since epoch for PUTs (Total Commander Android WebDAV). +/// +/// Required since XMLHttpRequests can't set `Date:`. +/// +/// No formatting, we only receive. 
+#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] +pub struct XOcMTime(pub u64); + +impl Header for XOcMTime { + fn header_name() -> &'static str { + "X-OC-MTime" + } + + fn parse_header>(data: &[T]) -> HyperResult { + if data.len() != 1 { + return Err(HyperError::Header); + } + Ok(XOcMTime(str::from_utf8(data.last().ok_or(HyperError::Header).map(|d| d.as_ref())?).map_err(|_| HyperError::Header)? + .parse() + .map_err(|_| HyperError::Header)?)) + } +} + +/// We only ever receive these +impl HeaderFormat for XOcMTime { + fn fmt_header(&self, _: &mut fmt::Formatter) -> fmt::Result { + unreachable!() + } +} + #[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] pub struct CommaList>(pub I); impl + Clone> fmt::Display for CommaList { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut itr = self.0.clone(); - if let Some(item) = itr.next() { - item.fmt(f)?; - - for item in itr { + for (i, item) in self.0.clone().enumerate() { + if i != 0 { f.write_str(", ")?; - item.fmt(f)?; } + item.fmt(f)?; } - Ok(()) } } @@ -190,65 +210,50 @@ impl<'n> BorrowXmlName<'n> for OwnedXmlName { } #[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] -pub struct Spaces(pub usize); +pub struct Maybe(pub Option); -impl fmt::Display for Spaces { +impl fmt::Display for Maybe { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for _ in 0..self.0 { - f.write_char(' ')?; + if let Some(dt) = self.0.as_ref() { + dt.fmt(f)?; } Ok(()) } } +#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] +pub struct MsAsS(pub u64); - -/// Uppercase the first character of the supplied string. 
-/// -/// Based on http://stackoverflow.com/a/38406885/2851815 -/// -/// # Examples -/// -/// ``` -/// # use https::util::uppercase_first; -/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string()); -/// ``` -pub fn uppercase_first(s: &str) -> String { - let mut c = s.chars(); - match c.next() { - None => String::new(), - Some(f) => f.to_uppercase().collect::() + c.as_str(), +impl fmt::Display for MsAsS { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{:03}", self.0 / 1000, self.0 % 1000) } } + /// Percent-encode the last character if it's white space /// /// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`, /// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"` -pub fn encode_tail_if_trimmed(mut s: String) -> String { - let c = s.chars().rev().next(); - if c.map(|c| c.is_whitespace()).unwrap_or(false) { - let c = c.unwrap(); - - s.pop(); - s.push('%'); - - let mut cb = [0u8; 4]; - c.encode_utf8(&mut cb); - for b in cb.iter().take(c.len_utf8()) { - write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?"); +pub fn encode_tail_if_trimmed(mut s: Cow) -> Cow { + if let Some(c) = s.as_bytes().last().copied() { + if c.is_ascii_whitespace() { + let ed = unsafe { s.to_mut().as_mut_vec() }; + ed.pop(); + write!(ed, "%{:02X}", c).expect("Couldn't allocate two more characters?"); } - - s - } else { - s } + s } /// %-escape special characters in an URL -pub fn escape_specials>(s: S) -> String { - let s = s.as_ref(); - let mut ret = Vec::with_capacity(s.len()); +pub fn escape_specials(s: &str) -> Cow { + let replacements = s.bytes().filter(|b| matches!(b, b'%' | b'#' | b'?' | b'[' | b']' | b'"')).count(); + if replacements == 0 { + return s.into(); + } + + let mut ret = Vec::with_capacity(s.len() + replacements * 2); for &b in s.as_bytes() { match b { b'%' => ret.extend(b"%25"), @@ -256,10 +261,11 @@ pub fn escape_specials>(s: S) -> String { b'?' 
=> ret.extend(b"%3F"), b'[' => ret.extend(b"%5B"), b']' => ret.extend(b"%5D"), + b'"' => ret.extend(b"%22"), _ => ret.push(b), } } - unsafe { String::from_utf8_unchecked(ret) } + unsafe { String::from_utf8_unchecked(ret) }.into() } /// Check if the specified file is to be considered "binary". @@ -286,21 +292,6 @@ fn file_binary_impl(path: &Path) -> bool { .unwrap_or(true) } -/// Fill out an HTML template. -/// -/// All fields must be addressed even if formatted to be empty. -/// -/// # Examples -/// -/// ``` -/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML}; -/// println!(html_response(NOT_IMPLEMENTED_HTML, &["

Abolish the bourgeoisie!

"])); -/// ``` -pub fn html_response>(data: &str, format_strings: &[S]) -> String { - ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())), - |d, (k, v)| d.replace(&format!("{{{}}}", k), v)) -} - /// Return the path part of the URL. /// /// # Example @@ -313,16 +304,12 @@ pub fn html_response>(data: &str, format_strings: &[S]) -> String /// let url = Url::parse("127.0.0.1:8000/capitalism/русский/"); /// assert_eq!(url_path(&url), "capitalism/русский/"); /// ``` -pub fn url_path(url: &Url) -> String { - let path = url.path(); - if path == [""] { - "/".to_string() - } else { - path.into_iter().fold("".to_string(), - |cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("")))) - [1..] - .to_string() +pub fn url_path(url: &Url) -> Cow { + let mut path = url.as_ref().path(); + while path.bytes().nth(0) == Some(b'/') && path.bytes().nth(1) == Some(b'/') { + path = &path[1..]; } + percent_decode(path).unwrap_or(Cow::Borrowed("")) } /// Decode a percent-encoded string (like a part of a URL). @@ -344,7 +331,7 @@ pub fn file_time_modified_p(f: &Path) -> Tm { file_time_modified(&f.metadata().expect("Failed to get file metadata")) } -/// Get the timestamp of the file's last modification as a `time::Tm` in UTC. +/// Get the timestamp of the file's creation as a `time::Tm` in UTC. pub fn file_time_created_p(f: &Path) -> Tm { file_time_created(&f.metadata().expect("Failed to get file metadata")) } @@ -359,7 +346,7 @@ pub fn file_time_modified(m: &Metadata) -> Tm { file_time_impl(m.modified().expect("Failed to get file last modified date")) } -/// Get the timestamp of the file's last modification as a `time::Tm` in UTC. +/// Get the timestamp of the file's creation as a `time::Tm` in UTC. 
pub fn file_time_created(m: &Metadata) -> Tm { file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date")) } @@ -463,28 +450,68 @@ pub fn is_nonexistent_descendant_of, Po: AsRef>(who: Pw, o false } -/// Construct string representing a human-readable size. +/// Write a representation as a human-readable size. /// -/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com). -pub fn human_readable_size(s: u64) -> String { - lazy_static! { - static ref LN_KIB: f64 = 1024f64.log(f64::consts::E); +/// Stolen, adapted and inlined from [filesize.js](http://filesizejs.com). +pub struct HumanReadableSize(pub u64); + +impl fmt::Display for HumanReadableSize { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + const LN_KIB: f64 = 6.931471805599453; // 1024f64.ln() + + if self.0 == 0 { + f.write_str("0 B") + } else { + let num = self.0 as f64; + let exp = cmp::min(cmp::max((num.ln() / LN_KIB) as i32, 0), 8); + + let val = num / 2f64.powi(exp * 10); + + write!(f, + "{} {}", + if exp > 0 { + (val * 10f64).round() / 10f64 + } else { + val.round() + }, + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]) + } } +} - if s == 0 { - "0 B".to_string() - } else { - let num = s as f64; - let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8); +/// Replace `"` with `_` +pub struct NoDoubleQuotes<'s>(pub &'s str); - let val = num / 2f64.powi(exp * 10); +impl<'s> fmt::Display for NoDoubleQuotes<'s> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (i, s) in self.0.split('"').enumerate() { + if i != 0 { + f.write_str("_")?; + } + f.write_str(s)? 
+ } + Ok(()) + } +} - if exp > 0 { - (val * 10f64).round() / 10f64 - } else { - val.round() +/// Replace `&` with `&` and `<` with `<` +pub struct NoHtmlLiteral<'s>(pub &'s str); + +impl<'s> fmt::Display for NoHtmlLiteral<'s> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for mut s in self.0.split_inclusive(&['&', '<']) { + let last = s.as_bytes().last(); + if matches!(last, Some(b'&' | b'<')) { + s = &s[..s.len() - 1]; + } + f.write_str(s)?; + match last { + Some(b'&') => f.write_str("&")?, + Some(b'<') => f.write_str("<")?, + _ => {} } - .to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize] + } + Ok(()) } } @@ -527,9 +554,9 @@ fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData { let meta = f.metadata().expect("Failed to get requested file metadata"); RawFileData { mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) { - "application/octet-stream".parse().unwrap() + Mime(MimeTopLevel::Application, MimeSubLevel::OctetStream, Default::default()) // application/octet-stream } else { - "text/plain".parse().unwrap() + Mime(MimeTopLevel::Text, MimeSubLevel::Plain, Default::default()) // text/plain }), name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(), last_modified: file_time_modified(&meta), diff --git a/src/util/os/non_windows.rs b/src/util/os/non_windows.rs index 9ca661c..b4b5320 100644 --- a/src/util/os/non_windows.rs +++ b/src/util/os/non_windows.rs @@ -1,6 +1,9 @@ +use libc::{AT_SYMLINK_NOFOLLOW, UTIME_OMIT, AT_FDCWD, mode_t, futimens, utimensat, timespec, umask}; +use std::os::unix::fs::{PermissionsExt, MetadataExt}; use self::super::super::is_actually_file; -use os_str_generic::OsStrGenericExt; -use std::fs::Metadata; +use std::fs::{self, Metadata, File}; +use std::os::unix::ffi::OsStrExt; +use std::os::fd::AsRawFd; use std::path::Path; @@ -20,7 +23,7 @@ pub fn win32_file_attributes(meta: &Metadata, path: &Path) -> u32 { attr |= 
FILE_ATTRIBUTE_READONLY; } - if path.file_name().map(|n| n.starts_with(".")).unwrap_or(false) { + if path.file_name().map(|n| n.as_bytes().starts_with(b".")).unwrap_or(false) { attr |= FILE_ATTRIBUTE_HIDDEN; } @@ -35,3 +38,90 @@ pub fn win32_file_attributes(meta: &Metadata, path: &Path) -> u32 { attr } + + +/// `st_dev`-`st_ino`-`st_mtime` +pub fn file_etag(m: &Metadata) -> String { + format!("{:x}-{}-{}.{}", m.dev(), m.ino(), m.mtime(), m.mtime_nsec()) +} + + +/// Check if file is marked executable +pub fn file_executable(meta: &Metadata) -> bool { + (meta.permissions().mode() & 0o111) != 0 +} + + +static mut UMASK: u32 = 0; + +// as seen in https://docs.rs/ctor/latest/ctor/attr.ctor.html +#[used] +#[cfg_attr(any(target_os = "linux", target_os = "android"), link_section = ".init_array")] +#[cfg_attr(target_os = "freebsd", link_section = ".init_array")] +#[cfg_attr(target_os = "netbsd", link_section = ".init_array")] +#[cfg_attr(target_os = "openbsd", link_section = ".init_array")] +#[cfg_attr(target_os = "illumos", link_section = ".init_array")] +#[cfg_attr(any(target_os = "macos", target_os = "ios", target_os = "tvos"), link_section = "__DATA_CONST,__mod_init_func")] +#[cfg_attr(target_os = "windows", link_section = ".CRT$XCU")] +static LOAD_UMASK: unsafe extern "C" fn() = { + #[cfg_attr(any(target_os = "linux", target_os = "android"), link_section = ".text.startup")] + unsafe extern "C" fn load_umask() { + UMASK = umask(0o777) as u32; + umask(UMASK as mode_t); + } + load_umask +}; + +pub fn set_executable(f: &Path, ex: bool) { + let mut perm = match fs::metadata(f) { + Ok(meta) => meta.permissions(), + Err(_) => return, + }; + if ex { + perm.set_mode(perm.mode() | (0o111 & unsafe { !UMASK })); + } else { + perm.set_mode(perm.mode() & !0o111); + } + let _ = fs::set_permissions(f, perm); +} + + +const NO_TIMESPEC: timespec = timespec { + tv_sec: 0, + tv_nsec: UTIME_OMIT, +}; + +pub fn set_mtime_f(f: &File, ms: u64) { + set_times_f(f, Some(ms), None, None) +} + 
+pub fn set_times_f(f: &File, mtime_ms: Option, atime_ms: Option, _: Option) { + if mtime_ms.is_some() || atime_ms.is_some() { + unsafe { + futimens(f.as_raw_fd(), + [atime_ms.map(ms_to_timespec).unwrap_or(NO_TIMESPEC), mtime_ms.map(ms_to_timespec).unwrap_or(NO_TIMESPEC)].as_ptr()); + } + } +} + +pub fn set_mtime(f: &Path, ms: u64) { + set_times(f, Some(ms), None, None) +} + +pub fn set_times(f: &Path, mtime_ms: Option, atime_ms: Option, _: Option) { + if mtime_ms.is_some() || atime_ms.is_some() { + unsafe { + utimensat(AT_FDCWD, + f.as_os_str().as_bytes().as_ptr() as *const _, + [atime_ms.map(ms_to_timespec).unwrap_or(NO_TIMESPEC), mtime_ms.map(ms_to_timespec).unwrap_or(NO_TIMESPEC)].as_ptr(), + AT_SYMLINK_NOFOLLOW); + } + } +} + +fn ms_to_timespec(ms: u64) -> timespec { + timespec { + tv_sec: (ms / 1000) as i64, + tv_nsec: ((ms % 1000) * 1000_000) as i64, + } +} diff --git a/src/util/os/non_windows_non_macos.rs b/src/util/os/non_windows_non_macos.rs index 2c3248d..cca500b 100644 --- a/src/util/os/non_windows_non_macos.rs +++ b/src/util/os/non_windows_non_macos.rs @@ -1,21 +1,15 @@ -use std::os::unix::fs::{PermissionsExt, FileTypeExt}; -use libc::{O_RDONLY, c_ulong, close, ioctl, open}; -use std::os::unix::ffi::OsStrExt; +use std::os::unix::fs::{OpenOptionsExt, FileTypeExt}; use std::fs::{FileType, Metadata}; -use std::ffi::CString; +use std::os::fd::AsRawFd; +use std::os::raw::c_int; +use libc::O_NONBLOCK; use std::path::Path; +use std::fs::OpenOptions; -include!(concat!(env!("OUT_DIR"), "/ioctl-data/ioctl.rs")); - - -// Stolen from https://unix.superglobalmegacorp.com/Net2/newsrc/sys/stat.h.html -/// X for owner -const S_IXUSR: u32 = 0o000100; -/// X for group -const S_IXGRP: u32 = 0o000010; -/// X for other -const S_IXOTH: u32 = 0o000001; +extern "C" { + fn http_blkgetsize(fd: c_int) -> u64; +} /// OS-specific check for fileness @@ -30,25 +24,14 @@ pub fn file_length>(meta: &Metadata, path: &P) -> u64 { } fn file_length_impl(meta: &Metadata, path: &Path) -> u64 
{ - if is_device(&meta.file_type()) { - let mut block_count: c_ulong = 0; - - let path_c = CString::new(path.as_os_str().as_bytes()).unwrap(); - let dev_file = unsafe { open(path_c.as_ptr(), O_RDONLY) }; - if dev_file >= 0 { - let ok = unsafe { ioctl(dev_file, BLKGETSIZE, &mut block_count as *mut c_ulong) } == 0; - unsafe { close(dev_file) }; - - if ok { - return block_count as u64 * 512; + if meta.file_type().is_block_device() || meta.file_type().is_char_device() { + if let Ok(f) = OpenOptions::new().read(true).custom_flags(O_NONBLOCK).open(path) { + let size = unsafe { http_blkgetsize(f.as_raw_fd()) }; + if size != u64::MAX { + return size; } } } meta.len() } - -/// Check if file is marked executable -pub fn file_executable(meta: &Metadata) -> bool { - (meta.permissions().mode() & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 -} diff --git a/src/util/os/windows.rs b/src/util/os/windows.rs index 0686de2..48ad0a6 100644 --- a/src/util/os/windows.rs +++ b/src/util/os/windows.rs @@ -1,6 +1,9 @@ -use winapi::um::fileapi::GetFileAttributesW; +use winapi::um::fileapi::{GetFileAttributesW, SetFileTime}; +use winapi::shared::minwindef::FILETIME; +use std::os::windows::io::AsRawHandle; +use std::os::windows::fs::MetadataExt; use std::os::windows::ffi::OsStrExt; -use std::fs::Metadata; +use std::fs::{Metadata, File}; use std::path::Path; @@ -13,3 +16,66 @@ pub fn win32_file_attributes(_: &Metadata, path: &Path) -> u32 { unsafe { GetFileAttributesW(buf.as_ptr()) } } + + +/// `st_dev`-`st_ino`-`st_mtim` +pub fn file_etag(m: &Metadata) -> String { + format!("{:x}-{}-{}", + m.volume_serial_number().unwrap_or(0), + m.file_index().unwrap_or(0), + m.last_write_time()) +} + + +/// Check if file is marked executable +#[inline(always)] +pub fn file_executable(_: &Metadata) -> bool { + true +} + +#[inline(always)] +pub fn set_executable(_: &Path, _: bool) {} + + +pub fn set_mtime(f: &Path, ms: u64) { + set_times(f, Some(ms), None, None) +} + +pub fn set_mtime_f(f: &File, ms: u64) { + 
set_times_f(f, Some(ms), None, None) +} + + +const NO_FILETIME: FILETIME = FILETIME { + dwLowDateTime: 0, + dwHighDateTime: 0, +}; + +pub fn set_times_f(f: &File, mtime_ms: Option, atime_ms: Option, ctime_ms: Option) { + if mtime_ms.is_some() || atime_ms.is_some() || ctime_ms.is_some() { + unsafe { + SetFileTime(f.as_raw_handle(), + &ctime_ms.map(ms_to_FILETIME).unwrap_or(NO_FILETIME), + &atime_ms.map(ms_to_FILETIME).unwrap_or(NO_FILETIME), + &mtime_ms.map(ms_to_FILETIME).unwrap_or(NO_FILETIME)); + } + } +} + +pub fn set_times(f: &Path, mtime_ms: Option, atime_ms: Option, ctime_ms: Option) { + if mtime_ms.is_some() || atime_ms.is_some() || ctime_ms.is_some() { + if let Ok(f) = File::options().write(true).open(f) { + set_times_f(&f, mtime_ms, atime_ms, ctime_ms); + } + } +} + +/// FILETIME is in increments of 100ns, and in the Win32 epoch +#[allow(non_snake_case)] +fn ms_to_FILETIME(ms: u64) -> FILETIME { + let ft = (ms * 1000_0) + 116444736000000000; + FILETIME { + dwLowDateTime: (ft & 0xFFFFFFFF) as u32, + dwHighDateTime: (ft >> 32) as u32, + } +} diff --git a/src/util/os/windows_macos.rs b/src/util/os/windows_macos.rs index 7f10521..1ef2ca0 100644 --- a/src/util/os/windows_macos.rs +++ b/src/util/os/windows_macos.rs @@ -13,9 +13,3 @@ pub fn is_device(_: &FileType) -> bool { pub fn file_length>(meta: &Metadata, _: &P) -> u64 { meta.len() } - -/// Check if file is marked executable -#[inline(always)] -pub fn file_executable(_: &Metadata) -> bool { - true -} diff --git a/src/util/webdav.rs b/src/util/webdav.rs index 9f1b995..df6f3ac 100644 --- a/src/util/webdav.rs +++ b/src/util/webdav.rs @@ -16,11 +16,10 @@ macro_rules! xml_name { } } - -lazy_static! 
{ - /// HTTP methods we support for WebDAV level 1, as specified in https://tools.ietf.org/html/rfc2518, without locks - pub static ref DAV_LEVEL_1_METHODS: Vec = - ["COPY", "MKCOL", "MOVE", "PROPFIND", "PROPPATCH"].iter().map(|m| method::Extension(m.to_string())).collect(); +/// HTTP methods we support for WebDAV level 1, as specified in https://tools.ietf.org/html/rfc2518, without locks +pub fn dav_level_1_methods(writes: bool) -> &'static [method::Method] { + static METHODS: [method::Method; 5] = [method::DavPropfind, method::DavCopy, method::DavMkcol, method::DavMove, method::DavProppatch]; + if writes { &METHODS[..] } else { &METHODS[..1] } } /// Prefix and namespace URI for generic WebDAV elements @@ -81,9 +80,9 @@ impl Header for Dav { "DAV" } - /// Dummy impl returning an empty value, since we're only ever sending these - fn parse_header(_: &[Vec]) -> HyperResult { - Ok(Dav(&[])) + /// We only ever send these + fn parse_header>(_: &[T]) -> HyperResult { + unreachable!() } } @@ -98,6 +97,7 @@ impl HeaderFormat for Dav { } } + /// The [Depth header](https://tools.ietf.org/html/rfc2518#section-9.2). #[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] pub enum Depth { @@ -122,12 +122,12 @@ impl Header for Depth { "Depth" } - fn parse_header(raw: &[Vec]) -> HyperResult { + fn parse_header>(raw: &[T]) -> HyperResult { if raw.len() != 1 { return Err(HyperError::Header); } - Ok(match &unsafe { raw.get_unchecked(0) }[..] { + Ok(match unsafe { raw.get_unchecked(0) }.as_ref() { b"0" => Depth::Zero, b"1" => Depth::One, b"infinity" => Depth::Infinity, @@ -153,6 +153,7 @@ impl fmt::Display for Depth { } } + /// The [Destination header](https://tools.ietf.org/html/rfc2518#section-9.3). 
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] pub struct Destination(pub GenericUrl); @@ -162,12 +163,12 @@ impl Header for Destination { "Destination" } - fn parse_header(raw: &[Vec]) -> HyperResult { + fn parse_header>(raw: &[T]) -> HyperResult { if raw.len() != 1 { return Err(HyperError::Header); } - let url = str::from_utf8(&unsafe { raw.get_unchecked(0) }).map_err(|_| HyperError::Header)?; + let url = str::from_utf8(unsafe { raw.get_unchecked(0) }.as_ref()).map_err(|_| HyperError::Header)?; GenericUrl::parse(url).map(Destination).map_err(HyperError::Uri) } } @@ -185,6 +186,7 @@ impl fmt::Display for Destination { } } + /// The [Overwrite header](https://tools.ietf.org/html/rfc2518#section-9.6). #[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)] pub struct Overwrite(pub bool); @@ -194,18 +196,13 @@ impl Header for Overwrite { "Overwrite" } - fn parse_header(raw: &[Vec]) -> HyperResult { + fn parse_header>(raw: &[T]) -> HyperResult { if raw.len() != 1 { return Err(HyperError::Header); } - - let val = unsafe { raw.get_unchecked(0) }; - if val.len() != 1 { - return Err(HyperError::Header); - } - match unsafe { val.get_unchecked(0) } { - b'T' => Ok(Overwrite(true)), - b'F' => Ok(Overwrite(false)), + match unsafe { raw.get_unchecked(0) }.as_ref() { + b"T" => Ok(Overwrite(true)), + b"F" => Ok(Overwrite(false)), _ => Err(HyperError::Header), } } diff --git a/vendor/hyper-0.10.16/.cargo_vcs_info.json b/vendor/hyper-0.10.16/.cargo_vcs_info.json new file mode 100644 index 0000000..b3f72ac --- /dev/null +++ b/vendor/hyper-0.10.16/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": "39a4bd063c1fc7d4b93661dba7d76fef2d8c5c9d" + } +} diff --git a/vendor/hyper-0.10.16/Cargo.toml b/vendor/hyper-0.10.16/Cargo.toml new file mode 100644 index 0000000..01c2583 --- /dev/null +++ b/vendor/hyper-0.10.16/Cargo.toml @@ -0,0 +1,61 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will 
automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "hyper" +version = "0.10.16" +authors = ["Sean McArthur ", "Jonathan Reem "] +include = ["Cargo.toml", "LICENSE", "src/**/*"] +description = "A modern HTTP library." +homepage = "http://hyper.rs" +documentation = "https://docs.rs/hyper" +readme = "README.md" +keywords = ["http", "hyper", "hyperium"] +categories = ["web-programming::http-client", "web-programming::http-server"] +license = "MIT" +repository = "https://github.com/hyperium/hyper" +[dependencies.base64] +version = "0.9.0" + +[dependencies.httparse] +version = "1.0" + +[dependencies.mime] +version = "0.2" + +[dependencies.num_cpus] +version = "1.0" + +[dependencies.time] +version = "0.1" + +[dependencies.traitobject] +git = "https://github.com/reem/rust-traitobject" +rev = "b3471a15917b2caf5a8b27debb0b4b390fc6634f" + +[dependencies.typeable] +version = "0.1" + +[dependencies.unicase] +version = "1.0" + +[dependencies.url] +version = "1.0" + +[dependencies] +crossbeam-channel = "0.5" +[dependencies.smallvec] +version = "1.13" +features = ["union"] + +[features] +nightly = [] diff --git a/vendor/hyper-0.10.16/Cargo.toml.orig b/vendor/hyper-0.10.16/Cargo.toml.orig new file mode 100644 index 0000000..ddcb16b --- /dev/null +++ b/vendor/hyper-0.10.16/Cargo.toml.orig @@ -0,0 +1,39 @@ +[package] + +name = "hyper" +version = "0.10.16" # remember to update html_root_url +description = "A modern HTTP library." 
+readme = "README.md" +homepage = "http://hyper.rs" +documentation = "https://docs.rs/hyper" +repository = "https://github.com/hyperium/hyper" +license = "MIT" +authors = ["Sean McArthur ", + "Jonathan Reem "] +keywords = ["http", "hyper", "hyperium"] +categories = ["web-programming::http-client", "web-programming::http-server"] + +include = [ + "Cargo.toml", + "LICENSE", + "src/**/*" +] + +[dependencies] +base64 = "0.9.0" +httparse = "1.0" +language-tags = "0.2" +log = "0.3" +mime = "0.2" +num_cpus = "1.0" +time = "0.1" +traitobject = "0.1" +typeable = "0.1" +unicase = "1.0" +url = "1.0" + +[dev-dependencies] +env_logger = "0.4" + +[features] +nightly = [] diff --git a/vendor/hyper-0.10.16/LICENSE b/vendor/hyper-0.10.16/LICENSE new file mode 100644 index 0000000..557b7e5 --- /dev/null +++ b/vendor/hyper-0.10.16/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014 Sean McArthur + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ diff --git a/vendor/hyper-0.10.16/src/buffer.rs b/vendor/hyper-0.10.16/src/buffer.rs new file mode 100644 index 0000000..1460c56 --- /dev/null +++ b/vendor/hyper-0.10.16/src/buffer.rs @@ -0,0 +1,198 @@ +use std::cmp; +use std::io::{self, Read, BufRead}; + +pub struct BufReader { + inner: R, + buf: Vec, + pos: usize, + cap: usize, +} + +const INIT_BUFFER_SIZE: usize = 4096; +pub const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100; + +impl BufReader { + #[inline] + pub fn new(rdr: R) -> BufReader { + BufReader::with_capacity(rdr, INIT_BUFFER_SIZE) + } + + #[inline] + pub fn from_parts(rdr: R, buf: Vec, pos: usize, cap: usize) -> BufReader { + BufReader { + inner: rdr, + buf: buf, + pos: pos, + cap: cap, + } + } + + #[inline] + pub fn with_capacity(rdr: R, cap: usize) -> BufReader { + BufReader { + inner: rdr, + buf: vec![0; cap], + pos: 0, + cap: 0, + } + } + + #[inline] + pub fn get_ref(&self) -> &R { &self.inner } + + #[inline] + pub fn get_mut(&mut self) -> &mut R { &mut self.inner } + + #[inline] + pub fn get_buf(&self) -> &[u8] { + if self.pos < self.cap { + trace!("get_buf [u8; {}][{}..{}]", self.buf.len(), self.pos, self.cap); + &self.buf[self.pos..self.cap] + } else { + trace!("get_buf []"); + &[] + } + } + + /// Extracts the buffer from this reader. Return the current cursor position + /// and the position of the last valid byte. + /// + /// This operation does not copy the buffer. Instead, it directly returns + /// the internal buffer. As a result, this reader will no longer have any + /// buffered contents and any subsequent read from this reader will not + /// include the returned buffered contents. Note that subsequent reads may + /// buffer. 
+ #[inline] + pub fn take_buf(&mut self) -> (Vec, usize, usize) { + let (pos, cap) = (self.pos, self.cap); + self.pos = 0; + self.cap = 0; + + let mut output = vec![]; + ::std::mem::swap(&mut self.buf, &mut output); + (output, pos, cap) + } + + #[inline] + pub fn into_inner(self) -> R { self.inner } + + #[inline] + pub fn into_parts(self) -> (R, Vec, usize, usize) { + (self.inner, self.buf, self.pos, self.cap) + } + + #[inline] + pub fn read_into_buf(&mut self) -> io::Result { + self.maybe_reserve(); + let v = &mut self.buf; + trace!("read_into_buf buf[{}..{}]", self.cap, v.len()); + if self.cap < v.capacity() { + let nread = try!(self.inner.read(&mut v[self.cap..])); + self.cap += nread; + Ok(nread) + } else { + trace!("read_into_buf at full capacity"); + Ok(0) + } + } + + #[inline] + fn maybe_reserve(&mut self) { + let cap = self.buf.capacity(); + if self.cap == cap && cap < MAX_BUFFER_SIZE { + self.buf.reserve(cmp::min(cap * 4, MAX_BUFFER_SIZE) - cap); + let new = self.buf.capacity() - self.buf.len(); + trace!("reserved {}", new); + unsafe { grow_zerofill(&mut self.buf, new) } + } + } +} + +#[inline] +unsafe fn grow_zerofill(buf: &mut Vec, additional: usize) { + use std::ptr; + let len = buf.len(); + buf.set_len(len + additional); + ptr::write_bytes(buf.as_mut_ptr().offset(len as isize), 0, additional); +} + +impl Read for BufReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if self.cap == self.pos && buf.len() >= self.buf.len() { + return self.inner.read(buf); + } + let nread = { + let mut rem = try!(self.fill_buf()); + try!(rem.read(buf)) + }; + self.consume(nread); + Ok(nread) + } +} + +impl BufRead for BufReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + if self.pos == self.cap { + self.cap = try!(self.inner.read(&mut self.buf)); + self.pos = 0; + } + Ok(&self.buf[self.pos..self.cap]) + } + + #[inline] + fn consume(&mut self, amt: usize) { + self.pos = cmp::min(self.pos + amt, self.cap); + if self.pos == self.cap { + self.pos = 0; + 
self.cap = 0; + } + } +} + +#[cfg(test)] +mod tests { + + use std::io::{self, Read, BufRead}; + use super::BufReader; + + struct SlowRead(u8); + + impl Read for SlowRead { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let state = self.0; + self.0 += 1; + (&match state % 3 { + 0 => b"foo", + 1 => b"bar", + _ => b"baz", + }[..]).read(buf) + } + } + + #[test] + fn test_consume_and_get_buf() { + let mut rdr = BufReader::new(SlowRead(0)); + rdr.read_into_buf().unwrap(); + rdr.consume(1); + assert_eq!(rdr.get_buf(), b"oo"); + rdr.read_into_buf().unwrap(); + rdr.read_into_buf().unwrap(); + assert_eq!(rdr.get_buf(), b"oobarbaz"); + rdr.consume(5); + assert_eq!(rdr.get_buf(), b"baz"); + rdr.consume(3); + assert_eq!(rdr.get_buf(), b""); + assert_eq!(rdr.pos, 0); + assert_eq!(rdr.cap, 0); + } + + #[test] + fn test_resize() { + let raw = b"hello world"; + let mut rdr = BufReader::with_capacity(&raw[..], 5); + rdr.read_into_buf().unwrap(); + assert_eq!(rdr.get_buf(), b"hello"); + rdr.read_into_buf().unwrap(); + assert_eq!(rdr.get_buf(), b"hello world"); + } +} diff --git a/vendor/hyper-0.10.16/src/error.rs b/vendor/hyper-0.10.16/src/error.rs new file mode 100644 index 0000000..6b2ec71 --- /dev/null +++ b/vendor/hyper-0.10.16/src/error.rs @@ -0,0 +1,207 @@ +//! Error and Result module. +use std::error::Error as StdError; +use std::fmt; +use std::io::Error as IoError; +use std::str::Utf8Error; +use std::string::FromUtf8Error; + +use httparse; +use url; + +#[cfg(feature = "openssl")] +use openssl::ssl::error::SslError; + +use self::Error::{ + Method, + Uri, + Version, + Header, + Status, + Io, + Ssl, + TooLarge, + Utf8 +}; + +pub use url::ParseError; + +/// Result type often returned from methods that can have hyper `Error`s. +pub type Result = ::std::result::Result; + +/// A set of errors that can occur parsing HTTP streams. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// An invalid `Method`, such as `GE,T`. 
+ Method, + /// An invalid `RequestUri`, such as `exam ple.domain`. + Uri(url::ParseError), + /// An invalid `HttpVersion`, such as `HTP/1.1` + Version, + /// An invalid `Header`. + Header, + /// A message head is too large to be reasonable. + TooLarge, + /// An invalid `Status`, such as `1337 ELITE`. + Status, + /// An `io::Error` that occurred while trying to read or write to a network stream. + Io(IoError), + /// An error from a SSL library. + Ssl(Box), + /// Parsing a field as string failed + Utf8(Utf8Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Uri(ref e) => fmt::Display::fmt(e, f), + Io(ref e) => fmt::Display::fmt(e, f), + Ssl(ref e) => fmt::Display::fmt(e, f), + Utf8(ref e) => fmt::Display::fmt(e, f), + ref e => f.write_str(e.description()), + } + } +} + +impl StdError for Error { + fn description(&self) -> &str { + match *self { + Method => "Invalid Method specified", + Version => "Invalid HTTP version specified", + Header => "Invalid Header provided", + TooLarge => "Message head is too large", + Status => "Invalid Status provided", + Uri(ref e) => e.description(), + Io(ref e) => e.description(), + Ssl(ref e) => e.description(), + Utf8(ref e) => e.description(), + } + } + + fn cause(&self) -> Option<&StdError> { + match *self { + Io(ref error) => Some(error), + Ssl(ref error) => Some(&**error), + Uri(ref error) => Some(error), + Utf8(ref error) => Some(error), + _ => None, + } + } +} + +impl From for Error { + fn from(err: IoError) -> Error { + Io(err) + } +} + +impl From for Error { + fn from(err: url::ParseError) -> Error { + Uri(err) + } +} + +#[cfg(feature = "openssl")] +impl From for Error { + fn from(err: SslError) -> Error { + match err { + SslError::StreamError(err) => Io(err), + err => Ssl(Box::new(err)), + } + } +} + +impl From for Error { + fn from(err: Utf8Error) -> Error { + Utf8(err) + } +} + +impl From for Error { + fn from(err: FromUtf8Error) -> Error { + 
Utf8(err.utf8_error()) + } +} + +impl From for Error { + fn from(err: httparse::Error) -> Error { + match err { + httparse::Error::HeaderName => Header, + httparse::Error::HeaderValue => Header, + httparse::Error::NewLine => Header, + httparse::Error::Status => Status, + httparse::Error::Token => Header, + httparse::Error::TooManyHeaders => TooLarge, + httparse::Error::Version => Version, + } + } +} + +#[cfg(test)] +mod tests { + use std::error::Error as StdError; + use std::io; + use httparse; + use url; + use super::Error; + use super::Error::*; + + #[test] + fn test_cause() { + let orig = io::Error::new(io::ErrorKind::Other, "other"); + let desc = orig.description().to_owned(); + let e = Io(orig); + assert_eq!(e.cause().unwrap().description(), desc); + } + + macro_rules! from { + ($from:expr => $error:pat) => { + match Error::from($from) { + e @ $error => { + assert!(e.description().len() > 5); + } , + _ => panic!("{:?}", $from) + } + } + } + + macro_rules! from_and_cause { + ($from:expr => $error:pat) => { + match Error::from($from) { + e @ $error => { + let desc = e.cause().unwrap().description(); + assert_eq!(desc, $from.description().to_owned()); + assert_eq!(desc, e.description()); + }, + _ => panic!("{:?}", $from) + } + } + } + + #[test] + fn test_from() { + + from_and_cause!(io::Error::new(io::ErrorKind::Other, "other") => Io(..)); + from_and_cause!(url::ParseError::EmptyHost => Uri(..)); + + from!(httparse::Error::HeaderName => Header); + from!(httparse::Error::HeaderName => Header); + from!(httparse::Error::HeaderValue => Header); + from!(httparse::Error::NewLine => Header); + from!(httparse::Error::Status => Status); + from!(httparse::Error::Token => Header); + from!(httparse::Error::TooManyHeaders => TooLarge); + from!(httparse::Error::Version => Version); + } + + #[cfg(feature = "openssl")] + #[test] + fn test_from_ssl() { + use openssl::ssl::error::SslError; + + from!(SslError::StreamError( + io::Error::new(io::ErrorKind::Other, "ssl negotiation")) 
=> Io(..)); + from_and_cause!(SslError::SslSessionClosed => Ssl(..)); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/accept.rs b/vendor/hyper-0.10.16/src/header/common/accept.rs new file mode 100644 index 0000000..f58a143 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/accept.rs @@ -0,0 +1,143 @@ +use mime::Mime; + +use header::{QualityItem, qitem}; + +header! { + /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2) + /// + /// The `Accept` header field can be used by user agents to specify + /// response media types that are acceptable. Accept header fields can + /// be used to indicate that the request is specifically limited to a + /// small set of desired types, as in the case of a request for an + /// in-line image + /// + /// # ABNF + /// ```plain + /// Accept = #( media-range [ accept-params ] ) + /// + /// media-range = ( "*/*" + /// / ( type "/" "*" ) + /// / ( type "/" subtype ) + /// ) *( OWS ";" OWS parameter ) + /// accept-params = weight *( accept-ext ) + /// accept-ext = OWS ";" OWS token [ "=" ( token / quoted-string ) ] + /// ``` + /// + /// # Example values + /// * `audio/*; q=0.2, audio/basic` (`*` value won't parse correctly) + /// * `text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, Accept, qitem}; + /// use hyper::mime::{Mime, TopLevel, SubLevel}; + /// + /// let mut headers = Headers::new(); + /// + /// headers.set( + /// Accept(vec![ + /// qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), + /// ]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, Accept, qitem}; + /// use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// Accept(vec![ + /// qitem(Mime(TopLevel::Application, SubLevel::Json, + /// vec![(Attr::Charset, Value::Utf8)])), + /// ]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, Accept, 
QualityItem, Quality, qitem}; + /// use hyper::mime::{Mime, TopLevel, SubLevel}; + /// + /// let mut headers = Headers::new(); + /// + /// headers.set( + /// Accept(vec![ + /// qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), + /// qitem(Mime(TopLevel::Application, + /// SubLevel::Ext("xhtml+xml".to_owned()), vec![])), + /// QualityItem::new(Mime(TopLevel::Application, SubLevel::Xml, vec![]), + /// Quality(900)), + /// qitem(Mime(TopLevel::Image, + /// SubLevel::Ext("webp".to_owned()), vec![])), + /// QualityItem::new(Mime(TopLevel::Star, SubLevel::Star, vec![]), + /// Quality(800)) + /// ]) + /// ); + /// ``` + /// + /// # Notes + /// * Using always Mime types to represent `media-range` differs from the ABNF. + /// * **FIXME**: `accept-ext` is not supported. + (Accept, "Accept") => (QualityItem)+ + + test_accept { + // Tests from the RFC + // FIXME: Test fails, first value containing a "*" fails to parse + // test_header!( + // test1, + // vec![b"audio/*; q=0.2, audio/basic"], + // Some(HeaderField(vec![ + // QualityItem::new(Mime(TopLevel::Audio, SubLevel::Star, vec![]), Quality(200)), + // qitem(Mime(TopLevel::Audio, SubLevel::Ext("basic".to_owned()), vec![])), + // ]))); + test_header!( + test2, + vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"], + Some(HeaderField(vec![ + QualityItem::new(Mime(TopLevel::Text, SubLevel::Plain, vec![]), Quality(500)), + qitem(Mime(TopLevel::Text, SubLevel::Html, vec![])), + QualityItem::new( + Mime(TopLevel::Text, SubLevel::Ext("x-dvi".to_owned()), vec![]), + Quality(800)), + qitem(Mime(TopLevel::Text, SubLevel::Ext("x-c".to_owned()), vec![])), + ]))); + // Custom tests + test_header!( + test3, + vec![b"text/plain; charset=utf-8"], + Some(Accept(vec![ + qitem(Mime(TopLevel::Text, SubLevel::Plain, vec![(Attr::Charset, Value::Utf8)])), + ]))); + test_header!( + test4, + vec![b"text/plain; charset=utf-8; q=0.5"], + Some(Accept(vec![ + QualityItem::new(Mime(TopLevel::Text, + SubLevel::Plain, 
vec![(Attr::Charset, Value::Utf8)]), + Quality(500)), + ]))); + } +} + +impl Accept { + /// A constructor to easily create `Accept: */*`. + pub fn star() -> Accept { + Accept(vec![qitem(mime!(Star/Star))]) + } + + /// A constructor to easily create `Accept: application/json`. + pub fn json() -> Accept { + Accept(vec![qitem(mime!(Application/Json))]) + } + + /// A constructor to easily create `Accept: text/*`. + pub fn text() -> Accept { + Accept(vec![qitem(mime!(Text/Star))]) + } + + /// A constructor to easily create `Accept: image/*`. + pub fn image() -> Accept { + Accept(vec![qitem(mime!(Image/Star))]) + } +} + + +bench_header!(bench, Accept, { vec![b"text/plain; q=0.5, text/html".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/accept_charset.rs b/vendor/hyper-0.10.16/src/header/common/accept_charset.rs new file mode 100644 index 0000000..7e4d124 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/accept_charset.rs @@ -0,0 +1,56 @@ +use header::{Charset, QualityItem}; + +header! { + /// `Accept-Charset` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.3) + /// + /// The `Accept-Charset` header field can be sent by a user agent to + /// indicate what charsets are acceptable in textual response content. + /// This field allows user agents capable of understanding more + /// comprehensive or special-purpose charsets to signal that capability + /// to an origin server that is capable of representing information in + /// those charsets. 
+ /// + /// # ABNF + /// ```plain + /// Accept-Charset = 1#( ( charset / "*" ) [ weight ] ) + /// ``` + /// + /// # Example values + /// * `iso-8859-5, unicode-1-1;q=0.8` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AcceptCharset, Charset, qitem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptCharset(vec![qitem(Charset::Us_Ascii)]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptCharset, Charset, Quality, QualityItem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptCharset(vec![ + /// QualityItem::new(Charset::Us_Ascii, Quality(900)), + /// QualityItem::new(Charset::Iso_8859_10, Quality(200)), + /// ]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptCharset, Charset, qitem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))]) + /// ); + /// ``` + (AcceptCharset, "Accept-Charset") => (QualityItem)+ + + test_accept_charset { + /// Testcase from RFC + test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/accept_encoding.rs b/vendor/hyper-0.10.16/src/header/common/accept_encoding.rs new file mode 100644 index 0000000..cbc0de6 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/accept_encoding.rs @@ -0,0 +1,71 @@ +use header::{Encoding, QualityItem}; + +header! { + /// `Accept-Encoding` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.4) + /// + /// The `Accept-Encoding` header field can be used by user agents to + /// indicate what response content-codings are + /// acceptable in the response. An `identity` token is used as a synonym + /// for "no encoding" in order to communicate when no encoding is + /// preferred. 
+ /// + /// # ABNF + /// ```plain + /// Accept-Encoding = #( codings [ weight ] ) + /// codings = content-coding / "identity" / "*" + /// ``` + /// + /// # Example values + /// * `compress, gzip` + /// * `` + /// * `*` + /// * `compress;q=0.5, gzip;q=1` + /// * `gzip;q=1.0, identity; q=0.5, *;q=0` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AcceptEncoding, Encoding, qitem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptEncoding(vec![qitem(Encoding::Chunked)]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptEncoding, Encoding, qitem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptEncoding(vec![ + /// qitem(Encoding::Chunked), + /// qitem(Encoding::Gzip), + /// qitem(Encoding::Deflate), + /// ]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptEncoding, Encoding, QualityItem, Quality, qitem}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptEncoding(vec![ + /// qitem(Encoding::Chunked), + /// QualityItem::new(Encoding::Gzip, Quality(600)), + /// QualityItem::new(Encoding::EncodingExt("*".to_owned()), Quality(0)), + /// ]) + /// ); + /// ``` + (AcceptEncoding, "Accept-Encoding") => (QualityItem)* + + test_accept_encoding { + // From the RFC + test_header!(test1, vec![b"compress, gzip"]); + test_header!(test2, vec![b""], Some(AcceptEncoding(vec![]))); + test_header!(test3, vec![b"*"]); + // Note: Removed quality 1 from gzip + test_header!(test4, vec![b"compress;q=0.5, gzip"]); + // Note: Removed quality 1 from gzip + test_header!(test5, vec![b"gzip, identity; q=0.5, *;q=0"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/accept_ranges.rs b/vendor/hyper-0.10.16/src/header/common/accept_ranges.rs new file mode 100644 index 0000000..6d38c7c --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/accept_ranges.rs @@ -0,0 +1,96 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; + +header! 
{ + /// `Accept-Ranges` header, defined in + /// [RFC7233](http://tools.ietf.org/html/rfc7233#section-2.3) + /// + /// The `Accept-Ranges` header field allows a server to indicate that it + /// supports range requests for the target resource. + /// + /// # ABNF + /// ```plain + /// Accept-Ranges = acceptable-ranges + /// acceptable-ranges = 1#range-unit / \"none\" + /// + /// # Example values + /// * `bytes` + /// * `none` + /// * `unknown-unit` + /// ``` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; + /// + /// let mut headers = Headers::new(); + /// headers.set(AcceptRanges(vec![RangeUnit::Bytes])); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; + /// + /// let mut headers = Headers::new(); + /// headers.set(AcceptRanges(vec![RangeUnit::None])); + /// ``` + /// ``` + /// use hyper::header::{Headers, AcceptRanges, RangeUnit}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AcceptRanges(vec![ + /// RangeUnit::Unregistered("nibbles".to_owned()), + /// RangeUnit::Bytes, + /// RangeUnit::Unregistered("doublets".to_owned()), + /// RangeUnit::Unregistered("quadlets".to_owned()), + /// ]) + /// ); + /// ``` + (AcceptRanges, "Accept-Ranges") => [RangeUnit] + + test_acccept_ranges { + test_header!(test1, vec![b"bytes"]); + test_header!(test2, vec![b"none"]); + } +} + +/// Range Units, described in [RFC7233](http://tools.ietf.org/html/rfc7233#section-2) +/// +/// A representation can be partitioned into subranges according to +/// various structural units, depending on the structure inherent in the +/// representation's media type. +/// +/// # ABNF +/// ```plain +/// range-unit = bytes-unit / other-range-unit +/// bytes-unit = "bytes" +/// other-range-unit = token +/// ``` +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum RangeUnit { + /// Indicating byte-range requests are supported. + Bytes, + /// Reserved as keyword, indicating no ranges are supported. 
+ None, +} + + +impl FromStr for RangeUnit { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + match s { + "bytes" => Ok(RangeUnit::Bytes), + "none" => Ok(RangeUnit::None), + // FIXME: Check if s is really a Token + _ => Err(::Error::Method), + } + } +} + +impl Display for RangeUnit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + RangeUnit::Bytes => f.write_str("bytes"), + RangeUnit::None => f.write_str("none"), + } + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_allow_credentials.rs b/vendor/hyper-0.10.16/src/header/common/access_control_allow_credentials.rs new file mode 100644 index 0000000..2b7b557 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_allow_credentials.rs @@ -0,0 +1,89 @@ +use std::fmt::{self, Display}; +use std::str; +use unicase::UniCase; +use header::{Header, HeaderFormat}; + +/// `Access-Control-Allow-Credentials` header, part of +/// [CORS](http://www.w3.org/TR/cors/#access-control-allow-headers-response-header) +/// +/// > The Access-Control-Allow-Credentials HTTP response header indicates whether the +/// > response to request can be exposed when the credentials flag is true. When part +/// > of the response to an preflight request it indicates that the actual request can +/// > be made with credentials. The Access-Control-Allow-Credentials HTTP header must +/// > match the following ABNF: +/// +/// # ABNF +/// ```plain +/// Access-Control-Allow-Credentials: "Access-Control-Allow-Credentials" ":" "true" +/// ``` +/// +/// Since there is only one acceptable field value, the header struct does not accept +/// any values at all. Setting an empty `AccessControlAllowCredentials` header is +/// sufficient. See the examples below. 
+/// +/// # Example values +/// * "true" +/// +/// # Examples +/// ``` +/// # extern crate hyper; +/// # fn main() { +/// +/// use hyper::header::{Headers, AccessControlAllowCredentials}; +/// +/// let mut headers = Headers::new(); +/// headers.set(AccessControlAllowCredentials); +/// # } +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct AccessControlAllowCredentials; + +const ACCESS_CONTROL_ALLOW_CREDENTIALS_TRUE: UniCase<&'static str> = UniCase("true"); + +impl Header for AccessControlAllowCredentials { + fn header_name() -> &'static str { + "Access-Control-Allow-Credentials" + } + + fn parse_header>(raw: &[T]) -> ::Result { + if raw.len() == 1 { + let text = unsafe { + // safe because: + // 1. we just checked raw.len == 1 + // 2. we don't actually care if it's utf8, we just want to + // compare the bytes with the "case" normalized. If it's not + // utf8, then the byte comparison will fail, and we'll return + // None. No big deal. + str::from_utf8_unchecked(raw.get_unchecked(0).as_ref()) + }; + if UniCase(text) == ACCESS_CONTROL_ALLOW_CREDENTIALS_TRUE { + return Ok(AccessControlAllowCredentials); + } + } + Err(::Error::Header) + } +} + +impl HeaderFormat for AccessControlAllowCredentials { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("true") + } +} + +impl Display for AccessControlAllowCredentials { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + self.fmt_header(f) + } +} + +#[cfg(test)] +mod test_access_control_allow_credentials { + use std::str; + use header::*; + use super::AccessControlAllowCredentials as HeaderField; + test_header!(works, vec![b"true"], Some(HeaderField)); + test_header!(ignores_case, vec![b"True"]); + test_header!(not_bool, vec![b"false"], None); + test_header!(only_single, vec![b"true", b"true"], None); + test_header!(no_gibberish, vec!["\u{645}\u{631}\u{62d}\u{628}\u{627}".as_bytes()], None); +} diff --git 
a/vendor/hyper-0.10.16/src/header/common/access_control_allow_headers.rs b/vendor/hyper-0.10.16/src/header/common/access_control_allow_headers.rs new file mode 100644 index 0000000..e753bde --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_allow_headers.rs @@ -0,0 +1,58 @@ +use unicase::UniCase; + +header! { + /// `Access-Control-Allow-Headers` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-allow-headers-response-header) + /// + /// The `Access-Control-Allow-Headers` header indicates, as part of the + /// response to a preflight request, which header field names can be used + /// during the actual request. + /// + /// # ABNF + /// ```plain + /// Access-Control-Allow-Headers: "Access-Control-Allow-Headers" ":" #field-name + /// ``` + /// + /// # Example values + /// * `accept-language, date` + /// + /// # Examples + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlAllowHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlAllowHeaders(vec![UniCase("date".to_owned())]) + /// ); + /// # } + /// ``` + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlAllowHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlAllowHeaders(vec![ + /// UniCase("accept-language".to_owned()), + /// UniCase("date".to_owned()), + /// ]) + /// ); + /// # } + /// ``` + (AccessControlAllowHeaders, "Access-Control-Allow-Headers") => (UniCase)* + + test_access_control_allow_headers { + test_header!(test1, vec![b"accept-language, date"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_allow_methods.rs 
b/vendor/hyper-0.10.16/src/header/common/access_control_allow_methods.rs new file mode 100644 index 0000000..7917e19 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_allow_methods.rs @@ -0,0 +1,48 @@ +use method::Method; + +header! { + /// `Access-Control-Allow-Methods` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-allow-methods-response-header) + /// + /// The `Access-Control-Allow-Methods` header indicates, as part of the + /// response to a preflight request, which methods can be used during the + /// actual request. + /// + /// # ABNF + /// ```plain + /// Access-Control-Allow-Methods: "Access-Control-Allow-Methods" ":" #Method + /// ``` + /// + /// # Example values + /// * `PUT, DELETE, XMODIFY` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AccessControlAllowMethods}; + /// use hyper::method::Method; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlAllowMethods(vec![Method::Get]) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, AccessControlAllowMethods}; + /// use hyper::method::Method; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlAllowMethods(vec![ + /// Method::Get, + /// Method::Post, + /// Method::Patch, + /// Method::Extension("COPY".to_owned()), + /// ]) + /// ); + /// ``` + (AccessControlAllowMethods, "Access-Control-Allow-Methods") => (Method)* + + test_access_control_allow_methods { + test_header!(test1, vec![b"PUT, DELETE, XMODIFY"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_allow_origin.rs b/vendor/hyper-0.10.16/src/header/common/access_control_allow_origin.rs new file mode 100644 index 0000000..9a2777c --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_allow_origin.rs @@ -0,0 +1,98 @@ +use std::fmt::{self, Display}; + +use header::{Header, HeaderFormat}; + +/// The `Access-Control-Allow-Origin` response header, +/// part of 
[CORS](http://www.w3.org/TR/cors/#access-control-allow-origin-response-header) +/// +/// The `Access-Control-Allow-Origin` header indicates whether a resource +/// can be shared based by returning the value of the Origin request header, +/// "*", or "null" in the response. +/// +/// # ABNF +/// ```plain +/// Access-Control-Allow-Origin = "Access-Control-Allow-Origin" ":" origin-list-or-null | "*" +/// ``` +/// +/// # Example values +/// * `null` +/// * `*` +/// * `http://google.com/` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, AccessControlAllowOrigin}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// AccessControlAllowOrigin::Any +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, AccessControlAllowOrigin}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// AccessControlAllowOrigin::Null, +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, AccessControlAllowOrigin}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// AccessControlAllowOrigin::Value("http://hyper.rs".to_owned()) +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub enum AccessControlAllowOrigin { + /// Allow all origins + Any, + /// A hidden origin + Null, + /// Allow one particular origin + Value(String), +} + +impl Header for AccessControlAllowOrigin { + fn header_name() -> &'static str { + "Access-Control-Allow-Origin" + } + + fn parse_header>(raw: &[T]) -> ::Result { + if raw.len() != 1 { + return Err(::Error::Header) + } + let value = unsafe { raw.get_unchecked(0) }.as_ref(); + Ok(match value { + b"*" => AccessControlAllowOrigin::Any, + b"null" => AccessControlAllowOrigin::Null, + _ => AccessControlAllowOrigin::Value(try!(String::from_utf8(value.to_owned()))) + }) + } +} + +impl HeaderFormat for AccessControlAllowOrigin { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + AccessControlAllowOrigin::Any => f.write_str("*"), + AccessControlAllowOrigin::Null => 
f.write_str("null"), + AccessControlAllowOrigin::Value(ref url) => Display::fmt(url, f), + } + } +} + +impl Display for AccessControlAllowOrigin { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + self.fmt_header(f) + } +} + +#[cfg(test)] +mod test_access_control_allow_orgin { + use header::*; + use super::AccessControlAllowOrigin as HeaderField; + test_header!(test1, vec![b"null"]); + test_header!(test2, vec![b"*"]); + test_header!(test3, vec![b"http://google.com/"]); +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_expose_headers.rs b/vendor/hyper-0.10.16/src/header/common/access_control_expose_headers.rs new file mode 100644 index 0000000..ac18744 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_expose_headers.rs @@ -0,0 +1,60 @@ +use unicase::UniCase; + +header! { + /// `Access-Control-Expose-Headers` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-expose-headers-response-header) + /// + /// The Access-Control-Expose-Headers header indicates which headers are safe to expose to the + /// API of a CORS API specification. 
+ /// + /// # ABNF + /// ```plain + /// Access-Control-Expose-Headers = "Access-Control-Expose-Headers" ":" #field-name + /// ``` + /// + /// # Example values + /// * `ETag, Content-Length` + /// + /// # Examples + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlExposeHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlExposeHeaders(vec![ + /// UniCase("etag".to_owned()), + /// UniCase("content-length".to_owned()) + /// ]) + /// ); + /// # } + /// ``` + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlExposeHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlExposeHeaders(vec![ + /// UniCase("etag".to_owned()), + /// UniCase("content-length".to_owned()) + /// ]) + /// ); + /// # } + /// ``` + (AccessControlExposeHeaders, "Access-Control-Expose-Headers") => (UniCase)* + + test_access_control_expose_headers { + test_header!(test1, vec![b"etag, content-length"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_max_age.rs b/vendor/hyper-0.10.16/src/header/common/access_control_max_age.rs new file mode 100644 index 0000000..d487dc7 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_max_age.rs @@ -0,0 +1,28 @@ +header! { + /// `Access-Control-Max-Age` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-max-age-response-header) + /// + /// The `Access-Control-Max-Age` header indicates how long the results of a + /// preflight request can be cached in a preflight result cache. 
+ /// + /// # ABNF + /// ```plain + /// Access-Control-Max-Age = \"Access-Control-Max-Age\" \":\" delta-seconds + /// ``` + /// + /// # Example values + /// * `531` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AccessControlMaxAge}; + /// + /// let mut headers = Headers::new(); + /// headers.set(AccessControlMaxAge(1728000u32)); + /// ``` + (AccessControlMaxAge, "Access-Control-Max-Age") => [u32] + + test_access_control_max_age { + test_header!(test1, vec![b"531"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_request_headers.rs b/vendor/hyper-0.10.16/src/header/common/access_control_request_headers.rs new file mode 100644 index 0000000..b08cb33 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_request_headers.rs @@ -0,0 +1,58 @@ +use unicase::UniCase; + +header! { + /// `Access-Control-Request-Headers` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-request-headers-request-header) + /// + /// The `Access-Control-Request-Headers` header indicates which headers will + /// be used in the actual request as part of the preflight request. + /// during the actual request. 
+ /// + /// # ABNF + /// ```plain + /// Access-Control-Allow-Headers: "Access-Control-Allow-Headers" ":" #field-name + /// ``` + /// + /// # Example values + /// * `accept-language, date` + /// + /// # Examples + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlRequestHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlRequestHeaders(vec![UniCase("date".to_owned())]) + /// ); + /// # } + /// ``` + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, AccessControlRequestHeaders}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// AccessControlRequestHeaders(vec![ + /// UniCase("accept-language".to_owned()), + /// UniCase("date".to_owned()), + /// ]) + /// ); + /// # } + /// ``` + (AccessControlRequestHeaders, "Access-Control-Request-Headers") => (UniCase)* + + test_access_control_request_headers { + test_header!(test1, vec![b"accept-language, date"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/access_control_request_method.rs b/vendor/hyper-0.10.16/src/header/common/access_control_request_method.rs new file mode 100644 index 0000000..afeb4e2 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/access_control_request_method.rs @@ -0,0 +1,30 @@ +use method::Method; + +header! { + /// `Access-Control-Request-Method` header, part of + /// [CORS](http://www.w3.org/TR/cors/#access-control-request-method-request-header) + /// + /// The `Access-Control-Request-Method` header indicates which method will be + /// used in the actual request as part of the preflight request. 
+ /// # ABNF + /// ```plain + /// Access-Control-Request-Method: \"Access-Control-Request-Method\" \":\" Method + /// ``` + /// + /// # Example values + /// * `GET` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, AccessControlRequestMethod}; + /// use hyper::method::Method; + /// + /// let mut headers = Headers::new(); + /// headers.set(AccessControlRequestMethod(Method::Get)); + /// ``` + (AccessControlRequestMethod, "Access-Control-Request-Method") => [Method] + + test_access_control_request_method { + test_header!(test1, vec![b"GET"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/allow.rs b/vendor/hyper-0.10.16/src/header/common/allow.rs new file mode 100644 index 0000000..794b3fc --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/allow.rs @@ -0,0 +1,68 @@ +use method::Method; + +/// `Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1) +/// +/// The `Allow` header field lists the set of methods advertised as +/// supported by the target resource. The purpose of this field is +/// strictly to inform the recipient of valid request methods associated +/// with the resource. 
+/// +/// # ABNF +/// ```plain +/// Allow = #method +/// ``` +/// +/// # Example values +/// * `GET, HEAD, PUT` +/// * `OPTIONS, GET, PUT, POST, DELETE, HEAD, TRACE, CONNECT, PATCH, fOObAr` +/// * `` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Allow}; +/// use hyper::method::Method; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Allow(vec![Method::Get]) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, Allow}; +/// use hyper::method::Method; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Allow(vec![ +/// Method::Get, +/// Method::Post, +/// Method::Patch, +/// Method::Extension("TEST".to_owned()), +/// ].into()) +/// ); +/// ``` +#[derive(Clone, Debug, PartialEq)] +pub struct Allow(pub std::borrow::Cow<'static, [Method]>); +__hyper__deref!(Allow => std::borrow::Cow<'static, [Method]>); +impl ::header::Header for Allow { + fn header_name() -> &'static str { + "Allow" + } + fn parse_header>(raw: &[T]) -> ::Result { + ::header::parsing::from_comma_delimited(raw).map(std::borrow::Cow::Owned).map(Allow) + } +} +impl ::header::HeaderFormat for Allow { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + ::header::parsing::fmt_comma_delimited(f, &self.0[..]) + } +} +impl ::std::fmt::Display for Allow { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + use ::header::HeaderFormat; + self.fmt_header(f) + } +} + +bench_header!(bench, +Allow, { vec![b"OPTIONS,GET,PUT,POST,DELETE,HEAD,TRACE,CONNECT,PATCH,fOObAr".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/authorization.rs b/vendor/hyper-0.10.16/src/header/common/authorization.rs new file mode 100644 index 0000000..78bc373 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/authorization.rs @@ -0,0 +1,289 @@ +use std::any::Any; +use std::fmt::{self, Display}; +use std::str::{FromStr, from_utf8}; +use std::ops::{Deref, DerefMut}; +use base64::{encode, decode}; +use header::{Header, 
HeaderFormat}; + +/// `Authorization` header, defined in [RFC7235](https://tools.ietf.org/html/rfc7235#section-4.2) +/// +/// The `Authorization` header field allows a user agent to authenticate +/// itself with an origin server -- usually, but not necessarily, after +/// receiving a 401 (Unauthorized) response. Its value consists of +/// credentials containing the authentication information of the user +/// agent for the realm of the resource being requested. +/// +/// # ABNF +/// ```plain +/// Authorization = credentials +/// ``` +/// +/// # Example values +/// * `Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==` +/// * `Bearer fpKL54jvWmEGVoRdCNjG` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Authorization}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Authorization("let me in".to_owned())); +/// ``` +/// ``` +/// use hyper::header::{Headers, Authorization, Basic}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Authorization( +/// Basic { +/// username: "Aladdin".to_owned(), +/// password: Some("open sesame".to_owned()) +/// } +/// ) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, Authorization, Bearer}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Authorization( +/// Bearer { +/// token: "QWxhZGRpbjpvcGVuIHNlc2FtZQ".to_owned() +/// } +/// ) +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct Authorization(pub S); + +impl Deref for Authorization { + type Target = S; + + fn deref(&self) -> &S { + &self.0 + } +} + +impl DerefMut for Authorization { + fn deref_mut(&mut self) -> &mut S { + &mut self.0 + } +} + +impl Header for Authorization where ::Err: 'static { + fn header_name() -> &'static str { + "Authorization" + } + + fn parse_header>(raw: &[T]) -> ::Result> { + if raw.len() != 1 { + return Err(::Error::Header); + } + let header = try!(from_utf8(unsafe { raw.get_unchecked(0) }.as_ref())); + if let Some(scheme) = ::scheme() { + if header.starts_with(scheme) && header.len() 
> scheme.len() + 1 { + match header[scheme.len() + 1..].parse::().map(Authorization) { + Ok(h) => Ok(h), + Err(_) => Err(::Error::Header) + } + } else { + Err(::Error::Header) + } + } else { + match header.parse::().map(Authorization) { + Ok(h) => Ok(h), + Err(_) => Err(::Error::Header) + } + } + } +} + +impl HeaderFormat for Authorization where ::Err: 'static { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(scheme) = ::scheme() { + try!(write!(f, "{} ", scheme)) + }; + self.0.fmt_scheme(f) + } +} + +/// An Authorization scheme to be used in the header. +pub trait Scheme: FromStr + fmt::Debug + Clone + Send + Sync { + /// An optional Scheme name. + /// + /// Will be replaced with an associated constant once available. + fn scheme() -> Option<&'static str>; + /// Format the Scheme data into a header value. + fn fmt_scheme(&self, &mut fmt::Formatter) -> fmt::Result; +} + +impl Scheme for String { + fn scheme() -> Option<&'static str> { + None + } + + fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(self, f) + } +} + +/// Credential holder for Basic Authentication +#[derive(Clone, PartialEq, Debug)] +pub struct Basic { + /// The username as a possibly empty string + pub username: String, + /// The password. `None` if the `:` delimiter character was not + /// part of the parsed input. + pub password: Option +} + +impl Scheme for Basic { + fn scheme() -> Option<&'static str> { + Some("Basic") + } + + fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { + //FIXME: serialize::base64 could use some Debug implementation, so + //that we don't have to allocate a new string here just to write it + //to the formatter. 
+ let mut text = self.username.clone(); + text.push(':'); + if let Some(ref pass) = self.password { + text.push_str(&pass[..]); + } + + f.write_str(&encode(text.as_bytes())) + } +} + +impl FromStr for Basic { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + match decode(s) { + Ok(decoded) => match String::from_utf8(decoded) { + Ok(text) => { + let parts = &mut text.split(':'); + let user = match parts.next() { + Some(part) => part.to_owned(), + None => return Err(::Error::Header) + }; + let password = match parts.next() { + Some(part) => Some(part.to_owned()), + None => None + }; + Ok(Basic { + username: user, + password: password + }) + }, + Err(e) => { + debug!("Basic::from_utf8 error={:?}", e); + Err(::Error::Header) + } + }, + Err(e) => { + debug!("Basic::from_base64 error={:?}", e); + Err(::Error::Header) + } + } + } +} + +#[derive(Clone, PartialEq, Debug)] +///Token holder for Bearer Authentication, most often seen with oauth +pub struct Bearer { + ///Actual bearer token as a string + pub token: String +} + +impl Scheme for Bearer { + fn scheme() -> Option<&'static str> { + Some("Bearer") + } + + fn fmt_scheme(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.token) + } +} + +impl FromStr for Bearer { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + Ok(Bearer { token: s.to_owned()}) + } +} + +#[cfg(test)] +mod tests { + use super::{Authorization, Basic, Bearer}; + use super::super::super::{Headers, Header}; + + #[test] + fn test_raw_auth() { + let mut headers = Headers::new(); + headers.set(Authorization("foo bar baz".to_owned())); + assert_eq!(headers.to_string(), "Authorization: foo bar baz\r\n".to_owned()); + } + + #[test] + fn test_raw_auth_parse() { + let header: Authorization = Header::parse_header( + &[b"foo bar baz".to_vec()]).unwrap(); + assert_eq!(header.0, "foo bar baz"); + } + + #[test] + fn test_basic_auth() { + let mut headers = Headers::new(); + headers.set(Authorization( + Basic { username: 
"Aladdin".to_owned(), password: Some("open sesame".to_owned()) })); + assert_eq!( + headers.to_string(), + "Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\r\n".to_owned()); + } + + #[test] + fn test_basic_auth_no_password() { + let mut headers = Headers::new(); + headers.set(Authorization(Basic { username: "Aladdin".to_owned(), password: None })); + assert_eq!(headers.to_string(), "Authorization: Basic QWxhZGRpbjo=\r\n".to_owned()); + } + + #[test] + fn test_basic_auth_parse() { + let auth: Authorization = Header::parse_header( + &[b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==".to_vec()]).unwrap(); + assert_eq!(auth.0.username, "Aladdin"); + assert_eq!(auth.0.password, Some("open sesame".to_owned())); + } + + #[test] + fn test_basic_auth_parse_no_password() { + let auth: Authorization = Header::parse_header( + &[b"Basic QWxhZGRpbjo=".to_vec()]).unwrap(); + assert_eq!(auth.0.username, "Aladdin"); + assert_eq!(auth.0.password, Some("".to_owned())); + } + + #[test] + fn test_bearer_auth() { + let mut headers = Headers::new(); + headers.set(Authorization( + Bearer { token: "fpKL54jvWmEGVoRdCNjG".to_owned() })); + assert_eq!( + headers.to_string(), + "Authorization: Bearer fpKL54jvWmEGVoRdCNjG\r\n".to_owned()); + } + + #[test] + fn test_bearer_auth_parse() { + let auth: Authorization = Header::parse_header( + &[b"Bearer fpKL54jvWmEGVoRdCNjG".to_vec()]).unwrap(); + assert_eq!(auth.0.token, "fpKL54jvWmEGVoRdCNjG"); + } +} + +bench_header!(raw, Authorization, { vec![b"foo bar baz".to_vec()] }); +bench_header!(basic, Authorization, { vec![b"Basic QWxhZGRpbjpuIHNlc2FtZQ==".to_vec()] }); +bench_header!(bearer, Authorization, { vec![b"Bearer fpKL54jvWmEGVoRdCNjG".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/cache_control.rs b/vendor/hyper-0.10.16/src/header/common/cache_control.rs new file mode 100644 index 0000000..49132f8 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/cache_control.rs @@ -0,0 +1,211 @@ +use std::fmt; +use std::str::FromStr; +use 
header::{Header, HeaderFormat}; +use header::parsing::{from_comma_delimited, fmt_comma_delimited}; + +/// `Cache-Control` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.2) +/// +/// The `Cache-Control` header field is used to specify directives for +/// caches along the request/response chain. Such cache directives are +/// unidirectional in that the presence of a directive in a request does +/// not imply that the same directive is to be given in the response. +/// +/// # ABNF +/// ```plain +/// Cache-Control = 1#cache-directive +/// cache-directive = token [ "=" ( token / quoted-string ) ] +/// ``` +/// +/// # Example values +/// * `no-cache` +/// * `private, community="UCI"` +/// * `max-age=30` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, CacheControl, CacheDirective}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// CacheControl(vec![CacheDirective::MaxAge(86400u32)]) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, CacheControl, CacheDirective}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// CacheControl(vec![ +/// CacheDirective::NoCache, +/// CacheDirective::Private, +/// CacheDirective::MaxAge(360u32), +/// CacheDirective::Extension("foo".to_owned(), +/// Some("bar".to_owned())), +/// ]) +/// ); +/// ``` +#[derive(PartialEq, Clone, Debug)] +pub struct CacheControl(pub Vec); + +__hyper__deref!(CacheControl => Vec); + +impl Header for CacheControl { + fn header_name() -> &'static str { + "Cache-Control" + } + + fn parse_header>(raw: &[T]) -> ::Result { + let directives = try!(from_comma_delimited(raw)); + if !directives.is_empty() { + Ok(CacheControl(directives)) + } else { + Err(::Error::Header) + } + } +} + +impl HeaderFormat for CacheControl { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for CacheControl { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt_comma_delimited(f, 
&self[..]) + } +} + +/// `CacheControl` contains a list of these directives. +#[derive(PartialEq, Clone, Debug)] +pub enum CacheDirective { + /// "no-cache" + NoCache, + /// "no-store" + NoStore, + /// "no-transform" + NoTransform, + /// "only-if-cached" + OnlyIfCached, + + // request directives + /// "max-age=delta" + MaxAge(u32), + /// "max-stale=delta" + MaxStale(u32), + /// "min-fresh=delta" + MinFresh(u32), + + // response directives + /// "must-revalidate" + MustRevalidate, + /// "public" + Public, + /// "private" + Private, + /// "proxy-revalidate" + ProxyRevalidate, + /// "s-maxage=delta" + SMaxAge(u32), + + /// Extension directives. Optionally include an argument. + Extension(String, Option) +} + +impl fmt::Display for CacheDirective { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::CacheDirective::*; + fmt::Display::fmt(match *self { + NoCache => "no-cache", + NoStore => "no-store", + NoTransform => "no-transform", + OnlyIfCached => "only-if-cached", + + MaxAge(secs) => return write!(f, "max-age={}", secs), + MaxStale(secs) => return write!(f, "max-stale={}", secs), + MinFresh(secs) => return write!(f, "min-fresh={}", secs), + + MustRevalidate => "must-revalidate", + Public => "public", + Private => "private", + ProxyRevalidate => "proxy-revalidate", + SMaxAge(secs) => return write!(f, "s-maxage={}", secs), + + Extension(ref name, None) => &name[..], + Extension(ref name, Some(ref arg)) => return write!(f, "{}={}", name, arg), + + }, f) + } +} + +impl FromStr for CacheDirective { + type Err = Option<::Err>; + fn from_str(s: &str) -> Result::Err>> { + use self::CacheDirective::*; + match s { + "no-cache" => Ok(NoCache), + "no-store" => Ok(NoStore), + "no-transform" => Ok(NoTransform), + "only-if-cached" => Ok(OnlyIfCached), + "must-revalidate" => Ok(MustRevalidate), + "public" => Ok(Public), + "private" => Ok(Private), + "proxy-revalidate" => Ok(ProxyRevalidate), + "" => Err(None), + _ => match s.find('=') { + Some(idx) if idx+1 < 
s.len() => match (&s[..idx], (&s[idx+1..]).trim_matches('"')) { + ("max-age" , secs) => secs.parse().map(MaxAge).map_err(Some), + ("max-stale", secs) => secs.parse().map(MaxStale).map_err(Some), + ("min-fresh", secs) => secs.parse().map(MinFresh).map_err(Some), + ("s-maxage", secs) => secs.parse().map(SMaxAge).map_err(Some), + (left, right) => Ok(Extension(left.to_owned(), Some(right.to_owned()))) + }, + Some(_) => Err(None), + None => Ok(Extension(s.to_owned(), None)) + } + } + } +} + +#[cfg(test)] +mod tests { + use header::Header; + use super::*; + + #[test] + fn test_parse_multiple_headers() { + let cache = Header::parse_header(&[b"no-cache".to_vec(), b"private".to_vec()]); + assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::NoCache, + CacheDirective::Private]))) + } + + #[test] + fn test_parse_argument() { + let cache = Header::parse_header(&[b"max-age=100, private".to_vec()]); + assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(100), + CacheDirective::Private]))) + } + + #[test] + fn test_parse_quote_form() { + let cache = Header::parse_header(&[b"max-age=\"200\"".to_vec()]); + assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(200)]))) + } + + #[test] + fn test_parse_extension() { + let cache = Header::parse_header(&[b"foo, bar=baz".to_vec()]); + assert_eq!(cache.ok(), Some(CacheControl(vec![ + CacheDirective::Extension("foo".to_owned(), None), + CacheDirective::Extension("bar".to_owned(), Some("baz".to_owned()))]))) + } + + #[test] + fn test_parse_bad_syntax() { + let cache: ::Result = Header::parse_header(&[b"foo=".to_vec()]); + assert_eq!(cache.ok(), None) + } +} + +bench_header!(normal, + CacheControl, { vec![b"no-cache, private".to_vec(), b"max-age=100".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/connection.rs b/vendor/hyper-0.10.16/src/header/common/connection.rs new file mode 100644 index 0000000..5e86833 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/connection.rs @@ 
-0,0 +1,143 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; +use unicase::UniCase; + +pub use self::ConnectionOption::{KeepAlive, Close, ConnectionHeader}; + +const KEEP_ALIVE: UniCase<&'static str> = UniCase("keep-alive"); +const CLOSE: UniCase<&'static str> = UniCase("close"); + +/// Values that can be in the `Connection` header. +#[derive(Clone, PartialEq, Debug)] +pub enum ConnectionOption { + /// The `keep-alive` connection value. + KeepAlive, + /// The `close` connection value. + Close, + /// Values in the Connection header that are supposed to be names of other Headers. + /// + /// > When a header field aside from Connection is used to supply control + /// > information for or about the current connection, the sender MUST list + /// > the corresponding field-name within the Connection header field. + // TODO: it would be nice if these "Strings" could be stronger types, since + // they are supposed to relate to other Header fields (which we have strong + // types for). + ConnectionHeader(UniCase), +} + +impl FromStr for ConnectionOption { + type Err = (); + fn from_str(s: &str) -> Result { + if UniCase(s) == KEEP_ALIVE { + Ok(KeepAlive) + } else if UniCase(s) == CLOSE { + Ok(Close) + } else { + Ok(ConnectionHeader(UniCase(s.to_owned()))) + } + } +} + +impl Display for ConnectionOption { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + KeepAlive => "keep-alive", + Close => "close", + ConnectionHeader(UniCase(ref s)) => s.as_ref() + }) + } +} + +header! { + /// `Connection` header, defined in + /// [RFC7230](http://tools.ietf.org/html/rfc7230#section-6.1) + /// + /// The `Connection` header field allows the sender to indicate desired + /// control options for the current connection. In order to avoid + /// confusing downstream recipients, a proxy or gateway MUST remove or + /// replace any received connection options before forwarding the + /// message. 
+ /// + /// # ABNF + /// ```plain + /// Connection = 1#connection-option + /// connection-option = token + /// + /// # Example values + /// * `close` + /// * `keep-alive` + /// * `upgrade` + /// ``` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, Connection}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Connection::keep_alive()); + /// ``` + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, Connection, ConnectionOption}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// Connection(vec![ + /// ConnectionOption::ConnectionHeader(UniCase("upgrade".to_owned())), + /// ]) + /// ); + /// # } + /// ``` + (Connection, "Connection") => (ConnectionOption)+ + + test_connection { + test_header!(test1, vec![b"close"]); + test_header!(test2, vec![b"keep-alive"]); + test_header!(test3, vec![b"upgrade"]); + } +} + +impl Connection { + /// A constructor to easily create a `Connection: close` header. + #[inline] + pub fn close() -> Connection { + Connection(vec![ConnectionOption::Close]) + } + + /// A constructor to easily create a `Connection: keep-alive` header. 
+ #[inline] + pub fn keep_alive() -> Connection { + Connection(vec![ConnectionOption::KeepAlive]) + } +} + +bench_header!(close, Connection, { vec![b"close".to_vec()] }); +bench_header!(keep_alive, Connection, { vec![b"keep-alive".to_vec()] }); +bench_header!(header, Connection, { vec![b"authorization".to_vec()] }); + +#[cfg(test)] +mod tests { + use super::{Connection,ConnectionHeader}; + use header::Header; + use unicase::UniCase; + + fn parse_option(header: Vec) -> Connection { + let val = vec![header]; + let connection: Connection = Header::parse_header(&val[..]).unwrap(); + connection + } + + #[test] + fn test_parse() { + assert_eq!(Connection::close(),parse_option(b"close".to_vec())); + assert_eq!(Connection::keep_alive(),parse_option(b"keep-alive".to_vec())); + assert_eq!(Connection::keep_alive(),parse_option(b"Keep-Alive".to_vec())); + assert_eq!(Connection(vec![ConnectionHeader(UniCase("upgrade".to_owned()))]), + parse_option(b"upgrade".to_vec())); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/content_encoding.rs b/vendor/hyper-0.10.16/src/header/common/content_encoding.rs new file mode 100644 index 0000000..7eed8e3 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/content_encoding.rs @@ -0,0 +1,50 @@ +use header::Encoding; + +header! { + /// `Content-Encoding` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-3.1.2.2) + /// + /// The `Content-Encoding` header field indicates what content codings + /// have been applied to the representation, beyond those inherent in the + /// media type, and thus what decoding mechanisms have to be applied in + /// order to obtain data in the media type referenced by the Content-Type + /// header field. Content-Encoding is primarily used to allow a + /// representation's data to be compressed without losing the identity of + /// its underlying media type. 
+ /// + /// # ABNF + /// ```plain + /// Content-Encoding = 1#content-coding + /// ``` + /// + /// # Example values + /// * `gzip` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, ContentEncoding, Encoding}; + /// + /// let mut headers = Headers::new(); + /// headers.set(ContentEncoding(vec![Encoding::Chunked])); + /// ``` + /// ``` + /// use hyper::header::{Headers, ContentEncoding, Encoding}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// ContentEncoding(vec![ + /// Encoding::Gzip, + /// Encoding::Chunked, + /// ]) + /// ); + /// ``` + (ContentEncoding, "Content-Encoding") => [Encoding]+ + + /*test_content_encoding { + /// Testcase from the RFC + test_header!(test1, vec![b"gzip"], Some(ContentEncoding(vec![Encoding::Gzip]))); + }*/ +} + +bench_header!(single, ContentEncoding, { vec![b"gzip".to_vec()] }); +bench_header!(multiple, ContentEncoding, { vec![b"gzip, deflate".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/content_length.rs b/vendor/hyper-0.10.16/src/header/common/content_length.rs new file mode 100644 index 0000000..273a31e --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/content_length.rs @@ -0,0 +1,94 @@ +use std::fmt; + +use header::{HeaderFormat, Header, parsing}; + +/// `Content-Length` header, defined in +/// [RFC7230](http://tools.ietf.org/html/rfc7230#section-3.3.2) +/// +/// When a message does not have a `Transfer-Encoding` header field, a +/// Content-Length header field can provide the anticipated size, as a +/// decimal number of octets, for a potential payload body. For messages +/// that do include a payload body, the Content-Length field-value +/// provides the framing information necessary for determining where the +/// body (and message) ends. For messages that do not include a payload +/// body, the Content-Length indicates the size of the selected +/// representation. 
+/// +/// # ABNF +/// ```plain +/// Content-Length = 1*DIGIT +/// ``` +/// +/// # Example values +/// * `3495` +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, ContentLength}; +/// +/// let mut headers = Headers::new(); +/// headers.set(ContentLength(1024u64)); +/// ``` +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ContentLength(pub u64); + +impl Header for ContentLength { + #[inline] + fn header_name() -> &'static str { + "Content-Length" + } + fn parse_header>(raw: &[T]) -> ::Result { + // If multiple Content-Length headers were sent, everything can still + // be alright if they all contain the same value, and all parse + // correctly. If not, then it's an error. + raw.iter() + .map(|b| b.as_ref()) + .map(|b| if b.iter().all(|b| matches!(b, b'0'..=b'9')) { parsing::from_raw_str(b) } else { Err(::Error::Header) }) + .fold(None, |prev, x| { + match (prev, x) { + (None, x) => Some(x), + (e@Some(Err(_)), _ ) => e, + (Some(Ok(prev)), Ok(x)) if prev == x => Some(Ok(prev)), + _ => Some(Err(::Error::Header)) + } + }) + .unwrap_or(Err(::Error::Header)) + .map(ContentLength) + } +} + +impl HeaderFormat for ContentLength { + #[inline] + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +impl fmt::Display for ContentLength { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +__hyper__deref!(ContentLength => u64); + +__hyper__tm!(ContentLength, tests { + // Testcase from RFC + test_header!(test1, vec![b"3495"], Some(HeaderField(3495))); + + test_header!(test_invalid, vec![b"34v95"], None); + + // Can't use the test_header macro because "5, 5" gets cleaned to "5". 
+ #[test] + fn test_duplicates() { + let parsed = HeaderField::parse_header(&[b"5"[..].into(), + b"5"[..].into()]).unwrap(); + assert_eq!(parsed, HeaderField(5)); + assert_eq!(format!("{}", parsed), "5"); + } + + test_header!(test_duplicates_vary, vec![b"5", b"6", b"5"], None); +}); + +bench_header!(bench, ContentLength, { vec![b"42349984".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/content_range.rs b/vendor/hyper-0.10.16/src/header/common/content_range.rs new file mode 100644 index 0000000..2d9a965 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/content_range.rs @@ -0,0 +1,189 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; + +header! { + /// `Content-Range` header, defined in + /// [RFC7233](http://tools.ietf.org/html/rfc7233#section-4.2) + (ContentRange, "Content-Range") => [ContentRangeSpec] + + test_content_range { + test_header!(test_bytes, + vec![b"bytes 0-499/500"], + Some(ContentRange(ContentRangeSpec::Bytes { + range: Some((0, 499)), + instance_length: Some(500) + }))); + + test_header!(test_bytes_unknown_len, + vec![b"bytes 0-499/*"], + Some(ContentRange(ContentRangeSpec::Bytes { + range: Some((0, 499)), + instance_length: None + }))); + + test_header!(test_bytes_unknown_range, + vec![b"bytes */500"], + Some(ContentRange(ContentRangeSpec::Bytes { + range: None, + instance_length: Some(500) + }))); + + test_header!(test_unregistered, + vec![b"seconds 1-2"], + Some(ContentRange(ContentRangeSpec::Unregistered { + unit: "seconds".to_owned(), + resp: "1-2".to_owned() + }))); + + test_header!(test_no_len, + vec![b"bytes 0-499"], + None::); + + test_header!(test_only_unit, + vec![b"bytes"], + None::); + + test_header!(test_end_less_than_start, + vec![b"bytes 499-0/500"], + None::); + + test_header!(test_blank, + vec![b""], + None::); + + test_header!(test_bytes_many_spaces, + vec![b"bytes 1-2/500 3"], + None::); + + test_header!(test_bytes_many_slashes, + vec![b"bytes 1-2/500/600"], + None::); + + 
test_header!(test_bytes_many_dashes, + vec![b"bytes 1-2-3/500"], + None::); + + } +} + + +/// Content-Range, described in [RFC7233](https://tools.ietf.org/html/rfc7233#section-4.2) +/// +/// # ABNF +/// ```plain +/// Content-Range = byte-content-range +/// / other-content-range +/// +/// byte-content-range = bytes-unit SP +/// ( byte-range-resp / unsatisfied-range ) +/// +/// byte-range-resp = byte-range "/" ( complete-length / "*" ) +/// byte-range = first-byte-pos "-" last-byte-pos +/// unsatisfied-range = "*/" complete-length +/// +/// complete-length = 1*DIGIT +/// +/// other-content-range = other-range-unit SP other-range-resp +/// other-range-resp = *CHAR +/// ``` +#[derive(PartialEq, Clone, Debug)] +pub enum ContentRangeSpec { + /// Byte range + Bytes { + /// First and last bytes of the range, omitted if request could not be + /// satisfied + range: Option<(u64, u64)>, + + /// Total length of the instance, can be omitted if unknown + instance_length: Option + }, + + /// Custom range, with unit not registered at IANA + Unregistered { + /// other-range-unit + unit: String, + + /// other-range-resp + resp: String + } +} + +fn split_in_two(s: &str, separator: char) -> Option<(&str, &str)> { + let mut iter = s.splitn(2, separator); + match (iter.next(), iter.next()) { + (Some(a), Some(b)) => Some((a, b)), + _ => None + } +} + +impl FromStr for ContentRangeSpec { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + let res = match split_in_two(s, ' ') { + Some(("bytes", resp)) => { + let (range, instance_length) = try!(split_in_two(resp, '/').ok_or(::Error::Header)); + + let instance_length = if instance_length == "*" { + None + } else { + Some(try!(instance_length.parse().map_err(|_| ::Error::Header))) + }; + + let range = if range == "*" { + None + } else { + let (first_byte, last_byte) = try!(split_in_two(range, '-').ok_or(::Error::Header)); + let first_byte = try!(first_byte.parse().map_err(|_| ::Error::Header)); + let last_byte = 
try!(last_byte.parse().map_err(|_| ::Error::Header)); + if last_byte < first_byte { + return Err(::Error::Header); + } + Some((first_byte, last_byte)) + }; + + ContentRangeSpec::Bytes { + range: range, + instance_length: instance_length + } + } + Some((unit, resp)) => { + ContentRangeSpec::Unregistered { + unit: unit.to_owned(), + resp: resp.to_owned() + } + } + _ => return Err(::Error::Header) + }; + Ok(res) + } +} + +impl Display for ContentRangeSpec { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ContentRangeSpec::Bytes { range, instance_length } => { + try!(f.write_str("bytes ")); + match range { + Some((first_byte, last_byte)) => { + try!(write!(f, "{}-{}", first_byte, last_byte)); + }, + None => { + try!(f.write_str("*")); + } + }; + try!(f.write_str("/")); + if let Some(v) = instance_length { + write!(f, "{}", v) + } else { + f.write_str("*") + } + } + ContentRangeSpec::Unregistered { ref unit, ref resp } => { + try!(f.write_str(&unit)); + try!(f.write_str(" ")); + f.write_str(&resp) + } + } + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/content_type.rs b/vendor/hyper-0.10.16/src/header/common/content_type.rs new file mode 100644 index 0000000..73ead58 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/content_type.rs @@ -0,0 +1,97 @@ +use mime::Mime; + +header! { + /// `Content-Type` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-3.1.1.5) + /// + /// The `Content-Type` header field indicates the media type of the + /// associated representation: either the representation enclosed in the + /// message payload or the selected representation, as determined by the + /// message semantics. The indicated media type defines both the data + /// format and how that data is intended to be processed by a recipient, + /// within the scope of the received message semantics, after any content + /// codings indicated by Content-Encoding are decoded. 
+ /// + /// # ABNF + /// ```plain + /// Content-Type = media-type + /// ``` + /// + /// # Example values + /// * `text/html; charset=ISO-8859-4` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, ContentType}; + /// use hyper::mime::{Mime, TopLevel, SubLevel}; + /// + /// let mut headers = Headers::new(); + /// + /// headers.set( + /// ContentType(Mime(TopLevel::Text, SubLevel::Html, vec![])) + /// ); + /// ``` + /// ``` + /// use hyper::header::{Headers, ContentType}; + /// use hyper::mime::{Mime, TopLevel, SubLevel, Attr, Value}; + /// + /// let mut headers = Headers::new(); + /// + /// headers.set( + /// ContentType(Mime(TopLevel::Application, SubLevel::Json, + /// vec![(Attr::Charset, Value::Utf8)])) + /// ); + /// ``` + (ContentType, "Content-Type") => [Mime] + + test_content_type { + test_header!( + test1, + // FIXME: Should be b"text/html; charset=ISO-8859-4" but mime crate lowercases + // the whole value so parsing and formatting the value gives a different result + vec![b"text/html; charset=iso-8859-4"], + Some(HeaderField(Mime( + TopLevel::Text, + SubLevel::Html, + vec![(Attr::Charset, Value::Ext("iso-8859-4".to_owned()))])))); + } +} + +impl ContentType { + /// A constructor to easily create a `Content-Type: application/json` header. + #[inline] + pub fn json() -> ContentType { + ContentType(mime!(Application/Json)) + } + + /// A constructor to easily create a `Content-Type: text/plain; charset=utf-8` header. + #[inline] + pub fn plaintext() -> ContentType { + ContentType(mime!(Text/Plain; Charset=Utf8)) + } + + /// A constructor to easily create a `Content-Type: text/html; charset=utf-8` header. + #[inline] + pub fn html() -> ContentType { + ContentType(mime!(Text/Html; Charset=Utf8)) + } + + /// A constructor to easily create a `Content-Type: application/www-form-url-encoded` header. 
+ #[inline] + pub fn form_url_encoded() -> ContentType { + ContentType(mime!(Application/WwwFormUrlEncoded)) + } + /// A constructor to easily create a `Content-Type: image/jpeg` header. + #[inline] + pub fn jpeg() -> ContentType { + ContentType(mime!(Image/Jpeg)) + } + + /// A constructor to easily create a `Content-Type: image/png` header. + #[inline] + pub fn png() -> ContentType { + ContentType(mime!(Image/Png)) + } +} + +bench_header!(bench, ContentType, { vec![b"application/json; charset=utf-8".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/cookie.rs b/vendor/hyper-0.10.16/src/header/common/cookie.rs new file mode 100644 index 0000000..634ece5 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/cookie.rs @@ -0,0 +1,70 @@ +use header::{Header, HeaderFormat}; +use std::fmt::{self, Display}; +use std::str::from_utf8; + +/// `Cookie` header, defined in [RFC6265](http://tools.ietf.org/html/rfc6265#section-5.4) +/// +/// If the user agent does attach a Cookie header field to an HTTP +/// request, the user agent must send the cookie-string +/// as the value of the header field. +/// +/// When the user agent generates an HTTP request, the user agent MUST NOT +/// attach more than one Cookie header field. 
+/// +/// # Example values +/// * `SID=31d4d96e407aad42` +/// * `SID=31d4d96e407aad42; lang=en-US` +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, Cookie}; +/// +/// let mut headers = Headers::new(); +/// +/// headers.set( +/// Cookie(vec![ +/// String::from("foo=bar") +/// ]) +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct Cookie(pub Vec); + +__hyper__deref!(Cookie => Vec); + +impl Header for Cookie { + fn header_name() -> &'static str { + "Cookie" + } + + fn parse_header>(raw: &[T]) -> ::Result { + let mut cookies = Vec::with_capacity(raw.len()); + for cookies_raw in raw.iter() { + let cookies_str = try!(from_utf8(cookies_raw.as_ref())); + for cookie_str in cookies_str.split(';') { + cookies.push(cookie_str.trim().to_owned()) + } + } + + if !cookies.is_empty() { + Ok(Cookie(cookies)) + } else { + Err(::Error::Header) + } + } +} + +impl HeaderFormat for Cookie { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + let cookies = &self.0; + for (i, cookie) in cookies.iter().enumerate() { + if i != 0 { + try!(f.write_str("; ")); + } + try!(Display::fmt(&cookie, f)); + } + Ok(()) + } +} + +bench_header!(bench, Cookie, { vec![b"foo=bar; baz=quux".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/date.rs b/vendor/hyper-0.10.16/src/header/common/date.rs new file mode 100644 index 0000000..e0a9e58 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/date.rs @@ -0,0 +1,40 @@ +use header::HttpDate; + +header! { + /// `Date` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.1.1.2) + /// + /// The `Date` header field represents the date and time at which the + /// message was originated. 
+ /// + /// # ABNF + /// ```plain + /// Date = HTTP-date + /// ``` + /// + /// # Example values + /// * `Tue, 15 Nov 1994 08:12:31 GMT` + /// + /// # Example + /// ``` + /// # extern crate time; + /// # extern crate hyper; + /// # fn main() { + /// // extern crate time; + /// + /// use hyper::header::{Headers, Date, HttpDate}; + /// use time; + /// + /// let mut headers = Headers::new(); + /// headers.set(Date(HttpDate(time::now()))); + /// # } + /// ``` + (Date, "Date") => [HttpDate] + + test_date { + test_header!(test1, vec![b"Tue, 15 Nov 1994 08:12:31 GMT"]); + } +} + +bench_header!(imf_fixdate, Date, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); +bench_header!(rfc_850, Date, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); +bench_header!(asctime, Date, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/etag.rs b/vendor/hyper-0.10.16/src/header/common/etag.rs new file mode 100644 index 0000000..068c859 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/etag.rs @@ -0,0 +1,89 @@ +use header::EntityTag; + +header! { + /// `ETag` header, defined in [RFC7232](http://tools.ietf.org/html/rfc7232#section-2.3) + /// + /// The `ETag` header field in a response provides the current entity-tag + /// for the selected representation, as determined at the conclusion of + /// handling the request. An entity-tag is an opaque validator for + /// differentiating between multiple representations of the same + /// resource, regardless of whether those multiple representations are + /// due to resource state changes over time, content negotiation + /// resulting in multiple representations being valid at the same time, + /// or both. An entity-tag consists of an opaque quoted string, possibly + /// prefixed by a weakness indicator. 
+ /// + /// # ABNF + /// ```plain + /// ETag = entity-tag + /// ``` + /// + /// # Example values + /// * `"xyzzy"` + /// * `W/"xyzzy"` + /// * `""` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, ETag, EntityTag}; + /// + /// let mut headers = Headers::new(); + /// headers.set(ETag(EntityTag::new(false, "xyzzy".to_owned()))); + /// ``` + /// ``` + /// use hyper::header::{Headers, ETag, EntityTag}; + /// + /// let mut headers = Headers::new(); + /// headers.set(ETag(EntityTag::new(true, "xyzzy".to_owned()))); + /// ``` + (ETag, "ETag") => [EntityTag] + + test_etag { + // From the RFC + test_header!(test1, + vec![b"\"xyzzy\""], + Some(ETag(EntityTag::new(false, "xyzzy".to_owned())))); + test_header!(test2, + vec![b"W/\"xyzzy\""], + Some(ETag(EntityTag::new(true, "xyzzy".to_owned())))); + test_header!(test3, + vec![b"\"\""], + Some(ETag(EntityTag::new(false, "".to_owned())))); + // Own tests + test_header!(test4, + vec![b"\"foobar\""], + Some(ETag(EntityTag::new(false, "foobar".to_owned())))); + test_header!(test5, + vec![b"\"\""], + Some(ETag(EntityTag::new(false, "".to_owned())))); + test_header!(test6, + vec![b"W/\"weak-etag\""], + Some(ETag(EntityTag::new(true, "weak-etag".to_owned())))); + test_header!(test7, + vec![b"W/\"\x65\x62\""], + Some(ETag(EntityTag::new(true, "\u{0065}\u{0062}".to_owned())))); + test_header!(test8, + vec![b"W/\"\""], + Some(ETag(EntityTag::new(true, "".to_owned())))); + test_header!(test9, + vec![b"no-dquotes"], + None::); + test_header!(test10, + vec![b"w/\"the-first-w-is-case-sensitive\""], + None::); + test_header!(test11, + vec![b""], + None::); + test_header!(test12, + vec![b"\"unmatched-dquotes1"], + None::); + test_header!(test13, + vec![b"unmatched-dquotes2\""], + None::); + test_header!(test14, + vec![b"matched-\"dquotes\""], + None::); + } +} + +bench_header!(bench, ETag, { vec![b"W/\"nonemptytag\"".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/expect.rs 
b/vendor/hyper-0.10.16/src/header/common/expect.rs new file mode 100644 index 0000000..f261c36 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/expect.rs @@ -0,0 +1,68 @@ +use std::fmt; +use std::str; + +use unicase::UniCase; + +use header::{Header, HeaderFormat}; + +/// The `Expect` header. +/// +/// > The "Expect" header field in a request indicates a certain set of +/// > behaviors (expectations) that need to be supported by the server in +/// > order to properly handle this request. The only such expectation +/// > defined by this specification is 100-continue. +/// > +/// > Expect = "100-continue" +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, Expect}; +/// let mut headers = Headers::new(); +/// headers.set(Expect::Continue); +/// ``` +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum Expect { + /// The value `100-continue`. + Continue +} + +const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue"); + +impl Header for Expect { + fn header_name() -> &'static str { + "Expect" + } + + fn parse_header>(raw: &[T]) -> ::Result { + if raw.len() == 1 { + let text = unsafe { + // safe because: + // 1. we just checked raw.len == 1 + // 2. we don't actually care if it's utf8, we just want to + // compare the bytes with the "case" normalized. If it's not + // utf8, then the byte comparison will fail, and we'll return + // None. No big deal. 
+ str::from_utf8_unchecked(raw.get_unchecked(0).as_ref()) + }; + if UniCase(text) == EXPECT_CONTINUE { + Ok(Expect::Continue) + } else { + Err(::Error::Header) + } + } else { + Err(::Error::Header) + } + } +} + +impl HeaderFormat for Expect { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Expect { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("100-continue") + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/expires.rs b/vendor/hyper-0.10.16/src/header/common/expires.rs new file mode 100644 index 0000000..839798b --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/expires.rs @@ -0,0 +1,45 @@ +use header::HttpDate; + +header! { + /// `Expires` header, defined in [RFC7234](http://tools.ietf.org/html/rfc7234#section-5.3) + /// + /// The `Expires` header field gives the date/time after which the + /// response is considered stale. + /// + /// The presence of an Expires field does not imply that the original + /// resource will change or cease to exist at, before, or after that + /// time. 
+ /// + /// # ABNF + /// ```plain + /// Expires = HTTP-date + /// ``` + /// + /// # Example values + /// * `Thu, 01 Dec 1994 16:00:00 GMT` + /// + /// # Example + /// ``` + /// # extern crate hyper; + /// # extern crate time; + /// # fn main() { + /// // extern crate time; + /// + /// use hyper::header::{Headers, Expires, HttpDate}; + /// use time::{self, Duration}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Expires(HttpDate(time::now() + Duration::days(1)))); + /// # } + /// ``` + (Expires, "Expires") => [HttpDate] + + test_expires { + // Testcase from RFC + test_header!(test1, vec![b"Thu, 01 Dec 1994 16:00:00 GMT"]); + } +} + +bench_header!(imf_fixdate, Expires, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); +bench_header!(rfc_850, Expires, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); +bench_header!(asctime, Expires, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/from.rs b/vendor/hyper-0.10.16/src/header/common/from.rs new file mode 100644 index 0000000..69e5174 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/from.rs @@ -0,0 +1,26 @@ +header! { + /// `From` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.1) + /// + /// The `From` header field contains an Internet email address for a + /// human user who controls the requesting user agent. The address ought + /// to be machine-usable. + /// # ABNF + /// ```plain + /// From = mailbox + /// mailbox = + /// ``` + /// + /// # Example + /// ``` + /// use hyper::header::{Headers, From}; + /// + /// let mut headers = Headers::new(); + /// headers.set(From("webmaster@example.org".to_owned())); + /// ``` + // FIXME: Maybe use mailbox? 
+ (From, "From") => [String] + + test_from { + test_header!(test1, vec![b"webmaster@example.org"]); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/host.rs b/vendor/hyper-0.10.16/src/header/common/host.rs new file mode 100644 index 0000000..3eeb5f7 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/host.rs @@ -0,0 +1,145 @@ +use header::{Header, HeaderFormat}; +use std::fmt; +use std::str::FromStr; +use header::parsing::from_one_raw_str; +use url::idna::domain_to_unicode; + +/// The `Host` header. +/// +/// HTTP/1.1 requires that all requests include a `Host` header, and so hyper +/// client requests add one automatically. +/// +/// Currently is just a String, but it should probably become a better type, +/// like `url::Host` or something. +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Host}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Host{ +/// hostname: "hyper.rs".to_owned(), +/// port: None, +/// } +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, Host}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Host{ +/// hostname: "hyper.rs".to_owned(), +/// port: Some(8080), +/// } +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct Host { + /// The hostname, such a example.domain. + pub hostname: String, + /// An optional port number. 
+ pub port: Option +} + +impl Header for Host { + fn header_name() -> &'static str { + "Host" + } + + fn parse_header>(raw: &[T]) -> ::Result { + from_one_raw_str(raw) + } +} + +impl HeaderFormat for Host { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.port { + None | Some(80) | Some(443) => f.write_str(&self.hostname[..]), + Some(port) => write!(f, "{}:{}", self.hostname, port) + } + } +} + +impl fmt::Display for Host { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.fmt_header(f) + } +} + +impl FromStr for Host { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + let idx = s.rfind(':'); + let port = idx.and_then( + |idx| s[idx + 1..].parse().ok() + ); + let hostname_encoded = match port { + None => s, + Some(_) => &s[..idx.unwrap()] + }; + + let hostname = if hostname_encoded.starts_with("[") { + if !hostname_encoded.ends_with("]") { + return Err(::Error::Header) + } + hostname_encoded.to_owned() + } else { + let (hostname, res) = domain_to_unicode(hostname_encoded); + if res.is_err() { + return Err(::Error::Header) + } + hostname + }; + + Ok(Host { + hostname: hostname, + port: port + }) + } +} + +#[cfg(test)] +mod tests { + use super::Host; + use header::Header; + + + #[test] + fn test_host() { + let host = Header::parse_header([b"foo.com".to_vec()].as_ref()); + assert_eq!(host.ok(), Some(Host { + hostname: "foo.com".to_owned(), + port: None + })); + + + let host = Header::parse_header([b"foo.com:8080".to_vec()].as_ref()); + assert_eq!(host.ok(), Some(Host { + hostname: "foo.com".to_owned(), + port: Some(8080) + })); + + let host = Header::parse_header([b"foo.com".to_vec()].as_ref()); + assert_eq!(host.ok(), Some(Host { + hostname: "foo.com".to_owned(), + port: None + })); + + let host = Header::parse_header([b"[::1]:8080".to_vec()].as_ref()); + assert_eq!(host.ok(), Some(Host { + hostname: "[::1]".to_owned(), + port: Some(8080) + })); + + let host = Header::parse_header([b"[::1]".to_vec()].as_ref()); + 
assert_eq!(host.ok(), Some(Host { + hostname: "[::1]".to_owned(), + port: None + })); + } +} + +bench_header!(bench, Host, { vec![b"foo.com:3000".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/if_match.rs b/vendor/hyper-0.10.16/src/header/common/if_match.rs new file mode 100644 index 0000000..4788453 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/if_match.rs @@ -0,0 +1,69 @@ +use header::EntityTag; + +header! { + /// `If-Match` header, defined in + /// [RFC7232](https://tools.ietf.org/html/rfc7232#section-3.1) + /// + /// The `If-Match` header field makes the request method conditional on + /// the recipient origin server either having at least one current + /// representation of the target resource, when the field-value is "*", + /// or having a current representation of the target resource that has an + /// entity-tag matching a member of the list of entity-tags provided in + /// the field-value. + /// + /// An origin server MUST use the strong comparison function when + /// comparing entity-tags for `If-Match`, since the client + /// intends this precondition to prevent the method from being applied if + /// there have been any changes to the representation data. 
+ /// + /// # ABNF + /// ```plain + /// If-Match = "*" / 1#entity-tag + /// ``` + /// + /// # Example values + /// * `"xyzzy"` + /// * "xyzzy", "r2d2xxxx", "c3piozzzz" + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, IfMatch}; + /// + /// let mut headers = Headers::new(); + /// headers.set(IfMatch::Any); + /// ``` + /// ``` + /// use hyper::header::{Headers, IfMatch, EntityTag}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// IfMatch::Items(vec![ + /// EntityTag::new(false, "xyzzy".to_owned()), + /// EntityTag::new(false, "foobar".to_owned()), + /// EntityTag::new(false, "bazquux".to_owned()), + /// ]) + /// ); + /// ``` + (IfMatch, "If-Match") => {Any / (EntityTag)+} + + test_if_match { + test_header!( + test1, + vec![b"\"xyzzy\""], + Some(HeaderField::Items( + vec![EntityTag::new(false, "xyzzy".to_owned())]))); + test_header!( + test2, + vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\""], + Some(HeaderField::Items( + vec![EntityTag::new(false, "xyzzy".to_owned()), + EntityTag::new(false, "r2d2xxxx".to_owned()), + EntityTag::new(false, "c3piozzzz".to_owned())]))); + test_header!(test3, vec![b"*"], Some(IfMatch::Any)); + } +} + +bench_header!(star, IfMatch, { vec![b"*".to_vec()] }); +bench_header!(single , IfMatch, { vec![b"\"xyzzy\"".to_vec()] }); +bench_header!(multi, IfMatch, + { vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/if_modified_since.rs b/vendor/hyper-0.10.16/src/header/common/if_modified_since.rs new file mode 100644 index 0000000..7a0aee2 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/if_modified_since.rs @@ -0,0 +1,45 @@ +use header::HttpDate; + +header! 
{ + /// `If-Modified-Since` header, defined in + /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-3.3) + /// + /// The `If-Modified-Since` header field makes a GET or HEAD request + /// method conditional on the selected representation's modification date + /// being more recent than the date provided in the field-value. + /// Transfer of the selected representation's data is avoided if that + /// data has not changed. + /// + /// # ABNF + /// ```plain + /// If-Modified-Since = HTTP-date + /// ``` + /// + /// # Example values + /// * `Sat, 29 Oct 1994 19:43:31 GMT` + /// + /// # Example + /// ``` + /// # extern crate hyper; + /// # extern crate time; + /// # fn main() { + /// // extern crate time; + /// + /// use hyper::header::{Headers, IfModifiedSince, HttpDate}; + /// use time::{self, Duration}; + /// + /// let mut headers = Headers::new(); + /// headers.set(IfModifiedSince(HttpDate(time::now() - Duration::days(1)))); + /// # } + /// ``` + (IfModifiedSince, "If-Modified-Since") => [HttpDate] + + test_if_modified_since { + // Testcase from RFC + test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); + } +} + +bench_header!(imf_fixdate, IfModifiedSince, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); +bench_header!(rfc_850, IfModifiedSince, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); +bench_header!(asctime, IfModifiedSince, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/if_none_match.rs b/vendor/hyper-0.10.16/src/header/common/if_none_match.rs new file mode 100644 index 0000000..734845d --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/if_none_match.rs @@ -0,0 +1,83 @@ +use header::EntityTag; + +header!
{ + /// `If-None-Match` header, defined in + /// [RFC7232](https://tools.ietf.org/html/rfc7232#section-3.2) + /// + /// The `If-None-Match` header field makes the request method conditional + /// on a recipient cache or origin server either not having any current + /// representation of the target resource, when the field-value is "*", + /// or having a selected representation with an entity-tag that does not + /// match any of those listed in the field-value. + /// + /// A recipient MUST use the weak comparison function when comparing + /// entity-tags for If-None-Match (Section 2.3.2), since weak entity-tags + /// can be used for cache validation even if there have been changes to + /// the representation data. + /// + /// # ABNF + /// ```plain + /// If-None-Match = "*" / 1#entity-tag + /// ``` + /// + /// # Example values + /// * `"xyzzy"` + /// * `W/"xyzzy"` + /// * `"xyzzy", "r2d2xxxx", "c3piozzzz"` + /// * `W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz"` + /// * `*` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, IfNoneMatch}; + /// + /// let mut headers = Headers::new(); + /// headers.set(IfNoneMatch::Any); + /// ``` + /// ``` + /// use hyper::header::{Headers, IfNoneMatch, EntityTag}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// IfNoneMatch::Items(vec![ + /// EntityTag::new(false, "xyzzy".to_owned()), + /// EntityTag::new(false, "foobar".to_owned()), + /// EntityTag::new(false, "bazquux".to_owned()), + /// ]) + /// ); + /// ``` + (IfNoneMatch, "If-None-Match") => {Any / (EntityTag)+} + + test_if_none_match { + test_header!(test1, vec![b"\"xyzzy\""]); + test_header!(test2, vec![b"W/\"xyzzy\""]); + test_header!(test3, vec![b"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\""]); + test_header!(test4, vec![b"W/\"xyzzy\", W/\"r2d2xxxx\", W/\"c3piozzzz\""]); + test_header!(test5, vec![b"*"]); + } +} + +#[cfg(test)] +mod tests { + use super::IfNoneMatch; + use header::Header; + use header::EntityTag; + + #[test] + fn test_if_none_match() 
{ + let mut if_none_match: ::Result; + + if_none_match = Header::parse_header([b"*".to_vec()].as_ref()); + assert_eq!(if_none_match.ok(), Some(IfNoneMatch::Any)); + + if_none_match = Header::parse_header([b"\"foobar\", W/\"weak-etag\"".to_vec()].as_ref()); + let mut entities: Vec = Vec::new(); + let foobar_etag = EntityTag::new(false, "foobar".to_owned()); + let weak_etag = EntityTag::new(true, "weak-etag".to_owned()); + entities.push(foobar_etag); + entities.push(weak_etag); + assert_eq!(if_none_match.ok(), Some(IfNoneMatch::Items(entities))); + } +} + +bench_header!(bench, IfNoneMatch, { vec![b"W/\"nonemptytag\"".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/if_range.rs b/vendor/hyper-0.10.16/src/header/common/if_range.rs new file mode 100644 index 0000000..962b286 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/if_range.rs @@ -0,0 +1,96 @@ +use std::fmt::{self, Display}; +use header::{self, Header, HeaderFormat, EntityTag, HttpDate}; + +/// `If-Range` header, defined in [RFC7233](http://tools.ietf.org/html/rfc7233#section-3.2) +/// +/// If a client has a partial copy of a representation and wishes to have +/// an up-to-date copy of the entire representation, it could use the +/// Range header field with a conditional GET (using either or both of +/// If-Unmodified-Since and If-Match.) However, if the precondition +/// fails because the representation has been modified, the client would +/// then have to make a second request to obtain the entire current +/// representation. +/// +/// The `If-Range` header field allows a client to \"short-circuit\" the +/// second request. Informally, its meaning is as follows: if the +/// representation is unchanged, send me the part(s) that I am requesting +/// in Range; otherwise, send me the entire representation. 
+/// +/// # ABNF +/// ```plain +/// If-Range = entity-tag / HTTP-date +/// ``` +/// +/// # Example values +/// * `Sat, 29 Oct 1994 19:43:31 GMT` +/// * `\"xyzzy\"` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, IfRange, EntityTag}; +/// +/// let mut headers = Headers::new(); +/// headers.set(IfRange::EntityTag(EntityTag::new(false, "xyzzy".to_owned()))); +/// ``` +/// ``` +/// # extern crate hyper; +/// # extern crate time; +/// # fn main() { +/// // extern crate time; +/// +/// use hyper::header::{Headers, IfRange, HttpDate}; +/// use time::{self, Duration}; +/// +/// let mut headers = Headers::new(); +/// headers.set(IfRange::Date(HttpDate(time::now() - Duration::days(1)))); +/// # } +/// ``` +#[derive(Clone, Debug, PartialEq)] +pub enum IfRange { + /// The entity-tag the client has of the resource + EntityTag(EntityTag), + /// The date when the client retrieved the resource + Date(HttpDate), +} + +impl Header for IfRange { + fn header_name() -> &'static str { + "If-Range" + } + fn parse_header>(raw: &[T]) -> ::Result { + let etag: ::Result = header::parsing::from_one_raw_str(raw); + if etag.is_ok() { + return Ok(IfRange::EntityTag(etag.unwrap())); + } + let date: ::Result = header::parsing::from_one_raw_str(raw); + if date.is_ok() { + return Ok(IfRange::Date(date.unwrap())); + } + Err(::Error::Header) + } +} + +impl HeaderFormat for IfRange { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match *self { + IfRange::EntityTag(ref x) => Display::fmt(x, f), + IfRange::Date(ref x) => Display::fmt(x, f), + } + } +} + +impl Display for IfRange { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.fmt_header(f) + } +} + +#[cfg(test)] +mod test_if_range { + use std::str; + use header::*; + use super::IfRange as HeaderField; + test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); + test_header!(test2, vec![b"\"xyzzy\""]); + test_header!(test3, vec![b"this-is-invalid"], None::); +} diff --git 
a/vendor/hyper-0.10.16/src/header/common/if_unmodified_since.rs b/vendor/hyper-0.10.16/src/header/common/if_unmodified_since.rs new file mode 100644 index 0000000..9912416 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/if_unmodified_since.rs @@ -0,0 +1,45 @@ +use header::HttpDate; + +header! { + /// `If-Unmodified-Since` header, defined in + /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-3.4) + /// + /// The `If-Unmodified-Since` header field makes the request method + /// conditional on the selected representation's last modification date + /// being earlier than or equal to the date provided in the field-value. + /// This field accomplishes the same purpose as If-Match for cases where + /// the user agent does not have an entity-tag for the representation. + /// + /// # ABNF + /// ```plain + /// If-Unmodified-Since = HTTP-date + /// ``` + /// + /// # Example values + /// * `Sat, 29 Oct 1994 19:43:31 GMT` + /// + /// # Example + /// ``` + /// # extern crate hyper; + /// # extern crate time; + /// # fn main() { + /// // extern crate time; + /// + /// use hyper::header::{Headers, IfUnmodifiedSince, HttpDate}; + /// use time::{self, Duration}; + /// + /// let mut headers = Headers::new(); + /// headers.set(IfUnmodifiedSince(HttpDate(time::now() - Duration::days(1)))); + /// # } + /// ``` + (IfUnmodifiedSince, "If-Unmodified-Since") => [HttpDate] + + test_if_unmodified_since { + // Testcase from RFC + test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]); + } +} + +bench_header!(imf_fixdate, IfUnmodifiedSince, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); +bench_header!(rfc_850, IfUnmodifiedSince, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); +bench_header!(asctime, IfUnmodifiedSince, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/last-event-id.rs b/vendor/hyper-0.10.16/src/header/common/last-event-id.rs new file mode 100644 index 0000000..2c82d36 --- /dev/null +++ 
b/vendor/hyper-0.10.16/src/header/common/last-event-id.rs @@ -0,0 +1,30 @@ +header! { + /// `Last-Event-ID` header, defined in + /// [RFC3864](https://html.spec.whatwg.org/multipage/references.html#refsRFC3864) + /// + /// The `Last-Event-ID` header contains information about + /// the last event in an http interaction so that it's easier to + /// keep track of event state. This is helpful when working + /// with [Server-Sent-Events](http://www.html5rocks.com/en/tutorials/eventsource/basics/). If the connection were to be dropped, for example, it'd + /// be useful to let the server know what the last event you + /// received was. + /// + /// The spec is a String with the id of the last event, it can be + /// an empty string which acts as a sort of "reset". + /// + /// # Example + /// ``` + /// use hyper::header::{Headers, LastEventID}; + /// + /// let mut headers = Headers::new(); + /// headers.set(LastEventID("1".to_owned())); + /// ``` + (LastEventID, "Last-Event-ID") => [String] + + test_last_event_id { + // Initial state + test_header!(test1, vec![b""]); + // Own testcase + test_header!(test2, vec![b"1"], Some(LastEventID("1".to_owned()))); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/last_modified.rs b/vendor/hyper-0.10.16/src/header/common/last_modified.rs new file mode 100644 index 0000000..24fa1c3 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/last_modified.rs @@ -0,0 +1,43 @@ +use header::HttpDate; + +header! { + /// `Last-Modified` header, defined in + /// [RFC7232](http://tools.ietf.org/html/rfc7232#section-2.2) + /// + /// The `Last-Modified` header field in a response provides a timestamp + /// indicating the date and time at which the origin server believes the + /// selected representation was last modified, as determined at the + /// conclusion of handling the request.
+ /// + /// # ABNF + /// ```plain + /// Last-Modified = HTTP-date + /// ``` + /// + /// # Example values + /// * `Sat, 29 Oct 1994 19:43:31 GMT` + /// + /// # Example + /// ``` + /// # extern crate hyper; + /// # extern crate time; + /// # fn main() { + /// // extern crate time; + /// + /// use hyper::header::{Headers, LastModified, HttpDate}; + /// use time::{self, Duration}; + /// + /// let mut headers = Headers::new(); + /// headers.set(LastModified(HttpDate(time::now() - Duration::days(1)))); + /// # } + /// ``` + (LastModified, "Last-Modified") => [HttpDate] + + test_last_modified { + // Testcase from RFC + test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]);} +} + +bench_header!(imf_fixdate, LastModified, { vec![b"Sun, 07 Nov 1994 08:48:37 GMT".to_vec()] }); +bench_header!(rfc_850, LastModified, { vec![b"Sunday, 06-Nov-94 08:49:37 GMT".to_vec()] }); +bench_header!(asctime, LastModified, { vec![b"Sun Nov 6 08:49:37 1994".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/link.rs b/vendor/hyper-0.10.16/src/header/common/link.rs new file mode 100644 index 0000000..cef4872 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/link.rs @@ -0,0 +1,1065 @@ +use std::fmt; +use std::borrow::Cow; +use std::str::FromStr; + +#[allow(unused_imports)] +use std::ascii::AsciiExt; + +use mime::Mime; + +use header::parsing; +use header::{Header, HeaderFormat}; + +/// The `Link` header, defined in +/// [RFC5988](http://tools.ietf.org/html/rfc5988#section-5) +/// +/// # ABNF +/// ```plain +/// Link = "Link" ":" #link-value +/// link-value = "<" URI-Reference ">" *( ";" link-param ) +/// link-param = ( ( "rel" "=" relation-types ) +/// | ( "anchor" "=" <"> URI-Reference <"> ) +/// | ( "rev" "=" relation-types ) +/// | ( "hreflang" "=" Language-Tag ) +/// | ( "media" "=" ( MediaDesc | ( <"> MediaDesc <"> ) ) ) +/// | ( "title" "=" quoted-string ) +/// | ( "title*" "=" ext-value ) +/// | ( "type" "=" ( media-type | quoted-mt ) ) +/// | ( link-extension ) ) +///
link-extension = ( parmname [ "=" ( ptoken | quoted-string ) ] ) +/// | ( ext-name-star "=" ext-value ) +/// ext-name-star = parmname "*" ; reserved for RFC2231-profiled +/// ; extensions. Whitespace NOT +/// ; allowed in between. +/// ptoken = 1*ptokenchar +/// ptokenchar = "!" | "#" | "$" | "%" | "&" | "'" | "(" +/// | ")" | "*" | "+" | "-" | "." | "/" | DIGIT +/// | ":" | "<" | "=" | ">" | "?" | "@" | ALPHA +/// | "[" | "]" | "^" | "_" | "`" | "{" | "|" +/// | "}" | "~" +/// media-type = type-name "/" subtype-name +/// quoted-mt = <"> media-type <"> +/// relation-types = relation-type +/// | <"> relation-type *( 1*SP relation-type ) <"> +/// relation-type = reg-rel-type | ext-rel-type +/// reg-rel-type = LOALPHA *( LOALPHA | DIGIT | "." | "-" ) +/// ext-rel-type = URI +/// ``` +/// +/// # Example values +/// +/// `Link: ; rel="previous"; +/// title="previous chapter"` +/// +/// `Link: ; rel="previous"; title*=UTF-8'de'letztes%20Kapitel, +/// ; rel="next"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Link, LinkValue, RelationType}; +/// +/// let link_value = LinkValue::new("http://example.com/TheBook/chapter2") +/// .push_rel(RelationType::Previous) +/// .set_title("previous chapter"); +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Link::new(vec![link_value]) +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct Link { + /// A list of the `link-value`s of the Link entity-header. + values: Vec +} + +/// A single `link-value` of a `Link` header, based on: +/// [RFC5988](http://tools.ietf.org/html/rfc5988#section-5) +#[derive(Clone, PartialEq, Debug)] +pub struct LinkValue { + /// Target IRI: `link-value`. + link: Cow<'static, str>, + + /// Forward Relation Types: `rel`. + rel: Option>, + + /// Context IRI: `anchor`. + anchor: Option, + + /// Reverse Relation Types: `rev`. + rev: Option>, + + /// Destination medium or media: `media`. 
+ media_desc: Option>, + + /// Label of the destination of a Link: `title`. + title: Option, + + /// The `title` encoded in a different charset: `title*`. + title_star: Option, + + /// Hint on the media type of the result of dereferencing + /// the link: `type`. + media_type: Option, +} + +/// A Media Descriptors Enum based on: +/// https://www.w3.org/TR/html401/types.html#h-6.13 +#[derive(Clone, PartialEq, Debug)] +pub enum MediaDesc { + /// screen. + Screen, + /// tty. + Tty, + /// tv. + Tv, + /// projection. + Projection, + /// handheld. + Handheld, + /// print. + Print, + /// braille. + Braille, + /// aural. + Aural, + /// all. + All, + /// Unrecognized media descriptor extension. + Extension(String) +} + +/// A Link Relation Type Enum based on: +/// [RFC5988](https://tools.ietf.org/html/rfc5988#section-6.2.2) +#[derive(Clone, PartialEq, Debug)] +pub enum RelationType { + /// alternate. + Alternate, + /// appendix. + Appendix, + /// bookmark. + Bookmark, + /// chapter. + Chapter, + /// contents. + Contents, + /// copyright. + Copyright, + /// current. + Current, + /// describedby. + DescribedBy, + /// edit. + Edit, + /// edit-media. + EditMedia, + /// enclosure. + Enclosure, + /// first. + First, + /// glossary. + Glossary, + /// help. + Help, + /// hub. + Hub, + /// index. + Index, + /// last. + Last, + /// latest-version. + LatestVersion, + /// license. + License, + /// next. + Next, + /// next-archive. + NextArchive, + /// payment. + Payment, + /// prev. + Prev, + /// predecessor-version. + PredecessorVersion, + /// previous. + Previous, + /// prev-archive. + PrevArchive, + /// related. + Related, + /// replies. + Replies, + /// section. + Section, + /// self. + RelationTypeSelf, + /// service. + Service, + /// start. + Start, + /// stylesheet. + Stylesheet, + /// subsection. + Subsection, + /// successor-version. + SuccessorVersion, + /// up. + Up, + /// versionHistory. + VersionHistory, + /// via. + Via, + /// working-copy. 
+ WorkingCopy, + /// working-copy-of. + WorkingCopyOf, + /// ext-rel-type. + ExtRelType(String) +} + +//////////////////////////////////////////////////////////////////////////////// +// Struct methods +//////////////////////////////////////////////////////////////////////////////// + +impl Link { + /// Create `Link` from a `Vec`. + pub fn new(link_values: Vec) -> Link { + Link { values: link_values } + } + + /// Get the `Link` header's `LinkValue`s. + pub fn values(&self) -> &[LinkValue] { + self.values.as_ref() + } + + /// Add a `LinkValue` instance to the `Link` header's values. + pub fn push_value(&mut self, link_value: LinkValue) { + self.values.push(link_value); + } +} + +impl LinkValue { + /// Create `LinkValue` from URI-Reference. + pub fn new(uri: T) -> LinkValue + where T: Into> { + LinkValue { + link: uri.into(), + rel: None, + anchor: None, + rev: None, + media_desc: None, + title: None, + title_star: None, + media_type: None, + } + } + + /// Get the `LinkValue`'s value. + pub fn link(&self) -> &str { + self.link.as_ref() + } + + /// Get the `LinkValue`'s `rel` parameter(s). + pub fn rel(&self) -> Option<&[RelationType]> { + self.rel.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `anchor` parameter. + pub fn anchor(&self) -> Option<&str> { + self.anchor.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `rev` parameter(s). + pub fn rev(&self) -> Option<&[RelationType]> { + self.rev.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `media` parameter(s). + pub fn media_desc(&self) -> Option<&[MediaDesc]> { + self.media_desc.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `title` parameter. + pub fn title(&self) -> Option<&str> { + self.title.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `title*` parameter. + pub fn title_star(&self) -> Option<&str> { + self.title_star.as_ref().map(AsRef::as_ref) + } + + /// Get the `LinkValue`'s `type` parameter. 
+ pub fn media_type(&self) -> Option<&Mime> { + self.media_type.as_ref() + } + + /// Add a `RelationType` to the `LinkValue`'s `rel` parameter. + pub fn push_rel(mut self, rel: RelationType) -> LinkValue { + let mut v = self.rel.take().unwrap_or(Vec::new()); + + v.push(rel); + + self.rel = Some(v); + + self + } + + /// Set `LinkValue`'s `anchor` parameter. + pub fn set_anchor>(mut self, anchor: T) -> LinkValue { + self.anchor = Some(anchor.into()); + + self + } + + /// Add a `RelationType` to the `LinkValue`'s `rev` parameter. + pub fn push_rev(mut self, rev: RelationType) -> LinkValue { + let mut v = self.rev.take().unwrap_or(Vec::new()); + + v.push(rev); + + self.rev = Some(v); + + self + } + + /// Add a `MediaDesc` to the `LinkValue`'s `media_desc` parameter. + pub fn push_media_desc(mut self, media_desc: MediaDesc) -> LinkValue { + let mut v = self.media_desc.take().unwrap_or(Vec::new()); + + v.push(media_desc); + + self.media_desc = Some(v); + + self + } + + /// Set `LinkValue`'s `title` parameter. + pub fn set_title>(mut self, title: T) -> LinkValue { + self.title = Some(title.into()); + + self + } + + /// Set `LinkValue`'s `title*` parameter. + pub fn set_title_star>(mut self, title_star: T) -> LinkValue { + self.title_star = Some(title_star.into()); + + self + } + + /// Set `LinkValue`'s `type` parameter. 
+ pub fn set_media_type(mut self, media_type: Mime) -> LinkValue { + self.media_type = Some(media_type); + + self + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Trait implementations +//////////////////////////////////////////////////////////////////////////////// + +impl Header for Link { + fn header_name() -> &'static str { + static NAME: &'static str = "Link"; + NAME + } + + fn parse_header>(raw: &[T]) -> ::Result { + // If more that one `Link` headers are present in a request's + // headers they are combined in a single `Link` header containing + // all the `link-value`s present in each of those `Link` headers. + raw.iter() + .map(|v| parsing::from_raw_str::(v.as_ref())) + .fold(None, |p, c| match (p, c) { + (None, c) => Some(c), + (e @ Some(Err(_)), _) => e, + (Some(Ok(mut p)), Ok(c)) => { + p.values.extend(c.values); + + Some(Ok(p)) + } + _ => Some(Err(::Error::Header)), + }) + .unwrap_or(Err(::Error::Header)) + } +} +impl HeaderFormat for Link { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt_delimited(f, self.values.as_slice(), ", ", ("", "")) + } +} + +impl fmt::Display for Link { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.fmt_header(f) + } +} + +impl fmt::Display for LinkValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(write!(f, "<{}>", self.link)); + + if let Some(ref rel) = self.rel { + try!(fmt_delimited(f, rel.as_slice(), " ", ("; rel=\"", "\""))); + } + if let Some(ref anchor) = self.anchor { + try!(write!(f, "; anchor=\"{}\"", anchor)); + } + if let Some(ref rev) = self.rev { + try!(fmt_delimited(f, rev.as_slice(), " ", ("; rev=\"", "\""))); + } + if let Some(ref media_desc) = self.media_desc { + try!(fmt_delimited(f, media_desc.as_slice(), ", ", ("; media=\"", "\""))); + } + if let Some(ref title) = self.title { + try!(write!(f, "; title=\"{}\"", title)); + } + if let Some(ref title_star) = self.title_star { + try!(write!(f, "; 
title*={}", title_star)); + } + if let Some(ref media_type) = self.media_type { + try!(write!(f, "; type=\"{}\"", media_type)); + } + + Ok(()) + } +} + +impl FromStr for Link { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + // Create a split iterator with delimiters: `;`, `,` + let link_split = SplitAsciiUnquoted::new(s, ";,"); + + let mut link_values: Vec = Vec::new(); + + // Loop over the splits parsing the Link header into + // a `Vec` + for segment in link_split { + // Parse the `Target IRI` + // https://tools.ietf.org/html/rfc5988#section-5.1 + if segment.trim().starts_with('<') { + link_values.push( + match verify_and_trim(segment.trim(), (b'<', b'>')) { + Err(_) => return Err(::Error::Header), + Ok(s) => { + LinkValue { + link: s.to_owned().into(), + rel: None, + anchor: None, + rev: None, + media_desc: None, + title: None, + title_star: None, + media_type: None, + } + }, + } + ); + } else { + // Parse the current link-value's parameters + let mut link_param_split = segment.splitn(2, '='); + + let link_param_name = match link_param_split.next() { + None => return Err(::Error::Header), + Some(p) => p.trim(), + }; + + let link_header = match link_values.last_mut() { + None => return Err(::Error::Header), + Some(l) => l, + }; + + if "rel".eq_ignore_ascii_case(link_param_name) { + // Parse relation type: `rel`. + // https://tools.ietf.org/html/rfc5988#section-5.3 + if link_header.rel.is_none() { + link_header.rel = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => { + s.trim_matches(|c: char| c == '"' || c.is_whitespace()) + .split(' ') + .map(|t| t.trim().parse()) + .collect::, _>>() + .or_else(|_| return Err(::Error::Header)) + .ok() + }, + }; + } + } else if "anchor".eq_ignore_ascii_case(link_param_name) { + // Parse the `Context IRI`. 
+ // https://tools.ietf.org/html/rfc5988#section-5.2 + link_header.anchor = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { + Err(_) => return Err(::Error::Header), + Ok(a) => Some(String::from(a)), + }, + }; + } else if "rev".eq_ignore_ascii_case(link_param_name) { + // Parse relation type: `rev`. + // https://tools.ietf.org/html/rfc5988#section-5.3 + if link_header.rev.is_none() { + link_header.rev = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => { + s.trim_matches(|c: char| c == '"' || c.is_whitespace()) + .split(' ') + .map(|t| t.trim().parse()) + .collect::, _>>() + .or_else(|_| return Err(::Error::Header)) + .ok() + }, + } + } + } else if "media".eq_ignore_ascii_case(link_param_name) { + // Parse target attribute: `media`. + // https://tools.ietf.org/html/rfc5988#section-5.4 + if link_header.media_desc.is_none() { + link_header.media_desc = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => { + s.trim_matches(|c: char| c == '"' || c.is_whitespace()) + .split(',') + .map(|t| t.trim().parse()) + .collect::, _>>() + .or_else(|_| return Err(::Error::Header)) + .ok() + }, + }; + } + } else if "title".eq_ignore_ascii_case(link_param_name) { + // Parse target attribute: `title`. + // https://tools.ietf.org/html/rfc5988#section-5.4 + if link_header.title.is_none() { + link_header.title = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { + Err(_) => return Err(::Error::Header), + Ok(t) => Some(String::from(t)), + }, + }; + } + } else if "title*".eq_ignore_ascii_case(link_param_name) { + // Parse target attribute: `title*`. 
+ // https://tools.ietf.org/html/rfc5988#section-5.4 + // + // Definition of `ext-value`: + // https://tools.ietf.org/html/rfc5987#section-3.2.1 + if link_header.title_star.is_none() { + link_header.title_star = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => Some(String::from(s.trim())), + }; + } + } else if "type".eq_ignore_ascii_case(link_param_name) { + // Parse target attribute: `type`. + // https://tools.ietf.org/html/rfc5988#section-5.4 + if link_header.media_type.is_none() { + link_header.media_type = match link_param_split.next() { + None => return Err(::Error::Header), + Some("") => return Err(::Error::Header), + Some(s) => match verify_and_trim(s.trim(), (b'"', b'"')) { + Err(_) => return Err(::Error::Header), + Ok(t) => match t.parse() { + Err(_) => return Err(::Error::Header), + Ok(m) => Some(m), + }, + }, + + }; + } + } else { + return Err(::Error::Header); + } + } + } + + Ok(Link::new(link_values)) + } +} + +impl fmt::Display for MediaDesc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + MediaDesc::Screen => write!(f, "screen"), + MediaDesc::Tty => write!(f, "tty"), + MediaDesc::Tv => write!(f, "tv"), + MediaDesc::Projection => write!(f, "projection"), + MediaDesc::Handheld => write!(f, "handheld"), + MediaDesc::Print => write!(f, "print"), + MediaDesc::Braille => write!(f, "braille"), + MediaDesc::Aural => write!(f, "aural"), + MediaDesc::All => write!(f, "all"), + MediaDesc::Extension(ref other) => write!(f, "{}", other), + } + } +} + +impl FromStr for MediaDesc { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + match s { + "screen" => Ok(MediaDesc::Screen), + "tty" => Ok(MediaDesc::Tty), + "tv" => Ok(MediaDesc::Tv), + "projection" => Ok(MediaDesc::Projection), + "handheld" => Ok(MediaDesc::Handheld), + "print" => Ok(MediaDesc::Print), + "braille" => Ok(MediaDesc::Braille), + "aural" => Ok(MediaDesc::Aural), + "all" => 
Ok(MediaDesc::All), + _ => Ok(MediaDesc::Extension(String::from(s))), + } + } +} + +impl fmt::Display for RelationType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + RelationType::Alternate => write!(f, "alternate"), + RelationType::Appendix => write!(f, "appendix"), + RelationType::Bookmark => write!(f, "bookmark"), + RelationType::Chapter => write!(f, "chapter"), + RelationType::Contents => write!(f, "contents"), + RelationType::Copyright => write!(f, "copyright"), + RelationType::Current => write!(f, "current"), + RelationType::DescribedBy => write!(f, "describedby"), + RelationType::Edit => write!(f, "edit"), + RelationType::EditMedia => write!(f, "edit-media"), + RelationType::Enclosure => write!(f, "enclosure"), + RelationType::First => write!(f, "first"), + RelationType::Glossary => write!(f, "glossary"), + RelationType::Help => write!(f, "help"), + RelationType::Hub => write!(f, "hub"), + RelationType::Index => write!(f, "index"), + RelationType::Last => write!(f, "last"), + RelationType::LatestVersion => write!(f, "latest-version"), + RelationType::License => write!(f, "license"), + RelationType::Next => write!(f, "next"), + RelationType::NextArchive => write!(f, "next-archive"), + RelationType::Payment => write!(f, "payment"), + RelationType::Prev => write!(f, "prev"), + RelationType::PredecessorVersion => write!(f, "predecessor-version"), + RelationType::Previous => write!(f, "previous"), + RelationType::PrevArchive => write!(f, "prev-archive"), + RelationType::Related => write!(f, "related"), + RelationType::Replies => write!(f, "replies"), + RelationType::Section => write!(f, "section"), + RelationType::RelationTypeSelf => write!(f, "self"), + RelationType::Service => write!(f, "service"), + RelationType::Start => write!(f, "start"), + RelationType::Stylesheet => write!(f, "stylesheet"), + RelationType::Subsection => write!(f, "subsection"), + RelationType::SuccessorVersion => write!(f, "successor-version"), + 
RelationType::Up => write!(f, "up"), + RelationType::VersionHistory => write!(f, "version-history"), + RelationType::Via => write!(f, "via"), + RelationType::WorkingCopy => write!(f, "working-copy"), + RelationType::WorkingCopyOf => write!(f, "working-copy-of"), + RelationType::ExtRelType(ref uri) => write!(f, "{}", uri), + } + } +} + +impl FromStr for RelationType { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + if "alternate".eq_ignore_ascii_case(s) { + Ok(RelationType::Alternate) + } else if "appendix".eq_ignore_ascii_case(s) { + Ok(RelationType::Appendix) + } else if "bookmark".eq_ignore_ascii_case(s) { + Ok(RelationType::Bookmark) + } else if "chapter".eq_ignore_ascii_case(s) { + Ok(RelationType::Chapter) + } else if "contents".eq_ignore_ascii_case(s) { + Ok(RelationType::Contents) + } else if "copyright".eq_ignore_ascii_case(s) { + Ok(RelationType::Copyright) + } else if "current".eq_ignore_ascii_case(s) { + Ok(RelationType::Current) + } else if "describedby".eq_ignore_ascii_case(s) { + Ok(RelationType::DescribedBy) + } else if "edit".eq_ignore_ascii_case(s) { + Ok(RelationType::Edit) + } else if "edit-media".eq_ignore_ascii_case(s) { + Ok(RelationType::EditMedia) + } else if "enclosure".eq_ignore_ascii_case(s) { + Ok(RelationType::Enclosure) + } else if "first".eq_ignore_ascii_case(s) { + Ok(RelationType::First) + } else if "glossary".eq_ignore_ascii_case(s) { + Ok(RelationType::Glossary) + } else if "help".eq_ignore_ascii_case(s) { + Ok(RelationType::Help) + } else if "hub".eq_ignore_ascii_case(s) { + Ok(RelationType::Hub) + } else if "index".eq_ignore_ascii_case(s) { + Ok(RelationType::Index) + } else if "last".eq_ignore_ascii_case(s) { + Ok(RelationType::Last) + } else if "latest-version".eq_ignore_ascii_case(s) { + Ok(RelationType::LatestVersion) + } else if "license".eq_ignore_ascii_case(s) { + Ok(RelationType::License) + } else if "next".eq_ignore_ascii_case(s) { + Ok(RelationType::Next) + } else if 
"next-archive".eq_ignore_ascii_case(s) { + Ok(RelationType::NextArchive) + } else if "payment".eq_ignore_ascii_case(s) { + Ok(RelationType::Payment) + } else if "prev".eq_ignore_ascii_case(s) { + Ok(RelationType::Prev) + } else if "predecessor-version".eq_ignore_ascii_case(s) { + Ok(RelationType::PredecessorVersion) + } else if "previous".eq_ignore_ascii_case(s) { + Ok(RelationType::Previous) + } else if "prev-archive".eq_ignore_ascii_case(s) { + Ok(RelationType::PrevArchive) + } else if "related".eq_ignore_ascii_case(s) { + Ok(RelationType::Related) + } else if "replies".eq_ignore_ascii_case(s) { + Ok(RelationType::Replies) + } else if "section".eq_ignore_ascii_case(s) { + Ok(RelationType::Section) + } else if "self".eq_ignore_ascii_case(s) { + Ok(RelationType::RelationTypeSelf) + } else if "service".eq_ignore_ascii_case(s) { + Ok(RelationType::Service) + } else if "start".eq_ignore_ascii_case(s) { + Ok(RelationType::Start) + } else if "stylesheet".eq_ignore_ascii_case(s) { + Ok(RelationType::Stylesheet) + } else if "subsection".eq_ignore_ascii_case(s) { + Ok(RelationType::Subsection) + } else if "successor-version".eq_ignore_ascii_case(s) { + Ok(RelationType::SuccessorVersion) + } else if "up".eq_ignore_ascii_case(s) { + Ok(RelationType::Up) + } else if "version-history".eq_ignore_ascii_case(s) { + Ok(RelationType::VersionHistory) + } else if "via".eq_ignore_ascii_case(s) { + Ok(RelationType::Via) + } else if "working-copy".eq_ignore_ascii_case(s) { + Ok(RelationType::WorkingCopy) + } else if "working-copy-of".eq_ignore_ascii_case(s) { + Ok(RelationType::WorkingCopyOf) + } else { + Ok(RelationType::ExtRelType(String::from(s))) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Utilities +//////////////////////////////////////////////////////////////////////////////// + +struct SplitAsciiUnquoted<'a> { + src: &'a str, + pos: usize, + del: &'a str +} + +impl<'a> SplitAsciiUnquoted<'a> { + fn new(s: &'a str, d: &'a 
str) -> SplitAsciiUnquoted<'a> { + SplitAsciiUnquoted{ + src: s, + pos: 0, + del: d, + } + } +} + +impl<'a> Iterator for SplitAsciiUnquoted<'a> { + type Item = &'a str; + + fn next(&mut self) -> Option<&'a str> { + if self.pos < self.src.len() { + let prev_pos = self.pos; + let mut pos = self.pos; + + let mut in_quotes = false; + + for c in self.src[prev_pos..].as_bytes().iter() { + in_quotes ^= *c == b'"'; + + // Ignore `c` if we're `in_quotes`. + if !in_quotes && self.del.as_bytes().contains(c) { + break; + } + + pos += 1; + } + + self.pos = pos + 1; + + Some(&self.src[prev_pos..pos]) + } else { + None + } + } +} + +fn fmt_delimited(f: &mut fmt::Formatter, p: &[T], d: &str, b: (&str, &str)) -> fmt::Result { + if p.len() != 0 { + // Write a starting string `b.0` before the first element + try!(write!(f, "{}{}", b.0, p[0])); + + for i in &p[1..] { + // Write the next element preceded by the delimiter `d` + try!(write!(f, "{}{}", d, i)); + } + + // Write a ending string `b.1` before the first element + try!(write!(f, "{}", b.1)); + } + + Ok(()) +} + +fn verify_and_trim(s: &str, b: (u8, u8)) -> ::Result<&str> { + let length = s.len(); + let byte_array = s.as_bytes(); + + // Verify that `s` starts with `b.0` and ends with `b.1` and return + // the contained substring after triming whitespace. 
+ if length > 1 && b.0 == byte_array[0] && b.1 == byte_array[length - 1] { + Ok(s.trim_matches( + |c: char| c == b.0 as char || c == b.1 as char || c.is_whitespace()) + ) + } else { + Err(::Error::Header) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod tests { + use std::fmt; + use std::fmt::Write; + + use super::{Link, LinkValue, MediaDesc, RelationType, SplitAsciiUnquoted}; + use super::{fmt_delimited, verify_and_trim}; + + use header::Header; + + use buffer::BufReader; + use mock::MockStream; + use http::h1::parse_request; + + use mime::Mime; + use mime::TopLevel::Text; + use mime::SubLevel::Plain; + + #[test] + fn test_link() { + let link_value = LinkValue::new("http://example.com/TheBook/chapter2") + .push_rel(RelationType::Previous) + .push_rev(RelationType::Next) + .set_title("previous chapter"); + + let link_header = b"; \ + rel=\"previous\"; rev=next; title=\"previous chapter\""; + + let expected_link = Link::new(vec![link_value]); + + let link = Header::parse_header(&vec![link_header.to_vec()]); + assert_eq!(link.ok(), Some(expected_link)); + } + + #[test] + fn test_link_multiple_values() { + let first_link = LinkValue::new("/TheBook/chapter2") + .push_rel(RelationType::Previous) + .set_title_star("UTF-8'de'letztes%20Kapitel"); + + let second_link = LinkValue::new("/TheBook/chapter4") + .push_rel(RelationType::Next) + .set_title_star("UTF-8'de'n%c3%a4chstes%20Kapitel"); + + let link_header = b"; \ + rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, \ + ; \ + rel=\"next\"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel"; + + let expected_link = Link::new(vec![first_link, second_link]); + + let link = Header::parse_header(&vec![link_header.to_vec()]); + assert_eq!(link.ok(), Some(expected_link)); + } + + #[test] + fn test_link_all_attributes() { + let link_value = 
LinkValue::new("http://example.com/TheBook/chapter2") + .push_rel(RelationType::Previous) + .set_anchor("../anchor/example/") + .push_rev(RelationType::Next) + .push_media_desc(MediaDesc::Screen) + .set_title("previous chapter") + .set_title_star("title* unparsed") + .set_media_type(Mime(Text, Plain, vec![])); + + let link_header = b"; \ + rel=\"previous\"; anchor=\"../anchor/example/\"; \ + rev=\"next\"; media=\"screen\"; \ + title=\"previous chapter\"; title*=title* unparsed; \ + type=\"text/plain\""; + + let expected_link = Link::new(vec![link_value]); + + let link = Header::parse_header(&vec![link_header.to_vec()]); + assert_eq!(link.ok(), Some(expected_link)); + } + + #[test] + fn test_link_multiple_link_headers() { + let first_link = LinkValue::new("/TheBook/chapter2") + .push_rel(RelationType::Previous) + .set_title_star("UTF-8'de'letztes%20Kapitel"); + + let second_link = LinkValue::new("/TheBook/chapter4") + .push_rel(RelationType::Next) + .set_title_star("UTF-8'de'n%c3%a4chstes%20Kapitel"); + + let third_link = LinkValue::new("http://example.com/TheBook/chapter2") + .push_rel(RelationType::Previous) + .push_rev(RelationType::Next) + .set_title("previous chapter"); + + let expected_link = Link::new(vec![first_link, second_link, third_link]); + + let mut raw = MockStream::with_input(b"GET /super_short_uri/and_whatever HTTP/1.1\r\nHost: \ + hyper.rs\r\nAccept: a lot of things\r\nAccept-Charset: \ + utf8\r\nAccept-Encoding: *\r\nLink: ; \ + rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, \ + ; rel=\"next\"; title*=\ + UTF-8'de'n%c3%a4chstes%20Kapitel\r\n\ + Access-Control-Allow-Credentials: None\r\nLink: \ + ; \ + rel=\"previous\"; rev=next; title=\"previous chapter\"\ + \r\n\r\n"); + + let mut buf = BufReader::new(&mut raw); + let res = parse_request(&mut buf).unwrap(); + + let link = res.headers.get::().unwrap(); + + assert_eq!(*link, expected_link); + } + + #[test] + fn test_link_display() { + let link_value = 
LinkValue::new("http://example.com/TheBook/chapter2") + .push_rel(RelationType::Previous) + .set_anchor("/anchor/example/") + .push_rev(RelationType::Next) + .push_media_desc(MediaDesc::Screen) + .set_title("previous chapter") + .set_title_star("title* unparsed") + .set_media_type(Mime(Text, Plain, vec![])); + + let link = Link::new(vec![link_value]); + + let mut link_header = String::new(); + write!(&mut link_header, "{}", link).unwrap(); + + let expected_link_header = "; \ + rel=\"previous\"; anchor=\"/anchor/example/\"; \ + rev=\"next\"; media=\"screen\"; \ + title=\"previous chapter\"; title*=title* unparsed; \ + type=\"text/plain\""; + + assert_eq!(link_header, expected_link_header); + } + + #[test] + fn test_link_parsing_errors() { + let link_a = b"http://example.com/TheBook/chapter2; \ + rel=\"previous\"; rev=next; title=\"previous chapter\""; + + let mut err: Result = Header::parse_header(&vec![link_a.to_vec()]); + assert_eq!(err.is_err(), true); + + let link_b = b"; \ + =\"previous\"; rev=next; title=\"previous chapter\""; + + err = Header::parse_header(&vec![link_b.to_vec()]); + assert_eq!(err.is_err(), true); + + let link_c = b"; \ + rel=; rev=next; title=\"previous chapter\""; + + err = Header::parse_header(&vec![link_c.to_vec()]); + assert_eq!(err.is_err(), true); + + let link_d = b"; \ + rel=\"previous\"; rev=next; title="; + + err = Header::parse_header(&vec![link_d.to_vec()]); + assert_eq!(err.is_err(), true); + + let link_e = b"; \ + rel=\"previous\"; rev=next; attr=unknown"; + + err = Header::parse_header(&vec![link_e.to_vec()]); + assert_eq!(err.is_err(), true); + } + + #[test] + fn test_link_split_ascii_unquoted_iterator() { + let string = "some, text; \"and, more; in quotes\", or not"; + let mut string_split = SplitAsciiUnquoted::new(string, ";,"); + + assert_eq!(Some("some"), string_split.next()); + assert_eq!(Some(" text"), string_split.next()); + assert_eq!(Some(" \"and, more; in quotes\""), string_split.next()); + assert_eq!(Some(" or 
not"), string_split.next()); + assert_eq!(None, string_split.next()); + } + + #[test] + fn test_link_fmt_delimited() { + struct TestFormatterStruct<'a> { v: Vec<&'a str> }; + + impl<'a> fmt::Display for TestFormatterStruct<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt_delimited(f, self.v.as_slice(), ", ", (">>", "<<")) + } + } + + let test_formatter = TestFormatterStruct { v: vec!["first", "second"] }; + + let mut string = String::new(); + write!(&mut string, "{}", test_formatter).unwrap(); + + let expected_string = ">>first, second<<"; + + assert_eq!(string, expected_string); + } + + #[test] + fn test_link_verify_and_trim() { + let string = verify_and_trim("> some string <", (b'>', b'<')); + assert_eq!(string.ok(), Some("some string")); + + let err = verify_and_trim(" > some string <", (b'>', b'<')); + assert_eq!(err.is_err(), true); + } +} + +bench_header!(bench_link, Link, { vec![b"; rel=\"previous\"; rev=next; title=\"previous chapter\"; type=\"text/html\"; media=\"screen, tty\"".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/location.rs b/vendor/hyper-0.10.16/src/header/common/location.rs new file mode 100644 index 0000000..5369e87 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/location.rs @@ -0,0 +1,43 @@ +header! { + /// `Location` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.1.2) + /// + /// The `Location` header field is used in some responses to refer to a + /// specific resource in relation to the response. The type of + /// relationship is defined by the combination of request method and + /// status code semantics. 
+ /// + /// # ABNF + /// ```plain + /// Location = URI-reference + /// ``` + /// + /// # Example values + /// * `/People.html#tim` + /// * `http://www.example.net/index.html` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, Location}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Location("/People.html#tim".to_owned())); + /// ``` + /// ``` + /// use hyper::header::{Headers, Location}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Location("http://www.example.com/index.html".to_owned())); + /// ``` + // TODO: Use URL + (Location, "Location") => [String] + + test_location { + // Testcase from RFC + test_header!(test1, vec![b"/People.html#tim"]); + test_header!(test2, vec![b"http://www.example.net/index.html"]); + } + +} + +bench_header!(bench, Location, { vec![b"http://foo.com/hello:3000".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/mod.rs b/vendor/hyper-0.10.16/src/header/common/mod.rs new file mode 100644 index 0000000..6ed641d --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/mod.rs @@ -0,0 +1,416 @@ +//! A Collection of Header implementations for common HTTP Headers. +//! +//! ## Mime +//! +//! Several header fields use MIME values for their contents. Keeping with the +//! strongly-typed theme, the [mime](http://seanmonstar.github.io/mime.rs) crate +//! is used, such as `ContentType(pub Mime)`. 
+ +pub use self::accept::Accept; +pub use self::access_control_allow_credentials::AccessControlAllowCredentials; +pub use self::access_control_allow_headers::AccessControlAllowHeaders; +pub use self::access_control_allow_methods::AccessControlAllowMethods; +pub use self::access_control_allow_origin::AccessControlAllowOrigin; +pub use self::access_control_expose_headers::AccessControlExposeHeaders; +pub use self::access_control_max_age::AccessControlMaxAge; +pub use self::access_control_request_headers::AccessControlRequestHeaders; +pub use self::access_control_request_method::AccessControlRequestMethod; +pub use self::accept_charset::AcceptCharset; +pub use self::accept_encoding::AcceptEncoding; +pub use self::accept_ranges::{AcceptRanges, RangeUnit}; +pub use self::allow::Allow; +pub use self::authorization::{Authorization, Scheme, Basic, Bearer}; +pub use self::cache_control::{CacheControl, CacheDirective}; +pub use self::connection::{Connection, ConnectionOption}; +pub use self::content_length::ContentLength; +pub use self::content_encoding::ContentEncoding; +pub use self::content_range::{ContentRange, ContentRangeSpec}; +pub use self::content_type::ContentType; +pub use self::cookie::Cookie; +pub use self::date::Date; +pub use self::etag::ETag; +pub use self::expect::Expect; +pub use self::expires::Expires; +pub use self::from::From; +pub use self::host::Host; +pub use self::if_match::IfMatch; +pub use self::if_modified_since::IfModifiedSince; +pub use self::if_none_match::IfNoneMatch; +pub use self::if_unmodified_since::IfUnmodifiedSince; +pub use self::if_range::IfRange; +pub use self::last_modified::LastModified; +pub use self::location::Location; +pub use self::origin::Origin; +pub use self::pragma::Pragma; +pub use self::prefer::{Prefer, Preference}; +pub use self::preference_applied::PreferenceApplied; +pub use self::range::{Range, ByteRangeSpec}; +pub use self::referer::Referer; +pub use self::referrer_policy::ReferrerPolicy; +pub use 
self::server::Server; +pub use self::set_cookie::SetCookie; +pub use self::strict_transport_security::StrictTransportSecurity; +pub use self::transfer_encoding::TransferEncoding; +pub use self::upgrade::{Upgrade, Protocol, ProtocolName}; +pub use self::user_agent::UserAgent; +pub use self::vary::Vary; +pub use self::link::{Link, LinkValue, RelationType, MediaDesc}; + +#[doc(hidden)] +#[macro_export] +macro_rules! bench_header( + ($name:ident, $ty:ty, $value:expr) => { + #[cfg(test)] + #[cfg(feature = "nightly")] + #[allow(deprecated)] + mod $name { + use test::Bencher; + use super::*; + + use header::{Header, HeaderFormatter}; + + #[bench] + fn bench_parse(b: &mut Bencher) { + let val = $value; + b.iter(|| { + let _: $ty = Header::parse_header(&val[..]).unwrap(); + }); + } + + #[bench] + fn bench_format(b: &mut Bencher) { + let val: $ty = Header::parse_header(&$value[..]).unwrap(); + let fmt = HeaderFormatter(&val); + b.iter(|| { + format!("{}", fmt); + }); + } + } + } +); + +#[doc(hidden)] +#[macro_export] +macro_rules! __hyper__deref { + ($from:ty => $to:ty) => { + impl ::std::ops::Deref for $from { + type Target = $to; + + fn deref(&self) -> &$to { + &self.0 + } + } + + impl ::std::ops::DerefMut for $from { + fn deref_mut(&mut self) -> &mut $to { + &mut self.0 + } + } + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! __hyper__tm { + ($id:ident, $tm:ident{$($tf:item)*}) => { + #[allow(unused_imports)] + #[cfg(test)] + mod $tm{ + use std::str; + use $crate::header::*; + use $crate::mime::*; + use $crate::language_tags::*; + use $crate::method::Method; + use super::$id as HeaderField; + $($tf)* + } + + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! 
test_header { + ($id:ident, $raw:expr) => { + #[test] + fn $id() { + #[allow(unused_imports)] + use std::ascii::AsciiExt; + let raw = $raw; + let a: Vec> = raw.iter().map(|x| x.to_vec()).collect(); + let value = HeaderField::parse_header(&a[..]); + let result = format!("{}", value.unwrap()); + let expected = String::from_utf8(raw[0].to_vec()).unwrap(); + let result_cmp: Vec = result + .to_ascii_lowercase() + .split(' ') + .map(|x| x.to_owned()) + .collect(); + let expected_cmp: Vec = expected + .to_ascii_lowercase() + .split(' ') + .map(|x| x.to_owned()) + .collect(); + assert_eq!(result_cmp.concat(), expected_cmp.concat()); + } + }; + ($id:ident, $raw:expr, $typed:expr) => { + #[test] + fn $id() { + let a: Vec> = $raw.iter().map(|x| x.to_vec()).collect(); + let val = HeaderField::parse_header(&a[..]); + let typed: Option = $typed; + // Test parsing + assert_eq!(val.ok(), typed); + // Test formatting + if typed.is_some() { + let raw = &($raw)[..]; + let mut iter = raw.iter().map(|b|str::from_utf8(&b[..]).unwrap()); + let mut joined = String::new(); + joined.push_str(iter.next().unwrap()); + for s in iter { + joined.push_str(", "); + joined.push_str(s); + } + assert_eq!(format!("{}", typed.unwrap()), joined); + } + } + } +} + +#[macro_export] +macro_rules! 
header { + // $a:meta: Attributes associated with the header item (usually docs) + // $id:ident: Identifier of the header + // $n:expr: Lowercase name of the header + // $nn:expr: Nice name of the header + + // List header, zero or more items + ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)*) => { + $(#[$a])* + #[derive(Clone, Debug, PartialEq)] + pub struct $id(pub Vec<$item>); + __hyper__deref!($id => Vec<$item>); + impl $crate::header::Header for $id { + fn header_name() -> &'static str { + $n + } + fn parse_header>(raw: &[T]) -> $crate::Result { + $crate::header::parsing::from_comma_delimited(raw).map($id) + } + } + impl $crate::header::HeaderFormat for $id { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + $crate::header::parsing::fmt_comma_delimited(f, &self.0[..]) + } + } + impl ::std::fmt::Display for $id { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + use $crate::header::HeaderFormat; + self.fmt_header(f) + } + } + }; + // List header, one or more items + ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+) => { + $(#[$a])* + #[derive(Clone, Debug, PartialEq)] + pub struct $id(pub Vec<$item>); + __hyper__deref!($id => Vec<$item>); + impl $crate::header::Header for $id { + fn header_name() -> &'static str { + $n + } + fn parse_header>(raw: &[T]) -> $crate::Result { + $crate::header::parsing::from_comma_delimited(raw).map($id) + } + } + impl $crate::header::HeaderFormat for $id { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + $crate::header::parsing::fmt_comma_delimited(f, &self.0[..]) + } + } + impl ::std::fmt::Display for $id { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + use $crate::header::HeaderFormat; + self.fmt_header(f) + } + } + }; + // List header, one or more items, smallvec of size 1 + ($(#[$a:meta])*($id:ident, $n:expr) => [$item:ty]+) => { + $(#[$a])* + #[derive(Clone, Debug, PartialEq)] + pub struct $id(pub 
smallvec::SmallVec<[$item; 1]>); + __hyper__deref!($id => smallvec::SmallVec<[$item; 1]>); + impl $crate::header::Header for $id { + fn header_name() -> &'static str { + $n + } + fn parse_header>(raw: &[T]) -> $crate::Result { + $crate::header::parsing::from_comma_delimited_small(raw).map($id) + } + } + impl $crate::header::HeaderFormat for $id { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + $crate::header::parsing::fmt_comma_delimited(f, &self.0[..]) + } + } + impl ::std::fmt::Display for $id { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + use $crate::header::HeaderFormat; + self.fmt_header(f) + } + } + }; + // Single value header + ($(#[$a:meta])*($id:ident, $n:expr) => [$value:ty]) => { + $(#[$a])* + #[derive(Clone, Debug, PartialEq)] + pub struct $id(pub $value); + __hyper__deref!($id => $value); + impl $crate::header::Header for $id { + fn header_name() -> &'static str { + $n + } + fn parse_header>(raw: &[T]) -> $crate::Result { + $crate::header::parsing::from_one_raw_str(raw).map($id) + } + } + impl $crate::header::HeaderFormat for $id { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + ::std::fmt::Display::fmt(&**self, f) + } + } + impl ::std::fmt::Display for $id { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + ::std::fmt::Display::fmt(&**self, f) + } + } + }; + // List header, one or more items with "*" option + ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+}) => { + $(#[$a])* + #[derive(Clone, Debug, PartialEq)] + pub enum $id { + /// Any value is a match + Any, + /// Only the listed items are a match + Items(Vec<$item>), + } + impl $crate::header::Header for $id { + fn header_name() -> &'static str { + $n + } + fn parse_header>(raw: &[T]) -> $crate::Result { + // FIXME: Return None if no item is in $id::Only + if raw.len() == 1 { + if raw[0].as_ref() == b"*" { + return Ok($id::Any) + } + } + 
$crate::header::parsing::from_comma_delimited(raw).map($id::Items) + } + } + impl $crate::header::HeaderFormat for $id { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + match *self { + $id::Any => f.write_str("*"), + $id::Items(ref fields) => $crate::header::parsing::fmt_comma_delimited( + f, &fields[..]) + } + } + } + impl ::std::fmt::Display for $id { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + use $crate::header::HeaderFormat; + self.fmt_header(f) + } + } + }; + + // optional test module + ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)* $tm:ident{$($tf:item)*}) => { + header! { + $(#[$a])* + ($id, $n) => ($item)* + } + + __hyper__tm! { $id, $tm { $($tf)* }} + }; + ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+ $tm:ident{$($tf:item)*}) => { + header! { + $(#[$a])* + ($id, $n) => ($item)+ + } + + __hyper__tm! { $id, $tm { $($tf)* }} + }; + ($(#[$a:meta])*($id:ident, $n:expr) => [$item:ty] $tm:ident{$($tf:item)*}) => { + header! { + $(#[$a])* + ($id, $n) => [$item] + } + + __hyper__tm! { $id, $tm { $($tf)* }} + }; + ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+} $tm:ident{$($tf:item)*}) => { + header! { + $(#[$a])* + ($id, $n) => {Any / ($item)+} + } + + __hyper__tm! 
{ $id, $tm { $($tf)* }} + }; +} + + +mod accept; +mod access_control_allow_credentials; +mod access_control_allow_headers; +mod access_control_allow_methods; +mod access_control_allow_origin; +mod access_control_expose_headers; +mod access_control_max_age; +mod access_control_request_headers; +mod access_control_request_method; +mod accept_charset; +mod accept_encoding; +mod accept_ranges; +mod allow; +mod authorization; +mod cache_control; +mod cookie; +mod connection; +mod content_encoding; +mod content_length; +mod content_range; +mod content_type; +mod date; +mod etag; +mod expect; +mod expires; +mod from; +mod host; +mod if_match; +mod if_modified_since; +mod if_none_match; +mod if_range; +mod if_unmodified_since; +mod last_modified; +mod location; +mod origin; +mod pragma; +mod prefer; +mod preference_applied; +mod range; +mod referer; +mod referrer_policy; +mod server; +mod set_cookie; +mod strict_transport_security; +mod transfer_encoding; +mod upgrade; +mod user_agent; +mod vary; +mod link; diff --git a/vendor/hyper-0.10.16/src/header/common/origin.rs b/vendor/hyper-0.10.16/src/header/common/origin.rs new file mode 100644 index 0000000..d44a701 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/origin.rs @@ -0,0 +1,119 @@ +use header::{Header, Host, HeaderFormat}; +use std::fmt; +use std::str::FromStr; +use header::parsing::from_one_raw_str; + +/// The `Origin` header. +/// +/// The `Origin` header is a version of the `Referer` header that is used for all HTTP fetches and `POST`s whose CORS flag is set. +/// This header is often used to inform recipients of the security context of where the request was initiated. 
+/// +/// +/// Following the spec, https://fetch.spec.whatwg.org/#origin-header, the value of this header is composed of +/// a String (scheme), header::Host (host/port) +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Origin}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Origin::new("http", "hyper.rs", None) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, Origin}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Origin::new("https", "wikipedia.org", Some(443)) +/// ); +/// ``` + +#[derive(Clone, Debug)] +pub struct Origin { + /// The scheme, such as http or https + pub scheme: String, + /// The host, such as Host{hostname: "hyper.rs".to_owned(), port: None} + pub host: Host, +} + +impl Origin { + /// Creates a new `Origin` header. + pub fn new, H: Into>(scheme: S, hostname: H, port: Option) -> Origin{ + Origin { + scheme: scheme.into(), + host: Host { + hostname: hostname.into(), + port: port + } + } + } +} + +impl Header for Origin { + fn header_name() -> &'static str { + static NAME: &'static str = "Origin"; + NAME + } + + fn parse_header>(raw: &[T]) -> ::Result { + from_one_raw_str(raw) + } +} + +impl FromStr for Origin { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + let idx = match s.find("://") { + Some(idx) => idx, + None => return Err(::Error::Header) + }; + // idx + 3 because thats how long "://" is + let (scheme, etc) = (&s[..idx], &s[idx + 3..]); + let host = try!(Host::from_str(etc)); + + + Ok(Origin{ + scheme: scheme.to_owned(), + host: host + }) + } +} + +impl HeaderFormat for Origin { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Origin { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}://{}", self.scheme, self.host) + } +} + +impl PartialEq for Origin { + fn eq(&self, other: &Origin) -> bool { + self.scheme == other.scheme && self.host == other.host + } +} + + 
+#[cfg(test)] +mod tests { + use super::Origin; + use header::Header; + + #[test] + fn test_origin() { + let origin = Header::parse_header([b"http://foo.com".to_vec()].as_ref()); + assert_eq!(origin.ok(), Some(Origin::new("http", "foo.com", None))); + + let origin = Header::parse_header([b"https://foo.com:443".to_vec()].as_ref()); + assert_eq!(origin.ok(), Some(Origin::new("https", "foo.com", Some(443)))); + } +} + +bench_header!(bench, Origin, { vec![b"https://foo.com".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/pragma.rs b/vendor/hyper-0.10.16/src/header/common/pragma.rs new file mode 100644 index 0000000..3d1fa1f --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/pragma.rs @@ -0,0 +1,84 @@ +use std::fmt; + +#[allow(unused_imports)] +use std::ascii::AsciiExt; + +use header::{Header, HeaderFormat, parsing}; + +/// The `Pragma` header defined by HTTP/1.0. +/// +/// > The "Pragma" header field allows backwards compatibility with +/// > HTTP/1.0 caches, so that clients can specify a "no-cache" request +/// > that they will understand (as Cache-Control was not defined until +/// > HTTP/1.1). When the Cache-Control header field is also present and +/// > understood in a request, Pragma is ignored. + +/// > In HTTP/1.0, Pragma was defined as an extensible field for +/// > implementation-specified directives for recipients. This +/// > specification deprecates such extensions to improve interoperability. +/// +/// Spec: https://tools.ietf.org/html/rfc7234#section-5.4 +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Pragma}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Pragma::NoCache); +/// ``` +/// ``` +/// use hyper::header::{Headers, Pragma}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Pragma::Ext("foobar".to_owned())); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub enum Pragma { + /// Corresponds to the `no-cache` value. + NoCache, + /// Every value other than `no-cache`. 
+ Ext(String), +} + +impl Header for Pragma { + fn header_name() -> &'static str { + "Pragma" + } + + fn parse_header>(raw: &[T]) -> ::Result { + parsing::from_one_raw_str(raw).and_then(|s: String| { + let slice = &s.to_ascii_lowercase()[..]; + match slice { + "no-cache" => Ok(Pragma::NoCache), + _ => Ok(Pragma::Ext(s)), + } + }) + } +} + +impl HeaderFormat for Pragma { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Pragma { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + Pragma::NoCache => "no-cache", + Pragma::Ext(ref string) => &string[..], + }) + } +} + +#[test] +fn test_parse_header() { + let a: Pragma = Header::parse_header([b"no-cache".to_vec()].as_ref()).unwrap(); + let b = Pragma::NoCache; + assert_eq!(a, b); + let c: Pragma = Header::parse_header([b"FoObar".to_vec()].as_ref()).unwrap(); + let d = Pragma::Ext("FoObar".to_owned()); + assert_eq!(c, d); + let e: ::Result = Header::parse_header([b"".to_vec()].as_ref()); + assert_eq!(e.ok(), None); +} diff --git a/vendor/hyper-0.10.16/src/header/common/prefer.rs b/vendor/hyper-0.10.16/src/header/common/prefer.rs new file mode 100644 index 0000000..fff08c6 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/prefer.rs @@ -0,0 +1,209 @@ +use std::fmt; +use std::str::FromStr; +use header::{Header, HeaderFormat}; +use header::parsing::{from_comma_delimited, fmt_comma_delimited}; + +/// `Prefer` header, defined in [RFC7240](http://tools.ietf.org/html/rfc7240) +/// +/// The `Prefer` header field is HTTP header field that can be used by a +/// client to request that certain behaviors be employed by a server +/// while processing a request. 
+/// +/// # ABNF +/// ```plain +/// Prefer = "Prefer" ":" 1#preference +/// preference = token [ BWS "=" BWS word ] +/// *( OWS ";" [ OWS parameter ] ) +/// parameter = token [ BWS "=" BWS word ] +/// ``` +/// +/// # Example values +/// * `respond-async` +/// * `return=minimal` +/// * `wait=30` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Prefer, Preference}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Prefer(vec![Preference::RespondAsync]) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, Prefer, Preference}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// Prefer(vec![ +/// Preference::RespondAsync, +/// Preference::ReturnRepresentation, +/// Preference::Wait(10u32), +/// Preference::Extension("foo".to_owned(), +/// "bar".to_owned(), +/// vec![]), +/// ]) +/// ); +/// ``` +#[derive(PartialEq, Clone, Debug)] +pub struct Prefer(pub Vec); + +__hyper__deref!(Prefer => Vec); + +impl Header for Prefer { + fn header_name() -> &'static str { + "Prefer" + } + + fn parse_header>(raw: &[T]) -> ::Result { + let preferences = try!(from_comma_delimited(raw)); + if !preferences.is_empty() { + Ok(Prefer(preferences)) + } else { + Err(::Error::Header) + } + } +} + +impl HeaderFormat for Prefer { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Prefer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt_comma_delimited(f, &self[..]) + } +} + +/// Prefer contains a list of these preferences. +#[derive(PartialEq, Clone, Debug)] +pub enum Preference { + /// "respond-async" + RespondAsync, + /// "return=representation" + ReturnRepresentation, + /// "return=minimal" + ReturnMinimal, + /// "handling=strict" + HandlingStrict, + /// "handling=leniant" + HandlingLeniant, + /// "wait=delta" + Wait(u32), + + /// Extension preferences. Always has a value, if none is specified it is + /// just "". 
A preference can also have a list of parameters. + Extension(String, String, Vec<(String, String)>) +} + +impl fmt::Display for Preference { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Preference::*; + fmt::Display::fmt(match *self { + RespondAsync => "respond-async", + ReturnRepresentation => "return=representation", + ReturnMinimal => "return=minimal", + HandlingStrict => "handling=strict", + HandlingLeniant => "handling=leniant", + + Wait(secs) => return write!(f, "wait={}", secs), + + Extension(ref name, ref value, ref params) => { + try!(write!(f, "{}", name)); + if value != "" { try!(write!(f, "={}", value)); } + if params.len() > 0 { + for &(ref name, ref value) in params { + try!(write!(f, "; {}", name)); + if value != "" { try!(write!(f, "={}", value)); } + } + } + return Ok(()); + } + }, f) + } +} + +impl FromStr for Preference { + type Err = Option<::Err>; + fn from_str(s: &str) -> Result::Err>> { + use self::Preference::*; + let mut params = s.split(';').map(|p| { + let mut param = p.splitn(2, '='); + match (param.next(), param.next()) { + (Some(name), Some(value)) => (name.trim(), value.trim().trim_matches('"')), + (Some(name), None) => (name.trim(), ""), + // This can safely be unreachable because the [`splitn`][1] + // function (used above) will always have at least one value. 
+ // + // [1]: http://doc.rust-lang.org/std/primitive.str.html#method.splitn + _ => { unreachable!(); } + } + }); + match params.nth(0) { + Some(param) => { + let rest: Vec<(String, String)> = params.map(|(l, r)| (l.to_owned(), r.to_owned())).collect(); + match param { + ("respond-async", "") => if rest.len() == 0 { Ok(RespondAsync) } else { Err(None) }, + ("return", "representation") => if rest.len() == 0 { Ok(ReturnRepresentation) } else { Err(None) }, + ("return", "minimal") => if rest.len() == 0 { Ok(ReturnMinimal) } else { Err(None) }, + ("handling", "strict") => if rest.len() == 0 { Ok(HandlingStrict) } else { Err(None) }, + ("handling", "leniant") => if rest.len() == 0 { Ok(HandlingLeniant) } else { Err(None) }, + ("wait", secs) => if rest.len() == 0 { secs.parse().map(Wait).map_err(Some) } else { Err(None) }, + (left, right) => Ok(Extension(left.to_owned(), right.to_owned(), rest)) + } + }, + None => Err(None) + } + } +} + +#[cfg(test)] +mod tests { + use header::Header; + use super::*; + + #[test] + fn test_parse_multiple_headers() { + let prefer = Header::parse_header(&[b"respond-async, return=representation".to_vec()]); + assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::RespondAsync, + Preference::ReturnRepresentation]))) + } + + #[test] + fn test_parse_argument() { + let prefer = Header::parse_header(&[b"wait=100, handling=leniant, respond-async".to_vec()]); + assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::Wait(100), + Preference::HandlingLeniant, + Preference::RespondAsync]))) + } + + #[test] + fn test_parse_quote_form() { + let prefer = Header::parse_header(&[b"wait=\"200\", handling=\"strict\"".to_vec()]); + assert_eq!(prefer.ok(), Some(Prefer(vec![Preference::Wait(200), + Preference::HandlingStrict]))) + } + + #[test] + fn test_parse_extension() { + let prefer = Header::parse_header(&[b"foo, bar=baz, baz; foo; bar=baz, bux=\"\"; foo=\"\", buz=\"some parameter\"".to_vec()]); + assert_eq!(prefer.ok(), Some(Prefer(vec![ + 
Preference::Extension("foo".to_owned(), "".to_owned(), vec![]), + Preference::Extension("bar".to_owned(), "baz".to_owned(), vec![]), + Preference::Extension("baz".to_owned(), "".to_owned(), vec![("foo".to_owned(), "".to_owned()), ("bar".to_owned(), "baz".to_owned())]), + Preference::Extension("bux".to_owned(), "".to_owned(), vec![("foo".to_owned(), "".to_owned())]), + Preference::Extension("buz".to_owned(), "some parameter".to_owned(), vec![])]))) + } + + #[test] + fn test_fail_with_args() { + let prefer: ::Result = Header::parse_header(&[b"respond-async; foo=bar".to_vec()]); + assert_eq!(prefer.ok(), None); + } +} + +bench_header!(normal, + Prefer, { vec![b"respond-async, return=representation".to_vec(), b"wait=100".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/preference_applied.rs b/vendor/hyper-0.10.16/src/header/common/preference_applied.rs new file mode 100644 index 0000000..f0c4dc7 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/preference_applied.rs @@ -0,0 +1,107 @@ +use std::fmt; +use header::{Header, HeaderFormat, Preference}; +use header::parsing::{from_comma_delimited, fmt_comma_delimited}; + +/// `Preference-Applied` header, defined in [RFC7240](http://tools.ietf.org/html/rfc7240) +/// +/// The `Preference-Applied` response header may be included within a +/// response message as an indication as to which `Prefer` header tokens were +/// honored by the server and applied to the processing of a request. 
+/// +/// # ABNF +/// ```plain +/// Preference-Applied = "Preference-Applied" ":" 1#applied-pref +/// applied-pref = token [ BWS "=" BWS word ] +/// ``` +/// +/// # Example values +/// * `respond-async` +/// * `return=minimal` +/// * `wait=30` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, PreferenceApplied, Preference}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// PreferenceApplied(vec![Preference::RespondAsync]) +/// ); +/// ``` +/// ``` +/// use hyper::header::{Headers, PreferenceApplied, Preference}; +/// +/// let mut headers = Headers::new(); +/// headers.set( +/// PreferenceApplied(vec![ +/// Preference::RespondAsync, +/// Preference::ReturnRepresentation, +/// Preference::Wait(10u32), +/// Preference::Extension("foo".to_owned(), +/// "bar".to_owned(), +/// vec![]), +/// ]) +/// ); +/// ``` +#[derive(PartialEq, Clone, Debug)] +pub struct PreferenceApplied(pub Vec); + +__hyper__deref!(PreferenceApplied => Vec); + +impl Header for PreferenceApplied { + fn header_name() -> &'static str { + "Preference-Applied" + } + + fn parse_header>(raw: &[T]) -> ::Result { + let preferences = try!(from_comma_delimited(raw)); + if !preferences.is_empty() { + Ok(PreferenceApplied(preferences)) + } else { + Err(::Error::Header) + } + } +} + +impl HeaderFormat for PreferenceApplied { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for PreferenceApplied { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + //TODO: format this without allocating a Vec and cloning contents + let preferences: Vec<_> = self.0.iter().map(|pref| match pref { + // The spec ignores parameters in `Preferences-Applied` + &Preference::Extension(ref name, ref value, _) => Preference::Extension( + name.to_owned(), + value.to_owned(), + vec![] + ), + preference @ _ => preference.clone() + }).collect(); + fmt_comma_delimited(f, &preferences) + } +} + +#[cfg(test)] +mod tests { + use 
header::{HeaderFormat, Preference}; + use super::*; + + #[test] + fn test_format_ignore_parameters() { + assert_eq!( + format!("{}", &PreferenceApplied(vec![Preference::Extension( + "foo".to_owned(), + "bar".to_owned(), + vec![("bar".to_owned(), "foo".to_owned()), ("buz".to_owned(), "".to_owned())] + )]) as &(HeaderFormat + Send + Sync)), + "foo=bar".to_owned() + ); + } +} + +bench_header!(normal, + PreferenceApplied, { vec![b"respond-async, return=representation".to_vec(), b"wait=100".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/range.rs b/vendor/hyper-0.10.16/src/header/common/range.rs new file mode 100644 index 0000000..2a67cb0 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/range.rs @@ -0,0 +1,288 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; + +use header::{Header, HeaderFormat}; +use header::parsing::{from_one_raw_str, from_comma_delimited}; + +/// `Range` header, defined in [RFC7233](https://tools.ietf.org/html/rfc7233#section-3.1) +/// +/// The "Range" header field on a GET request modifies the method +/// semantics to request transfer of only one or more subranges of the +/// selected representation data, rather than the entire selected +/// representation data. 
+/// +/// # ABNF +/// ```plain +/// Range = byte-ranges-specifier / other-ranges-specifier +/// other-ranges-specifier = other-range-unit "=" other-range-set +/// other-range-set = 1*VCHAR +/// +/// bytes-unit = "bytes" +/// +/// byte-ranges-specifier = bytes-unit "=" byte-range-set +/// byte-range-set = 1#(byte-range-spec / suffix-byte-range-spec) +/// byte-range-spec = first-byte-pos "-" [last-byte-pos] +/// first-byte-pos = 1*DIGIT +/// last-byte-pos = 1*DIGIT +/// ``` +/// +/// # Example values +/// * `bytes=1000-` +/// * `bytes=-2000` +/// * `bytes=0-1,30-40` +/// * `bytes=0-10,20-90,-100` +/// * `custom_unit=0-123` +/// * `custom_unit=xxx-yyy` +/// +/// # Examples +/// ``` +/// use hyper::header::{Headers, Range, ByteRangeSpec}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Range::Bytes( +/// vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::AllFrom(200)] +/// )); +/// +/// headers.clear(); +/// headers.set(Range::Unregistered("letters".to_owned(), "a-f".to_owned())); +/// ``` +/// ``` +/// use hyper::header::{Headers, Range}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Range::bytes(1, 100)); +/// +/// headers.clear(); +/// headers.set(Range::bytes_multi(vec![(1, 100), (200, 300)])); +/// ``` +#[derive(PartialEq, Clone, Debug)] +pub enum Range { + /// Byte range + Bytes(Vec), + /// Custom range, with unit not registered at IANA + /// (`other-range-unit`: String , `other-range-set`: String) + Unregistered(String, String) +} + +/// Each `Range::Bytes` header can contain one or more `ByteRangeSpecs`. 
+/// Each `ByteRangeSpec` defines a range of bytes to fetch +#[derive(PartialEq, Clone, Debug)] +pub enum ByteRangeSpec { + /// Get all bytes between x and y ("x-y") + FromTo(u64, u64), + /// Get all bytes starting from x ("x-") + AllFrom(u64), + /// Get last x bytes ("-x") + Last(u64) +} + +impl Range { + /// Get the most common byte range header ("bytes=from-to") + pub fn bytes(from: u64, to: u64) -> Range { + Range::Bytes(vec![ByteRangeSpec::FromTo(from, to)]) + } + + /// Get byte range header with multiple subranges + /// ("bytes=from1-to1,from2-to2,fromX-toX") + pub fn bytes_multi(ranges: Vec<(u64, u64)>) -> Range { + Range::Bytes(ranges.iter().map(|r| ByteRangeSpec::FromTo(r.0, r.1)).collect()) + } +} + + +impl fmt::Display for ByteRangeSpec { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ByteRangeSpec::FromTo(from, to) => write!(f, "{}-{}", from, to), + ByteRangeSpec::Last(pos) => write!(f, "-{}", pos), + ByteRangeSpec::AllFrom(pos) => write!(f, "{}-", pos), + } + } +} + + +impl fmt::Display for Range { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Range::Bytes(ref ranges) => { + try!(write!(f, "bytes=")); + + for (i, range) in ranges.iter().enumerate() { + if i != 0 { + try!(f.write_str(",")); + } + try!(Display::fmt(range, f)); + } + Ok(()) + }, + Range::Unregistered(ref unit, ref range_str) => { + write!(f, "{}={}", unit, range_str) + }, + } + } +} + +impl FromStr for Range { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + let mut iter = s.splitn(2, "="); + + match (iter.next(), iter.next()) { + (Some("bytes"), Some(ranges)) => { + match from_comma_delimited(&[ranges]) { + Ok(ranges) => { + if ranges.is_empty() { + return Err(::Error::Header); + } + Ok(Range::Bytes(ranges)) + }, + Err(_) => Err(::Error::Header) + } + } + (Some(unit), Some(range_str)) if unit != "" && range_str != "" => { + Ok(Range::Unregistered(unit.to_owned(), range_str.to_owned())) + + }, + _ => 
Err(::Error::Header) + } + } +} + +impl FromStr for ByteRangeSpec { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + let mut parts = s.splitn(2, "-"); + + match (parts.next(), parts.next()) { + (Some(""), Some(end)) => { + end.parse().or(Err(::Error::Header)).map(ByteRangeSpec::Last) + }, + (Some(start), Some("")) => { + start.parse().or(Err(::Error::Header)).map(ByteRangeSpec::AllFrom) + }, + (Some(start), Some(end)) => { + match (start.parse(), end.parse()) { + (Ok(start), Ok(end)) if start <= end => Ok(ByteRangeSpec::FromTo(start, end)), + _ => Err(::Error::Header) + } + }, + _ => Err(::Error::Header) + } + } +} + +impl Header for Range { + + fn header_name() -> &'static str { + "Range" + } + + fn parse_header>(raw: &[T]) -> ::Result { + from_one_raw_str(raw) + } +} + +impl HeaderFormat for Range { + + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(self, f) + } + +} + +#[test] +fn test_parse_bytes_range_valid() { + let r: Range = Header::parse_header(&[b"bytes=1-100".to_vec()]).unwrap(); + let r2: Range = Header::parse_header(&[b"bytes=1-100,-".to_vec()]).unwrap(); + let r3 = Range::bytes(1, 100); + assert_eq!(r, r2); + assert_eq!(r2, r3); + + let r: Range = Header::parse_header(&[b"bytes=1-100,200-".to_vec()]).unwrap(); + let r2: Range = Header::parse_header(&[b"bytes= 1-100 , 101-xxx, 200- ".to_vec()]).unwrap(); + let r3 = Range::Bytes( + vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::AllFrom(200)] + ); + assert_eq!(r, r2); + assert_eq!(r2, r3); + + let r: Range = Header::parse_header(&[b"bytes=1-100,-100".to_vec()]).unwrap(); + let r2: Range = Header::parse_header(&[b"bytes=1-100, ,,-100".to_vec()]).unwrap(); + let r3 = Range::Bytes( + vec![ByteRangeSpec::FromTo(1, 100), ByteRangeSpec::Last(100)] + ); + assert_eq!(r, r2); + assert_eq!(r2, r3); + + let r: Range = Header::parse_header(&[b"custom=1-100,-100".to_vec()]).unwrap(); + let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned()); + 
assert_eq!(r, r2); + +} + +#[test] +fn test_parse_unregistered_range_valid() { + let r: Range = Header::parse_header(&[b"custom=1-100,-100".to_vec()]).unwrap(); + let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned()); + assert_eq!(r, r2); + + let r: Range = Header::parse_header(&[b"custom=abcd".to_vec()]).unwrap(); + let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned()); + assert_eq!(r, r2); + + let r: Range = Header::parse_header(&[b"custom=xxx-yyy".to_vec()]).unwrap(); + let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned()); + assert_eq!(r, r2); +} + +#[test] +fn test_parse_invalid() { + let r: ::Result = Header::parse_header(&[b"bytes=1-a,-".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"bytes=1-2-3".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"abc".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"bytes=1-100=".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"bytes=".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"custom=".to_vec()]); + assert_eq!(r.ok(), None); + + let r: ::Result = Header::parse_header(&[b"=1-100".to_vec()]); + assert_eq!(r.ok(), None); +} + +#[test] +fn test_fmt() { + use header::Headers; + + let mut headers = Headers::new(); + + headers.set( + Range::Bytes( + vec![ByteRangeSpec::FromTo(0, 1000), ByteRangeSpec::AllFrom(2000)] + )); + assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n"); + + headers.clear(); + headers.set(Range::Bytes(vec![])); + + assert_eq!(&headers.to_string(), "Range: bytes=\r\n"); + + headers.clear(); + headers.set(Range::Unregistered("custom".to_owned(), "1-xxx".to_owned())); + + assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n"); +} + +bench_header!(bytes_multi, Range, { vec![b"bytes=1-1001,2001-3001,10001-".to_vec()]}); +bench_header!(custom_unit, 
Range, { vec![b"other=0-100000".to_vec()]}); diff --git a/vendor/hyper-0.10.16/src/header/common/referer.rs b/vendor/hyper-0.10.16/src/header/common/referer.rs new file mode 100644 index 0000000..2c7bf14 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/referer.rs @@ -0,0 +1,41 @@ +header! { + /// `Referer` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.2) + /// + /// The `Referer` [sic] header field allows the user agent to specify a + /// URI reference for the resource from which the target URI was obtained + /// (i.e., the "referrer", though the field name is misspelled). A user + /// agent MUST NOT include the fragment and userinfo components of the + /// URI reference, if any, when generating the Referer field value. + /// + /// # ABNF + /// ```plain + /// Referer = absolute-URI / partial-URI + /// ``` + /// + /// # Example values + /// * `http://www.example.org/hypertext/Overview.html` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, Referer}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Referer("/People.html#tim".to_owned())); + /// ``` + /// ``` + /// use hyper::header::{Headers, Referer}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Referer("http://www.example.com/index.html".to_owned())); + /// ``` + // TODO Use URL + (Referer, "Referer") => [String] + + test_referer { + // Testcase from the RFC + test_header!(test1, vec![b"http://www.example.org/hypertext/Overview.html"]); + } +} + +bench_header!(bench, Referer, { vec![b"http://foo.com/hello:3000".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/referrer_policy.rs b/vendor/hyper-0.10.16/src/header/common/referrer_policy.rs new file mode 100644 index 0000000..5752960 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/referrer_policy.rs @@ -0,0 +1,121 @@ +use std::fmt; + +#[allow(unused_imports)] +use std::ascii::AsciiExt; + +use header::{Header, HeaderFormat, parsing}; + +/// 
`Referrer-Policy` header, part of +/// [Referrer Policy](https://www.w3.org/TR/referrer-policy/#referrer-policy-header) +/// +/// The `Referrer-Policy` HTTP header specifies the referrer +/// policy that the user agent applies when determining what +/// referrer information should be included with requests made, +/// and with browsing contexts created from the context of the +/// protected resource. +/// +/// # ABNF +/// ```plain +/// Referrer-Policy: 1#policy-token +/// policy-token = "no-referrer" / "no-referrer-when-downgrade" +/// / "same-origin" / "origin" +/// / "origin-when-cross-origin" / "unsafe-url" +/// ``` +/// +/// # Example values +/// * `no-referrer` +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, ReferrerPolicy}; +/// +/// let mut headers = Headers::new(); +/// headers.set(ReferrerPolicy::NoReferrer); +/// ``` +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum ReferrerPolicy { + /// `no-referrer` + NoReferrer, + /// `no-referrer-when-downgrade` + NoReferrerWhenDowngrade, + /// `same-origin` + SameOrigin, + /// `origin` + Origin, + /// `origin-when-cross-origin` + OriginWhenCrossOrigin, + /// `unsafe-url` + UnsafeUrl, + /// `strict-origin` + StrictOrigin, + ///`strict-origin-when-cross-origin` + StrictOriginWhenCrossOrigin, +} + +impl Header for ReferrerPolicy { + fn header_name() -> &'static str { + static NAME: &'static str = "Referrer-Policy"; + NAME + } + + fn parse_header>(raw: &[T]) -> ::Result { + use self::ReferrerPolicy::*; + // See https://www.w3.org/TR/referrer-policy/#determine-policy-for-token + let headers: Vec = try!(parsing::from_comma_delimited(raw)); + + for h in headers.iter().rev() { + let slice = &h.to_ascii_lowercase()[..]; + match slice { + "no-referrer" | "never" => return Ok(NoReferrer), + "no-referrer-when-downgrade" | "default" => return Ok(NoReferrerWhenDowngrade), + "same-origin" => return Ok(SameOrigin), + "origin" => return Ok(Origin), + "origin-when-cross-origin" => return Ok(OriginWhenCrossOrigin), + 
"strict-origin" => return Ok(StrictOrigin), + "strict-origin-when-cross-origin" => return Ok(StrictOriginWhenCrossOrigin), + "unsafe-url" | "always" => return Ok(UnsafeUrl), + _ => continue, + } + } + + Err(::Error::Header) + } +} + +impl HeaderFormat for ReferrerPolicy { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for ReferrerPolicy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::ReferrerPolicy::*; + f.write_str(match *self { + NoReferrer => "no-referrer", + NoReferrerWhenDowngrade => "no-referrer-when-downgrade", + SameOrigin => "same-origin", + Origin => "origin", + OriginWhenCrossOrigin => "origin-when-cross-origin", + StrictOrigin => "strict-origin", + StrictOriginWhenCrossOrigin => "strict-origin-when-cross-origin", + UnsafeUrl => "unsafe-url", + }) + } +} + +#[test] +fn test_parse_header() { + let a: ReferrerPolicy = Header::parse_header([b"origin".to_vec()].as_ref()).unwrap(); + let b = ReferrerPolicy::Origin; + assert_eq!(a, b); + let e: ::Result = Header::parse_header([b"foobar".to_vec()].as_ref()); + assert!(e.is_err()); +} + +#[test] +fn test_rightmost_header() { + let a: ReferrerPolicy = Header::parse_header(&["same-origin, origin, foobar".into()]).unwrap(); + let b = ReferrerPolicy::Origin; + assert_eq!(a, b); +} diff --git a/vendor/hyper-0.10.16/src/header/common/server.rs b/vendor/hyper-0.10.16/src/header/common/server.rs new file mode 100644 index 0000000..bb661cf --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/server.rs @@ -0,0 +1,55 @@ +/// `Server` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.2) +/// +/// The `Server` header field contains information about the software +/// used by the origin server to handle the request, which is often used +/// by clients to help identify the scope of reported interoperability +/// problems, to work around or tailor requests to avoid particular +/// server limitations, 
and for analytics regarding server or operating +/// system use. An origin server MAY generate a Server field in its +/// responses. +/// +/// # ABNF +/// ```plain +/// Server = product *( RWS ( product / comment ) ) +/// ``` +/// +/// # Example values +/// * `CERN/3.0 libwww/2.17` +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, Server}; +/// +/// let mut headers = Headers::new(); +/// headers.set(Server("hyper/0.5.2".to_owned())); +/// ``` +// TODO: Maybe parse as defined in the spec? +#[derive(Clone, Debug, PartialEq)] +pub struct Server(pub std::borrow::Cow<'static, str>); +impl ::header::Header for Server { + fn header_name() -> &'static str { + "Server" + } + fn parse_header>(raw: &[T]) -> ::Result { + ::header::parsing::from_one_raw_str(raw).map(std::borrow::Cow::Owned).map(Server) + } +} +impl ::header::HeaderFormat for Server { + fn fmt_header(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + ::std::fmt::Display::fmt(&**self, f) + } +} +impl ::std::fmt::Display for Server { + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + ::std::fmt::Display::fmt(&**self, f) + } +} +impl ::std::ops::Deref for Server { + type Target = str; + + fn deref(&self) -> &str { + &self.0 + } +} + +bench_header!(bench, Server, { vec![b"Some String".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/set_cookie.rs b/vendor/hyper-0.10.16/src/header/common/set_cookie.rs new file mode 100644 index 0000000..5966255 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/set_cookie.rs @@ -0,0 +1,121 @@ +use header::{Header, HeaderFormat}; +use std::fmt::{self}; +use std::str::from_utf8; + + +/// `Set-Cookie` header, defined [RFC6265](http://tools.ietf.org/html/rfc6265#section-4.1) +/// +/// The Set-Cookie HTTP response header is used to send cookies from the +/// server to the user agent. +/// +/// Informally, the Set-Cookie response header contains the header name +/// "Set-Cookie" followed by a ":" and a cookie. 
Each cookie begins with +/// a name-value-pair, followed by zero or more attribute-value pairs. +/// +/// # ABNF +/// ```plain +/// set-cookie-header = "Set-Cookie:" SP set-cookie-string +/// set-cookie-string = cookie-pair *( ";" SP cookie-av ) +/// cookie-pair = cookie-name "=" cookie-value +/// cookie-name = token +/// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE ) +/// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E +/// ; US-ASCII characters excluding CTLs, +/// ; whitespace DQUOTE, comma, semicolon, +/// ; and backslash +/// token = +/// +/// cookie-av = expires-av / max-age-av / domain-av / +/// path-av / secure-av / httponly-av / +/// extension-av +/// expires-av = "Expires=" sane-cookie-date +/// sane-cookie-date = +/// max-age-av = "Max-Age=" non-zero-digit *DIGIT +/// ; In practice, both expires-av and max-age-av +/// ; are limited to dates representable by the +/// ; user agent. +/// non-zero-digit = %x31-39 +/// ; digits 1 through 9 +/// domain-av = "Domain=" domain-value +/// domain-value = +/// ; defined in [RFC1034], Section 3.5, as +/// ; enhanced by [RFC1123], Section 2.1 +/// path-av = "Path=" path-value +/// path-value = +/// secure-av = "Secure" +/// httponly-av = "HttpOnly" +/// extension-av = +/// ``` +/// +/// # Example values +/// * `SID=31d4d96e407aad42` +/// * `lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT` +/// * `lang=; Expires=Sun, 06 Nov 1994 08:49:37 GMT` +/// * `lang=en-US; Path=/; Domain=example.com` +/// +/// # Example +/// ``` +/// use hyper::header::{Headers, SetCookie}; +/// +/// let mut headers = Headers::new(); +/// +/// headers.set( +/// SetCookie(vec![ +/// String::from("foo=bar; Path=/path; Domain=example.com") +/// ]) +/// ); +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct SetCookie(pub Vec); + +__hyper__deref!(SetCookie => Vec); + +impl Header for SetCookie { + fn header_name() -> &'static str { + "Set-Cookie" + } + + fn parse_header>(raw: &[T]) -> ::Result { + let mut set_cookies 
= Vec::with_capacity(raw.len()); + for set_cookies_raw in raw { + if let Ok(s) = from_utf8(set_cookies_raw.as_ref()) { + set_cookies.push(s.trim().to_owned()); + } + } + + if !set_cookies.is_empty() { + Ok(SetCookie(set_cookies)) + } else { + Err(::Error::Header) + } + } + +} + +impl HeaderFormat for SetCookie { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.0.len() == 1 { + write!(f, "{}", &self.0[0]) + } else { + panic!("SetCookie with multiple cookies cannot be used with fmt_header, must use fmt_multi_header"); + } + } + + fn fmt_multi_header(&self, f: &mut ::header::MultilineFormatter) -> fmt::Result { + for cookie in &self.0 { + try!(f.fmt_line(cookie)); + } + Ok(()) + } +} + +#[test] +fn test_set_cookie_fmt() { + use ::header::Headers; + let mut headers = Headers::new(); + headers.set(SetCookie(vec![ + "foo=bar".into(), + "baz=quux".into(), + ])); + assert_eq!(headers.to_string(), "Set-Cookie: foo=bar\r\nSet-Cookie: baz=quux\r\n"); +} diff --git a/vendor/hyper-0.10.16/src/header/common/strict_transport_security.rs b/vendor/hyper-0.10.16/src/header/common/strict_transport_security.rs new file mode 100644 index 0000000..5fbc622 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/strict_transport_security.rs @@ -0,0 +1,201 @@ +use std::fmt; +use std::str::{self, FromStr}; + +use unicase::UniCase; + +use header::{Header, HeaderFormat, parsing}; + +/// `StrictTransportSecurity` header, defined in [RFC6797](https://tools.ietf.org/html/rfc6797) +/// +/// This specification defines a mechanism enabling web sites to declare +/// themselves accessible only via secure connections and/or for users to be +/// able to direct their user agent(s) to interact with given sites only over +/// secure connections. This overall policy is referred to as HTTP Strict +/// Transport Security (HSTS). 
The policy is declared by web sites via the +/// Strict-Transport-Security HTTP response header field and/or by other means, +/// such as user agent configuration, for example. +/// +/// # ABNF +/// +/// ```plain +/// [ directive ] *( ";" [ directive ] ) +/// +/// directive = directive-name [ "=" directive-value ] +/// directive-name = token +/// directive-value = token | quoted-string +/// +/// ``` +/// +/// # Example values +/// * `max-age=31536000` +/// * `max-age=15768000 ; includeSubDomains` +/// +/// # Example +/// ``` +/// # extern crate hyper; +/// # fn main() { +/// use hyper::header::{Headers, StrictTransportSecurity}; +/// +/// let mut headers = Headers::new(); +/// +/// headers.set( +/// StrictTransportSecurity::including_subdomains(31536000u64) +/// ); +/// # } +/// ``` +#[derive(Clone, PartialEq, Debug)] +pub struct StrictTransportSecurity { + /// Signals the UA that the HSTS Policy applies to this HSTS Host as well as + /// any subdomains of the host's domain name. + pub include_subdomains: bool, + + /// Specifies the number of seconds, after the reception of the STS header + /// field, during which the UA regards the host (from whom the message was + /// received) as a Known HSTS Host. 
+ pub max_age: u64 +} + +impl StrictTransportSecurity { + /// Create an STS header that includes subdomains + pub fn including_subdomains(max_age: u64) -> StrictTransportSecurity { + StrictTransportSecurity { + max_age: max_age, + include_subdomains: true + } + } + + /// Create an STS header that excludes subdomains + pub fn excluding_subdomains(max_age: u64) -> StrictTransportSecurity { + StrictTransportSecurity { + max_age: max_age, + include_subdomains: false + } + } +} + +enum Directive { + MaxAge(u64), + IncludeSubdomains, + Unknown +} + +impl FromStr for StrictTransportSecurity { + type Err = ::Error; + + fn from_str(s: &str) -> ::Result { + s.split(';') + .map(str::trim) + .map(|sub| if UniCase(sub) == UniCase("includeSubdomains") { + Ok(Directive::IncludeSubdomains) + } else { + let mut sub = sub.splitn(2, '='); + match (sub.next(), sub.next()) { + (Some(left), Some(right)) + if UniCase(left.trim()) == UniCase("max-age") => { + right + .trim() + .trim_matches('"') + .parse() + .map(Directive::MaxAge) + }, + _ => Ok(Directive::Unknown) + } + }) + .fold(Ok((None, None)), |res, dir| match (res, dir) { + (Ok((None, sub)), Ok(Directive::MaxAge(age))) => Ok((Some(age), sub)), + (Ok((age, None)), Ok(Directive::IncludeSubdomains)) => Ok((age, Some(()))), + (Ok((Some(_), _)), Ok(Directive::MaxAge(_))) => Err(::Error::Header), + (Ok((_, Some(_))), Ok(Directive::IncludeSubdomains)) => Err(::Error::Header), + (_, Err(_)) => Err(::Error::Header), + (res, _) => res + }) + .and_then(|res| match res { + (Some(age), sub) => Ok(StrictTransportSecurity { + max_age: age, + include_subdomains: sub.is_some() + }), + _ => Err(::Error::Header) + }) + } +} + +impl Header for StrictTransportSecurity { + fn header_name() -> &'static str { + "Strict-Transport-Security" + } + + fn parse_header>(raw: &[T]) -> ::Result { + parsing::from_one_raw_str(raw) + } +} + +impl HeaderFormat for StrictTransportSecurity { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for StrictTransportSecurity { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.include_subdomains { + write!(f, "max-age={}; includeSubdomains", self.max_age) + } else { + write!(f, "max-age={}", self.max_age) + } + } +} + +#[cfg(test)] +mod tests { + use super::StrictTransportSecurity; + use header::Header; + + #[test] + fn test_parse_max_age() { + let h = Header::parse_header(&[b"max-age=31536000".to_vec()][..]); + assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); + } + + #[test] + fn test_parse_max_age_no_value() { + let h: ::Result = Header::parse_header(&[b"max-age".to_vec()][..]); + assert!(h.is_err()); + } + + #[test] + fn test_parse_quoted_max_age() { + let h = Header::parse_header(&[b"max-age=\"31536000\"".to_vec()][..]); + assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); + } + + #[test] + fn test_parse_spaces_max_age() { + let h = Header::parse_header(&[b"max-age = 31536000".to_vec()][..]); + assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: false, max_age: 31536000u64 })); + } + + #[test] + fn test_parse_include_subdomains() { + let h = Header::parse_header(&[b"max-age=15768000 ; includeSubDomains".to_vec()][..]); + assert_eq!(h.ok(), Some(StrictTransportSecurity { include_subdomains: true, max_age: 15768000u64 })); + } + + #[test] + fn test_parse_no_max_age() { + let h: ::Result = Header::parse_header(&[b"includeSubDomains".to_vec()][..]); + assert!(h.is_err()); + } + + #[test] + fn test_parse_max_age_nan() { + let h: ::Result = Header::parse_header(&[b"max-age = derp".to_vec()][..]); + assert!(h.is_err()); + } + + #[test] + fn test_parse_duplicate_directives() { + assert!(StrictTransportSecurity::parse_header(&[b"max-age=100; max-age=5; max-age=0".to_vec()][..]).is_err()); + } +} + +bench_header!(bench, StrictTransportSecurity, { vec![b"max-age=15768000 ; 
includeSubDomains".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/transfer_encoding.rs b/vendor/hyper-0.10.16/src/header/common/transfer_encoding.rs new file mode 100644 index 0000000..13ae54a --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/transfer_encoding.rs @@ -0,0 +1,53 @@ +use header::Encoding; + +header! { + /// `Transfer-Encoding` header, defined in + /// [RFC7230](http://tools.ietf.org/html/rfc7230#section-3.3.1) + /// + /// The `Transfer-Encoding` header field lists the transfer coding names + /// corresponding to the sequence of transfer codings that have been (or + /// will be) applied to the payload body in order to form the message + /// body. + /// + /// # ABNF + /// ```plain + /// Transfer-Encoding = 1#transfer-coding + /// ``` + /// + /// # Example values + /// * `gzip, chunked` + /// + /// # Example + /// ``` + /// use hyper::header::{Headers, TransferEncoding, Encoding}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// TransferEncoding(vec![ + /// Encoding::Gzip, + /// Encoding::Chunked, + /// ]) + /// ); + /// ``` + (TransferEncoding, "Transfer-Encoding") => (Encoding)+ + + transfer_encoding { + test_header!( + test1, + vec![b"gzip, chunked"], + Some(HeaderField( + vec![Encoding::Gzip, Encoding::Chunked] + ))); + // Issue: #683 + test_header!( + test2, + vec![b"chunked", b"chunked"], + Some(HeaderField( + vec![Encoding::Chunked, Encoding::Chunked] + ))); + + } +} + +bench_header!(normal, TransferEncoding, { vec![b"chunked, gzip".to_vec()] }); +bench_header!(ext, TransferEncoding, { vec![b"ext".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/upgrade.rs b/vendor/hyper-0.10.16/src/header/common/upgrade.rs new file mode 100644 index 0000000..7f2a613 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/upgrade.rs @@ -0,0 +1,158 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; +use unicase::UniCase; + +header! 
{ + /// `Upgrade` header, defined in [RFC7230](http://tools.ietf.org/html/rfc7230#section-6.7) + /// + /// The `Upgrade` header field is intended to provide a simple mechanism + /// for transitioning from HTTP/1.1 to some other protocol on the same + /// connection. A client MAY send a list of protocols in the Upgrade + /// header field of a request to invite the server to switch to one or + /// more of those protocols, in order of descending preference, before + /// sending the final response. A server MAY ignore a received Upgrade + /// header field if it wishes to continue using the current protocol on + /// that connection. Upgrade cannot be used to insist on a protocol + /// change. + /// + /// # ABNF + /// ```plain + /// Upgrade = 1#protocol + /// + /// protocol = protocol-name ["/" protocol-version] + /// protocol-name = token + /// protocol-version = token + /// ``` + /// + /// # Example values + /// * `HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11` + /// + /// # Examples + /// ``` + /// use hyper::header::{Headers, Upgrade, Protocol, ProtocolName}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)])); + /// ``` + /// ``` + /// use hyper::header::{Headers, Upgrade, Protocol, ProtocolName}; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// Upgrade(vec![ + /// Protocol::new(ProtocolName::Http, Some("2.0".to_owned())), + /// Protocol::new(ProtocolName::Unregistered("SHTTP".to_owned()), + /// Some("1.3".to_owned())), + /// Protocol::new(ProtocolName::Unregistered("IRC".to_owned()), + /// Some("6.9".to_owned())), + /// ]) + /// ); + /// ``` + (Upgrade, "Upgrade") => (Protocol)+ + + test_upgrade { + // Testcase from the RFC + test_header!( + test1, + vec![b"HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11"], + Some(Upgrade(vec![ + Protocol::new(ProtocolName::Http, Some("2.0".to_owned())), + Protocol::new(ProtocolName::Unregistered("SHTTP".to_owned()), + Some("1.3".to_owned())), + 
Protocol::new(ProtocolName::Unregistered("IRC".to_owned()), Some("6.9".to_owned())), + Protocol::new(ProtocolName::Unregistered("RTA".to_owned()), Some("x11".to_owned())), + ]))); + // Own tests + test_header!( + test2, vec![b"websocket"], + Some(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)]))); + #[test] + fn test3() { + let x: ::Result = Header::parse_header(&[b"WEbSOCKet".to_vec()]); + assert_eq!(x.ok(), Some(Upgrade(vec![Protocol::new(ProtocolName::WebSocket, None)]))); + } + } +} + +/// A protocol name used to identify a spefic protocol. Names are case-sensitive +/// except for the `WebSocket` value. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ProtocolName { + /// `HTTP` value, Hypertext Transfer Protocol + Http, + /// `TLS` value, Transport Layer Security [RFC2817](http://tools.ietf.org/html/rfc2817) + Tls, + /// `WebSocket` value, matched case insensitively,Web Socket Protocol + /// [RFC6455](http://tools.ietf.org/html/rfc6455) + WebSocket, + /// `h2c` value, HTTP/2 over cleartext TCP + H2c, + /// Any other protocol name not known to hyper + Unregistered(String), +} + +impl FromStr for ProtocolName { + type Err = (); + fn from_str(s: &str) -> Result { + Ok(match s { + "HTTP" => ProtocolName::Http, + "TLS" => ProtocolName::Tls, + "h2c" => ProtocolName::H2c, + _ => { + if UniCase(s) == UniCase("websocket") { + ProtocolName::WebSocket + } else { + ProtocolName::Unregistered(s.to_owned()) + } + } + }) + } +} + +impl Display for ProtocolName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + ProtocolName::Http => "HTTP", + ProtocolName::Tls => "TLS", + ProtocolName::WebSocket => "websocket", + ProtocolName::H2c => "h2c", + ProtocolName::Unregistered(ref s) => s, + }) + } +} + +/// Protocols that appear in the `Upgrade` header field +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Protocol { + /// The protocol identifier + pub name: ProtocolName, + /// The optional version of the protocol, often in the 
format "DIGIT.DIGIT" (e.g.. "1.2") + pub version: Option, +} + +impl Protocol { + /// Creates a new Protocol with the given name and version + pub fn new(name: ProtocolName, version: Option) -> Protocol { + Protocol { name: name, version: version } + } +} + +impl FromStr for Protocol { + type Err =(); + fn from_str(s: &str) -> Result { + let mut parts = s.splitn(2, '/'); + Ok(Protocol::new(try!(parts.next().unwrap().parse()), parts.next().map(|x| x.to_owned()))) + } +} + +impl Display for Protocol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(fmt::Display::fmt(&self.name, f)); + if let Some(ref version) = self.version { + try!(write!(f, "/{}", version)); + } + Ok(()) + } +} + +bench_header!(bench, Upgrade, { vec![b"HTTP/2.0, RTA/x11, websocket".to_vec()] }); diff --git a/vendor/hyper-0.10.16/src/header/common/user_agent.rs b/vendor/hyper-0.10.16/src/header/common/user_agent.rs new file mode 100644 index 0000000..90e5bd0 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/user_agent.rs @@ -0,0 +1,42 @@ +header! { + /// `User-Agent` header, defined in + /// [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.5.3) + /// + /// The `User-Agent` header field contains information about the user + /// agent originating the request, which is often used by servers to help + /// identify the scope of reported interoperability problems, to work + /// around or tailor responses to avoid particular user agent + /// limitations, and for analytics regarding browser or operating system + /// use. A user agent SHOULD send a User-Agent field in each request + /// unless specifically configured not to do so. 
+ /// + /// # ABNF + /// ```plain + /// User-Agent = product *( RWS ( product / comment ) ) + /// product = token ["/" product-version] + /// product-version = token + /// ``` + /// + /// # Example values + /// * `CERN-LineMode/2.15 libwww/2.17b3` + /// * `Bunnies` + /// + /// # Notes + /// * The parser does not split the value + /// + /// # Example + /// ``` + /// use hyper::header::{Headers, UserAgent}; + /// + /// let mut headers = Headers::new(); + /// headers.set(UserAgent("hyper/0.5.2".to_owned())); + /// ``` + (UserAgent, "User-Agent") => [String] + + test_user_agent { + // Testcase from RFC + test_header!(test1, vec![b"CERN-LineMode/2.15 libwww/2.17b3"]); + // Own testcase + test_header!(test2, vec![b"Bunnies"], Some(UserAgent("Bunnies".to_owned()))); + } +} diff --git a/vendor/hyper-0.10.16/src/header/common/vary.rs b/vendor/hyper-0.10.16/src/header/common/vary.rs new file mode 100644 index 0000000..d9113e9 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/common/vary.rs @@ -0,0 +1,66 @@ +use unicase::UniCase; + +header! { + /// `Vary` header, defined in [RFC7231](https://tools.ietf.org/html/rfc7231#section-7.1.4) + /// + /// The "Vary" header field in a response describes what parts of a + /// request message, aside from the method, Host header field, and + /// request target, might influence the origin server's process for + /// selecting and representing this response. The value consists of + /// either a single asterisk ("*") or a list of header field names + /// (case-insensitive). 
+ /// + /// # ABNF + /// ```plain + /// Vary = "*" / 1#field-name + /// ``` + /// + /// # Example values + /// * `accept-encoding, accept-language` + /// + /// # Example + /// ``` + /// use hyper::header::{Headers, Vary}; + /// + /// let mut headers = Headers::new(); + /// headers.set(Vary::Any); + /// ``` + /// + /// # Example + /// ``` + /// # extern crate hyper; + /// # extern crate unicase; + /// # fn main() { + /// // extern crate unicase; + /// + /// use hyper::header::{Headers, Vary}; + /// use unicase::UniCase; + /// + /// let mut headers = Headers::new(); + /// headers.set( + /// Vary::Items(vec![ + /// UniCase("accept-encoding".to_owned()), + /// UniCase("accept-language".to_owned()), + /// ]) + /// ); + /// # } + /// ``` + (Vary, "Vary") => {Any / (UniCase)+} + + test_vary { + test_header!(test1, vec![b"accept-encoding, accept-language"]); + + #[test] + fn test2() { + let mut vary: ::Result; + + vary = Header::parse_header([b"*".to_vec()].as_ref()); + assert_eq!(vary.ok(), Some(Vary::Any)); + + vary = Header::parse_header([b"etag,cookie,allow".to_vec()].as_ref()); + assert_eq!(vary.ok(), Some(Vary::Items(vec!["eTag".parse().unwrap(), + "cookIE".parse().unwrap(), + "AlLOw".parse().unwrap(),]))); + } + } +} diff --git a/vendor/hyper-0.10.16/src/header/internals/cell.rs b/vendor/hyper-0.10.16/src/header/internals/cell.rs new file mode 100644 index 0000000..fd15e1c --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/internals/cell.rs @@ -0,0 +1,204 @@ +use std::any::{Any, TypeId}; +use std::cell::UnsafeCell; +use self::super::VecMap; +use std::fmt; +use std::mem; +use std::ops::Deref; + +pub struct OptCell(UnsafeCell>); + +impl OptCell { + #[inline] + pub fn new(val: Option) -> OptCell { + OptCell(UnsafeCell::new(val)) + } + + #[inline] + pub fn set(&self, val: T) { + unsafe { + let opt = self.0.get(); + debug_assert!((*opt).is_none()); + *opt = Some(val) + } + } + + #[inline] + pub unsafe fn get_mut(&mut self) -> &mut T { + let opt = &mut *self.0.get(); + 
opt.as_mut().unwrap() + } +} + +impl Deref for OptCell { + type Target = Option; + #[inline] + fn deref(&self) -> &Option { + unsafe { &*self.0.get() } + } +} + +impl Clone for OptCell { + #[inline] + fn clone(&self) -> OptCell { + OptCell::new((**self).clone()) + } +} + +pub struct PtrMapCell(UnsafeCell>>); + +#[derive(Clone, Debug)] +enum PtrMap { + Empty, + One(TypeId, T), + Many(VecMap) +} + +impl PtrMapCell { + #[inline] + pub fn new() -> PtrMapCell { + PtrMapCell(UnsafeCell::new(PtrMap::Empty)) + } + + #[inline] + pub fn get(&self, key: TypeId) -> Option<&V> { + let map = unsafe { &*self.0.get() }; + match *map { + PtrMap::Empty => None, + PtrMap::One(id, ref v) => if id == key { + Some(v) + } else { + None + }, + PtrMap::Many(ref hm) => hm.get(&key) + }.map(|val| &**val) + } + + #[inline] + pub fn get_mut(&mut self, key: TypeId) -> Option<&mut V> { + let map = unsafe { &mut *self.0.get() }; + match *map { + PtrMap::Empty => None, + PtrMap::One(id, ref mut v) => if id == key { + Some(v) + } else { + None + }, + PtrMap::Many(ref mut hm) => hm.get_mut(&key) + }.map(|val| &mut **val) + } + + #[inline] + pub unsafe fn insert(&self, key: TypeId, val: Box) { + let map = &mut *self.0.get(); + match *map { + PtrMap::Empty => *map = PtrMap::One(key, val), + PtrMap::One(..) 
=> { + let one = mem::replace(map, PtrMap::Empty); + match one { + PtrMap::One(id, one) => { + debug_assert!(id != key); + let mut hm = VecMap::with_capacity(2); + hm.insert(id, one); + hm.insert(key, val); + mem::replace(map, PtrMap::Many(hm)); + }, + _ => unsafe { std::hint::unreachable_unchecked(); }, + } + }, + PtrMap::Many(ref mut hm) => { hm.insert(key, val); } + } + } + + #[inline] + pub unsafe fn one(&self) -> &V { + let map = &*self.0.get(); + match *map { + PtrMap::One(_, ref one) => one, + _ => panic!("not PtrMap::One value, {:?}", *map) + } + } +} + +impl Clone for PtrMapCell where Box: Clone { + #[inline] + fn clone(&self) -> PtrMapCell { + let cell = PtrMapCell::new(); + unsafe { + *cell.0.get() = (&*self.0.get()).clone() + } + cell + } +} + +#[cfg(test)] +mod test { + use std::any::TypeId; + use super::*; + + #[test] + fn test_opt_cell_set() { + let one:OptCell = OptCell::new(None); + one.set(1); + assert_eq!(*one,Some(1)); + } + + #[test] + fn test_opt_cell_clone() { + let one:OptCell = OptCell::new(Some(3)); + let stored = *one.clone(); + assert_eq!(stored,Some(3)); + } + + + #[test] + fn test_ptr_map_cell_none() { + let type_id = TypeId::of::(); + let pm:PtrMapCell = PtrMapCell::new(); + assert_eq!(pm.get(type_id),None); + } + + #[test] + fn test_ptr_map_cell_one() { + let type_id = TypeId::of::(); + let pm:PtrMapCell = PtrMapCell::new(); + unsafe { pm.insert(type_id, Box::new("a".to_string())); } + assert_eq!(pm.get(type_id), Some(&"a".to_string())); + assert_eq!(unsafe {pm.one()}, "a"); + } + + #[test] + fn test_ptr_map_cell_two() { + let type_id = TypeId::of::(); + let type_id2 = TypeId::of::>(); + let pm:PtrMapCell = PtrMapCell::new(); + unsafe { pm.insert(type_id, Box::new("a".to_string())); } + unsafe { pm.insert(type_id2, Box::new("b".to_string())); } + assert_eq!(pm.get(type_id), Some(&"a".to_string())); + assert_eq!(pm.get(type_id2), Some(&"b".to_string())); + } + + #[test] + fn test_ptr_map_cell_many() { + let id1 = TypeId::of::(); + let 
id2 = TypeId::of::>(); + let id3 = TypeId::of::>(); + let pm:PtrMapCell = PtrMapCell::new(); + unsafe { pm.insert(id1, Box::new("a".to_string())); } + unsafe { pm.insert(id2, Box::new("b".to_string())); } + unsafe { pm.insert(id3, Box::new("c".to_string())); } + assert_eq!(pm.get(id1), Some(&"a".to_string())); + assert_eq!(pm.get(id2), Some(&"b".to_string())); + assert_eq!(pm.get(id3), Some(&"c".to_string())); + } + + + #[test] + fn test_ptr_map_cell_clone() { + let type_id = TypeId::of::(); + let pm:PtrMapCell = PtrMapCell::new(); + unsafe { pm.insert(type_id, Box::new("a".to_string())); } + let cloned = pm.clone(); + assert_eq!(cloned.get(type_id), Some(&"a".to_string())); + } + +} diff --git a/vendor/hyper-0.10.16/src/header/internals/item.rs b/vendor/hyper-0.10.16/src/header/internals/item.rs new file mode 100644 index 0000000..c5cd74f --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/internals/item.rs @@ -0,0 +1,122 @@ +use std::any::Any; +use std::any::TypeId; +use std::fmt; +use std::borrow::Cow; +use std::str::from_utf8; + +use super::cell::{OptCell, PtrMapCell}; +use header::{Header, HeaderFormat, MultilineFormatter}; + + +#[derive(Clone)] +pub struct Item { + raw: OptCell>>, + typed: PtrMapCell +} + +impl Item { + #[inline] + pub fn new_raw(data: Vec>) -> Item { + Item { + raw: OptCell::new(Some(data)), + typed: PtrMapCell::new(), + } + } + + #[inline] + pub fn new_typed(ty: Box) -> Item { + let map = PtrMapCell::new(); + unsafe { map.insert((*ty).get_type(), ty); } + Item { + raw: OptCell::new(None), + typed: map, + } + } + + #[inline] + pub fn raw_mut(&mut self) -> &mut Vec> { + self.raw(); + self.typed = PtrMapCell::new(); + unsafe { + self.raw.get_mut() + } + } + + pub fn raw(&self) -> &[Cow<'static, [u8]>] { + if let Some(ref raw) = *self.raw { + return &raw[..]; + } + + let raw = vec![unsafe { self.typed.one() }.to_string().into_bytes().into()]; + self.raw.set(raw); + + let raw = self.raw.as_ref().unwrap(); + &raw[..] 
+ } + + pub fn typed(&self) -> Option<&H> { + let tid = TypeId::of::(); + match self.typed.get(tid) { + Some(val) => Some(val), + None => { + match parse::(self.raw.as_ref().expect("item.raw must exist")) { + Ok(typed) => { + unsafe { self.typed.insert(tid, typed); } + self.typed.get(tid) + }, + Err(_) => None + } + } + }.map(|typed| unsafe { typed.downcast_ref_unchecked() }) + } + + pub fn typed_mut(&mut self) -> Option<&mut H> { + let tid = TypeId::of::(); + if self.typed.get_mut(tid).is_none() { + match parse::(self.raw.as_ref().expect("item.raw must exist")) { + Ok(typed) => { + unsafe { self.typed.insert(tid, typed); } + }, + Err(_) => () + } + } + if self.raw.is_some() && self.typed.get_mut(tid).is_some() { + self.raw = OptCell::new(None); + } + self.typed.get_mut(tid).map(|typed| unsafe { typed.downcast_mut_unchecked() }) + } + + pub fn write_h1(&self, f: &mut MultilineFormatter) -> fmt::Result { + match *self.raw { + Some(ref raw) => { + for part in raw.iter() { + match from_utf8(&part[..]) { + Ok(s) => { + try!(f.fmt_line(&s)); + }, + Err(_) => { + error!("raw header value is not utf8, value={:?}", part); + return Err(fmt::Error); + } + } + } + Ok(()) + }, + None => { + let typed = unsafe { self.typed.one() }; + typed.fmt_multi_header(f) + } + } + } +} + +#[inline] +fn parse(raw: &Vec>) -> + ::Result> { + Header::parse_header(&raw[..]).map(|h: H| { + // FIXME: Use Type ascription + let h: Box = Box::new(h); + h + }) +} + diff --git a/vendor/hyper-0.10.16/src/header/internals/mod.rs b/vendor/hyper-0.10.16/src/header/internals/mod.rs new file mode 100644 index 0000000..89a655d --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/internals/mod.rs @@ -0,0 +1,6 @@ +pub use self::item::Item; +pub use self::vec_map::{VecMap, Entry}; + +mod cell; +mod item; +mod vec_map; diff --git a/vendor/hyper-0.10.16/src/header/internals/vec_map.rs b/vendor/hyper-0.10.16/src/header/internals/vec_map.rs new file mode 100644 index 0000000..a70937b --- /dev/null +++ 
b/vendor/hyper-0.10.16/src/header/internals/vec_map.rs @@ -0,0 +1,95 @@ +#[derive(Clone, Debug)] +pub struct VecMap { + vec: Vec<(K, V)>, +} + +impl VecMap { + pub fn new() -> VecMap { + VecMap { + vec: Vec::new() + } + } + + pub fn with_capacity(cap: usize) -> VecMap { + VecMap { + vec: Vec::with_capacity(cap) + } + } + + pub fn insert(&mut self, key: K, value: V) { + match self.find(&key) { + Some(pos) => self.vec[pos] = (key, value), + None => self.vec.push((key, value)) + } + } + + pub fn entry(&mut self, key: K) -> Entry { + match self.find(&key) { + Some(pos) => Entry::Occupied(OccupiedEntry { + vec: self, + pos: pos, + }), + None => Entry::Vacant(VacantEntry { + vec: self, + key: key, + }) + } + } + + pub fn get(&self, key: &K) -> Option<&V> { + self.find(key).map(move |pos| &self.vec[pos].1) + } + + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + self.find(key).map(move |pos| &mut self.vec[pos].1) + } + + pub fn contains_key(&self, key: &K) -> bool { + self.find(key).is_some() + } + + pub fn len(&self) -> usize { self.vec.len() } + pub fn iter(&self) -> ::std::slice::Iter<(K, V)> { + self.vec.iter() + } + pub fn remove(&mut self, key: &K) -> Option { + self.find(key).map(|pos| self.vec.remove(pos)).map(|(_, v)| v) + } + pub fn clear(&mut self) { + self.vec.clear(); + } + + fn find(&self, key: &K) -> Option { + self.vec.iter().position(|entry| key == &entry.0) + } +} + +pub enum Entry<'a, K: 'a, V: 'a> { + Vacant(VacantEntry<'a, K, V>), + Occupied(OccupiedEntry<'a, K, V>) +} + +pub struct VacantEntry<'a, K: 'a, V: 'a> { + vec: &'a mut VecMap, + key: K, +} + +impl<'a, K, V> VacantEntry<'a, K, V> { + pub fn insert(self, val: V) -> &'a mut V { + let vec = self.vec; + vec.vec.push((self.key, val)); + let pos = vec.vec.len() - 1; + &mut vec.vec[pos].1 + } +} + +pub struct OccupiedEntry<'a, K: 'a, V: 'a> { + vec: &'a mut VecMap, + pos: usize, +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> { + pub fn into_mut(self) -> &'a mut V { + &mut 
self.vec.vec[self.pos].1 + } +} diff --git a/vendor/hyper-0.10.16/src/header/mod.rs b/vendor/hyper-0.10.16/src/header/mod.rs new file mode 100644 index 0000000..3be02b3 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/mod.rs @@ -0,0 +1,891 @@ +//! Headers container, and common header fields. +//! +//! hyper has the opinion that Headers should be strongly-typed, because that's +//! why we're using Rust in the first place. To set or get any header, an object +//! must implement the `Header` trait from this module. Several common headers +//! are already provided, such as `Host`, `ContentType`, `UserAgent`, and others. +//! +//! # Why Typed? +//! +//! Or, why not stringly-typed? Types give the following advantages: +//! +//! - More difficult to typo, since typos in types should be caught by the compiler +//! - Parsing to a proper type by default +//! +//! # Defining Custom Headers +//! +//! Hyper provides many of the most commonly used headers in HTTP. If +//! you need to define a custom header, it's easy to do while still taking +//! advantage of the type system. Hyper includes a `header!` macro for defining +//! many wrapper-style headers. +//! +//! ``` +//! #[macro_use] extern crate hyper; +//! use hyper::header::Headers; +//! header! { (XRequestGuid, "X-Request-Guid") => [String] } +//! +//! fn main () { +//! let mut headers = Headers::new(); +//! +//! headers.set(XRequestGuid("a proper guid".to_owned())) +//! } +//! ``` +//! +//! This works well for simple "string" headers. But the header system +//! actually involves 2 parts: parsing, and formatting. If you need to +//! customize either part, you can do so. +//! +//! ## `Header` and `HeaderFormat` +//! +//! Consider a Do Not Track header. It can be true or false, but it represents +//! that via the numerals `1` and `0`. +//! +//! ``` +//! use std::fmt; +//! use hyper::header::{Header, HeaderFormat}; +//! +//! #[derive(Debug, Clone, Copy)] +//! struct Dnt(bool); +//! +//! impl Header for Dnt { +//! 
fn header_name() -> &'static str { +//! "DNT" +//! } +//! +//! fn parse_header(raw: &[Vec]) -> hyper::Result { +//! if raw.len() == 1 { +//! let line = &raw[0]; +//! if line.len() == 1 { +//! let byte = line[0]; +//! match byte { +//! b'0' => return Ok(Dnt(true)), +//! b'1' => return Ok(Dnt(false)), +//! _ => () +//! } +//! } +//! } +//! Err(hyper::Error::Header) +//! } +//! } +//! +//! impl HeaderFormat for Dnt { +//! fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { +//! if self.0 { +//! f.write_str("1") +//! } else { +//! f.write_str("0") +//! } +//! } +//! } +//! ``` +use std::any::Any; +use std::borrow::{Cow, ToOwned}; +//use std::collections::HashMap; +//use std::collections::hash_map::{Iter, Entry}; +use std::iter::{FromIterator, IntoIterator}; +use std::{mem, fmt}; + +use {httparse, traitobject}; +use typeable::Typeable; +use unicase::UniCase; + +use self::internals::{Item, VecMap, Entry}; +use self::sealed::Sealed; + +pub use self::shared::*; +pub use self::common::*; + +mod common; +mod internals; +mod shared; +pub mod parsing; + +type HeaderName = UniCase>; + +/// A trait for any object that will represent a header field and value. +/// +/// This trait represents the construction and identification of headers, +/// and contains trait-object unsafe methods. +pub trait Header: Clone + Any + Send + Sync { + /// Returns the name of the header field this belongs to. + /// + /// This will become an associated constant once available. + fn header_name() -> &'static str; + + /// Parse a header from a raw stream of bytes. + /// + /// It's possible that a request can include a header field more than once, + /// and in that case, the slice will have a length greater than 1. However, + /// it's not necessarily the case that a Header is *allowed* to have more + /// than one field value. If that's the case, you **should** return `None` + /// if `raw.len() > 1`. 
+ fn parse_header>(raw: &[T]) -> ::Result; +} + +/// A trait for any object that will represent a header field and value. +/// +/// This trait represents the formatting of a `Header` for output to a TcpStream. +pub trait HeaderFormat: fmt::Debug + HeaderClone + Any + Typeable + Send + Sync { + /// Format a header to be output into a TcpStream. + /// + /// This method is not allowed to introduce an Err not produced + /// by the passed-in Formatter. + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result; + + /// Formats a header over multiple lines. + /// + /// The main example here is `Set-Cookie`, which requires that every + /// cookie being set be specified in a separate line. + /// + /// The API here is still being explored, so this is hidden by default. + /// The passed in formatter doesn't have any public methods, so it would + /// be quite difficult to depend on this externally. + #[doc(hidden)] + #[inline] + fn fmt_multi_header(&self, f: &mut MultilineFormatter) -> fmt::Result { + f.fmt_line(&FmtHeader(self)) + } +} + +#[doc(hidden)] +#[allow(missing_debug_implementations)] +pub struct MultilineFormatter<'a, 'b: 'a>(Multi<'a, 'b>); + +enum Multi<'a, 'b: 'a> { + Line(&'a str, &'a mut fmt::Formatter<'b>), + Join(bool, &'a mut fmt::Formatter<'b>), +} + +impl<'a, 'b> MultilineFormatter<'a, 'b> { + fn fmt_line(&mut self, line: &fmt::Display) -> fmt::Result { + use std::fmt::Write; + match self.0 { + Multi::Line(ref name, ref mut f) => { + try!(f.write_str(*name)); + try!(f.write_str(": ")); + try!(write!(NewlineReplacer(*f), "{}", line)); + f.write_str("\r\n") + }, + Multi::Join(ref mut first, ref mut f) => { + if !*first { + try!(f.write_str(", ")); + } else { + *first = false; + } + write!(NewlineReplacer(*f), "{}", line) + } + } + } +} + +// Internal helper to wrap fmt_header into a fmt::Display +struct FmtHeader<'a, H: ?Sized + 'a>(&'a H); + +impl<'a, H: HeaderFormat + ?Sized + 'a> fmt::Display for FmtHeader<'a, H> { + fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { + self.0.fmt_header(f) + } +} + +struct ValueString<'a>(&'a Item); + +impl<'a> fmt::Display for ValueString<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.write_h1(&mut MultilineFormatter(Multi::Join(true, f))) + } +} + +struct NewlineReplacer<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); + +impl<'a, 'b> fmt::Write for NewlineReplacer<'a, 'b> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let mut since = 0; + for (i, &byte) in s.as_bytes().iter().enumerate() { + if byte == b'\r' || byte == b'\n' { + try!(self.0.write_str(&s[since..i])); + try!(self.0.write_str(" ")); + since = i + 1; + } + } + if since < s.len() { + self.0.write_str(&s[since..]) + } else { + Ok(()) + } + } +} + +/// Internal implementation detail. +/// +/// This trait is automatically implemented for all types that implement +/// `HeaderFormat + Clone`. No methods are exposed, and so is not useful +/// outside this crate. +pub trait HeaderClone: Sealed {} +impl HeaderClone for T {} + +mod sealed { + use super::HeaderFormat; + + #[doc(hidden)] + pub trait Sealed { + #[doc(hidden)] + fn clone_box(&self) -> Box; + } + + #[doc(hidden)] + impl Sealed for T { + #[inline] + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + } +} + +impl HeaderFormat + Send + Sync { + #[inline] + unsafe fn downcast_ref_unchecked(&self) -> &T { + mem::transmute(traitobject::data(self)) + } + + #[inline] + unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { + mem::transmute(traitobject::data_mut(self)) + } +} + +impl Clone for Box { + #[inline] + fn clone(&self) -> Box { + self.clone_box() + } +} + +#[inline] +fn header_name() -> &'static str { + ::header_name() +} + +/// A map of header fields on requests and responses. +#[derive(Clone)] +pub struct Headers { + //data: HashMap + data: VecMap, +} + +impl Headers { + + /// Creates a new, empty headers map. 
+ pub fn new() -> Headers { + Headers { + data: VecMap::new() + } + } + + #[doc(hidden)] + pub fn from_raw(raw: &[httparse::Header]) -> ::Result { + let mut headers = Headers::new(); + for header in raw { + trace!("raw header: {:?}={:?}", header.name, &header.value[..]); + let name = UniCase(Cow::Owned(header.name.to_owned())); + let item = match headers.data.entry(name) { + Entry::Vacant(entry) => entry.insert(Item::new_raw(vec![])), + Entry::Occupied(entry) => entry.into_mut() + }; + let trim = header.value.iter().rev().take_while(|&&x| x == b' ').count(); + let value = &header.value[.. header.value.len() - trim]; + item.raw_mut().push(value.to_vec().into()); + } + Ok(headers) + } + + /// Set a header field to the corresponding value. + /// + /// The field is determined by the type of the value being set. + pub fn set(&mut self, value: H) { + trace!("Headers.set( {:?}, {:?} )", header_name::(), value); + self.data.insert(UniCase(Cow::Borrowed(header_name::())), + Item::new_typed(Box::new(value))); + } + + /// Access the raw value of a header. + /// + /// Prefer to use the typed getters instead. + /// + /// Example: + /// + /// ``` + /// # use hyper::header::Headers; + /// # let mut headers = Headers::new(); + /// let raw_content_type = headers.get_raw("content-type"); + /// ``` + pub fn get_raw(&self, name: &str) -> Option<&[Cow<'static, [u8]>]> { + self.data + .get(&UniCase(Cow::Borrowed(unsafe { mem::transmute::<&str, &str>(name) }))) + .map(Item::raw) + } + + /// Set the raw value of a header, bypassing any typed headers. + /// + /// Note: This will completely replace any current value for this + /// header name. 
+ /// + /// Example: + /// + /// ``` + /// # use hyper::header::Headers; + /// # let mut headers = Headers::new(); + /// headers.set_raw("content-length", vec![b"5".to_vec()]); + /// ``` + pub fn set_raw>>(&mut self, name: K, + value: Vec>) { + let name = name.into(); + trace!("Headers.set_raw( {:?}, {:?} )", name, value); + self.data.insert(UniCase(name), Item::new_raw(value)); + } + + /// Append a value to raw value of this header. + /// + /// If a header already contains a value, this will add another line to it. + /// + /// If a header doesnot exist for this name, a new one will be created with + /// the value. + /// + /// Example: + /// + /// ``` + /// # use hyper::header::Headers; + /// # let mut headers = Headers::new(); + /// headers.append_raw("x-foo", b"bar".to_vec()); + /// headers.append_raw("x-foo", b"quux".to_vec()); + /// ``` + pub fn append_raw>>(&mut self, name: K, value: Cow<'static, [u8]>) { + let name = name.into(); + trace!("Headers.append_raw( {:?}, {:?} )", name, value); + match self.data.entry(UniCase(name)) { + Entry::Vacant(ve) => { ve.insert(Item::new_raw(vec![value])); }, + Entry::Occupied(oe) => oe.into_mut().raw_mut().push(value), + } + } + + /// Remove a header set by set_raw + pub fn remove_raw(&mut self, name: &str) { + trace!("Headers.remove_raw( {:?} )", name); + self.data.remove( + &UniCase(Cow::Borrowed(unsafe { mem::transmute::<&str, &str>(name) })) + ); + } + + /// Get a reference to the header field's value, if it exists. + pub fn get(&self) -> Option<&H> { + self.data.get(&UniCase(Cow::Borrowed(header_name::()))) + .and_then(Item::typed::) + } + + /// Get a mutable reference to the header field's value, if it exists. + pub fn get_mut(&mut self) -> Option<&mut H> { + self.data.get_mut(&UniCase(Cow::Borrowed(header_name::()))) + .and_then(Item::typed_mut::) + } + + /// Returns a boolean of whether a certain header is in the map. 
+ /// + /// Example: + /// + /// ``` + /// # use hyper::header::Headers; + /// # use hyper::header::ContentType; + /// # let mut headers = Headers::new(); + /// let has_type = headers.has::(); + /// ``` + pub fn has(&self) -> bool { + self.data.contains_key(&UniCase(Cow::Borrowed(header_name::()))) + } + + /// Removes a header from the map, if one existed. + /// Returns true if a header has been removed. + pub fn remove(&mut self) -> bool { + trace!("Headers.remove( {:?} )", header_name::()); + self.data.remove(&UniCase(Cow::Borrowed(header_name::()))).is_some() + } + + /// Returns an iterator over the header fields. + pub fn iter(&self) -> HeadersItems { + HeadersItems { + inner: self.data.iter() + } + } + + /// Returns the number of headers in the map. + pub fn len(&self) -> usize { + self.data.len() + } + + /// Remove all headers from the map. + pub fn clear(&mut self) { + self.data.clear() + } +} + +impl PartialEq for Headers { + fn eq(&self, other: &Headers) -> bool { + if self.len() != other.len() { + return false; + } + + for header in self.iter() { + match other.get_raw(header.name()) { + Some(val) if val == self.get_raw(header.name()).unwrap() => {}, + _ => { return false; } + } + } + true + } +} + +impl fmt::Display for Headers { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for header in self.iter() { + try!(fmt::Display::fmt(&header, f)); + } + Ok(()) + } +} + +impl fmt::Debug for Headers { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(f.write_str("Headers { ")); + for header in self.iter() { + try!(write!(f, "{:?}, ", header)); + } + try!(f.write_str("}")); + Ok(()) + } +} + +/// An `Iterator` over the fields in a `Headers` map. 
+pub struct HeadersItems<'a> { + inner: ::std::slice::Iter<'a, (HeaderName, Item)> +} + +impl<'a> Iterator for HeadersItems<'a> { + type Item = HeaderView<'a>; + + fn next(&mut self) -> Option> { + self.inner.next().map(|&(ref k, ref v)| HeaderView(k, v)) + } +} + +/// Returned with the `HeadersItems` iterator. +pub struct HeaderView<'a>(&'a HeaderName, &'a Item); + +impl<'a> HeaderView<'a> { + /// Check if a HeaderView is a certain Header. + #[inline] + pub fn is(&self) -> bool { + UniCase(Cow::Borrowed(header_name::())) == *self.0 + } + + /// Get the Header name as a slice. + #[inline] + pub fn name(&self) -> &'a str { + self.0.as_ref() + } + + /// Cast the value to a certain Header type. + #[inline] + pub fn value(&self) -> Option<&'a H> { + self.1.typed::() + } + + /// Get just the header value as a String. + /// + /// This will join multiple values of this header with a `, `. + /// + /// **Warning:** This may not be the format that should be used to send + /// a Request or Response. 
+ #[inline] + pub fn value_string(&self) -> String { + ValueString(self.1).to_string() + } +} + +impl<'a> fmt::Display for HeaderView<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.1.write_h1(&mut MultilineFormatter(Multi::Line(&self.0, f))) + } +} + +impl<'a> fmt::Debug for HeaderView<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl<'a> Extend> for Headers { + fn extend>>(&mut self, iter: I) { + for header in iter { + self.data.insert((*header.0).clone(), (*header.1).clone()); + } + } +} + +impl<'a> FromIterator> for Headers { + fn from_iter>>(iter: I) -> Headers { + let mut headers = Headers::new(); + headers.extend(iter); + headers + } +} + +#[deprecated(note="The semantics of formatting a HeaderFormat directly are not clear")] +impl<'a> fmt::Display for &'a (HeaderFormat + Send + Sync) { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut multi = MultilineFormatter(Multi::Join(true, f)); + self.fmt_multi_header(&mut multi) + } +} + +/// A wrapper around any Header with a Display impl that calls fmt_header. +/// +/// This can be used like so: `format!("{}", HeaderFormatter(&header))` to +/// get the 'value string' representation of this Header. +/// +/// Note: This may not necessarily be the value written to stream, such +/// as with the SetCookie header. 
+#[deprecated(note="The semantics of formatting a HeaderFormat directly are not clear")] +pub struct HeaderFormatter<'a, H: HeaderFormat>(pub &'a H); + +#[allow(deprecated)] +impl<'a, H: HeaderFormat> fmt::Display for HeaderFormatter<'a, H> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut multi = MultilineFormatter(Multi::Join(true, f)); + self.0.fmt_multi_header(&mut multi) + } +} + +#[allow(deprecated)] +impl<'a, H: HeaderFormat> fmt::Debug for HeaderFormatter<'a, H> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + + +#[cfg(test)] +mod tests { + use std::fmt; + use mime::Mime; + use mime::TopLevel::Text; + use mime::SubLevel::Plain; + use super::{Headers, Header, HeaderFormat, ContentLength, ContentType, + Accept, Host, qitem}; + use httparse; + + #[cfg(feature = "nightly")] + use test::Bencher; + + // Slice.position_elem was unstable + fn index_of(slice: &[u8], byte: u8) -> Option { + for (index, &b) in slice.iter().enumerate() { + if b == byte { + return Some(index); + } + } + None + } + + macro_rules! raw { + ($($line:expr),*) => ({ + [$({ + let line = $line; + let pos = index_of(line, b':').expect("raw splits on ':', not found"); + httparse::Header { + name: ::std::str::from_utf8(&line[..pos]).unwrap(), + value: &line[pos + 2..] 
+ } + }),*] + }) + } + + #[test] + fn test_from_raw() { + let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); + assert_eq!(headers.get(), Some(&ContentLength(10))); + } + + #[test] + fn test_content_type() { + let content_type = Header::parse_header([b"text/plain".to_vec()].as_ref()); + assert_eq!(content_type.ok(), Some(ContentType(Mime(Text, Plain, vec![])))); + } + + #[test] + fn test_accept() { + let text_plain = qitem(Mime(Text, Plain, vec![])); + let application_vendor = "application/vnd.github.v3.full+json; q=0.5".parse().unwrap(); + + let accept = Header::parse_header([b"text/plain".to_vec()].as_ref()); + assert_eq!(accept.ok(), Some(Accept(vec![text_plain.clone()]))); + + let bytevec = [b"application/vnd.github.v3.full+json; q=0.5, text/plain".to_vec()]; + let accept = Header::parse_header(bytevec.as_ref()); + assert_eq!(accept.ok(), Some(Accept(vec![application_vendor, text_plain]))); + } + + #[derive(Clone, PartialEq, Debug)] + struct CrazyLength(Option, usize); + + impl Header for CrazyLength { + fn header_name() -> &'static str { + "content-length" + } + fn parse_header(raw: &[Vec]) -> ::Result { + use std::str::from_utf8; + use std::str::FromStr; + + if raw.len() != 1 { + return Err(::Error::Header); + } + // we JUST checked that raw.len() == 1, so raw[0] WILL exist. + match match from_utf8(unsafe { &raw.get_unchecked(0)[..] 
}) { + Ok(s) => FromStr::from_str(s).ok(), + Err(_) => None + }.map(|u| CrazyLength(Some(false), u)) { + Some(x) => Ok(x), + None => Err(::Error::Header), + } + } + } + + impl HeaderFormat for CrazyLength { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + let CrazyLength(ref opt, ref value) = *self; + write!(f, "{:?}, {:?}", opt, value) + } + } + + #[test] + fn test_different_structs_for_same_header() { + let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); + assert_eq!(headers.get::(), Some(&ContentLength(10))); + assert_eq!(headers.get::(), Some(&CrazyLength(Some(false), 10))); + } + + #[test] + fn test_trailing_whitespace() { + let headers = Headers::from_raw(&raw!(b"Content-Length: 10 ")).unwrap(); + assert_eq!(headers.get::(), Some(&ContentLength(10))); + } + + #[test] + fn test_multiple_reads() { + let headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); + let ContentLength(one) = *headers.get::().unwrap(); + let ContentLength(two) = *headers.get::().unwrap(); + assert_eq!(one, two); + } + + #[test] + fn test_different_reads() { + let headers = Headers::from_raw( + &raw!(b"Content-Length: 10", b"Content-Type: text/plain")).unwrap(); + let ContentLength(_) = *headers.get::().unwrap(); + let ContentType(_) = *headers.get::().unwrap(); + } + + #[test] + fn test_get_mutable() { + let mut headers = Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); + *headers.get_mut::().unwrap() = ContentLength(20); + assert_eq!(headers.get_raw("content-length").unwrap(), &[b"20".to_vec()][..]); + assert_eq!(*headers.get::().unwrap(), ContentLength(20)); + } + + #[test] + fn test_headers_fmt() { + let mut headers = Headers::new(); + headers.set(ContentLength(15)); + headers.set(Host { hostname: "foo.bar".to_owned(), port: None }); + + let s = headers.to_string(); + assert!(s.contains("Host: foo.bar\r\n")); + assert!(s.contains("Content-Length: 15\r\n")); + } + + #[test] + fn test_headers_fmt_raw() { + let mut headers = 
Headers::from_raw(&raw!(b"Content-Length: 10")).unwrap(); + headers.set_raw("x-foo", vec![b"foo".to_vec(), b"bar".to_vec()]); + let s = headers.to_string(); + assert_eq!(s, "Content-Length: 10\r\nx-foo: foo\r\nx-foo: bar\r\n"); + } + + #[test] + fn test_set_raw() { + let mut headers = Headers::new(); + headers.set(ContentLength(10)); + headers.set_raw("content-LENGTH", vec![b"20".to_vec()]); + assert_eq!(headers.get_raw("Content-length").unwrap(), &[b"20".to_vec()][..]); + assert_eq!(headers.get(), Some(&ContentLength(20))); + } + + #[test] + fn test_append_raw() { + let mut headers = Headers::new(); + headers.set(ContentLength(10)); + headers.append_raw("content-LENGTH", b"20".to_vec()); + assert_eq!(headers.get_raw("Content-length").unwrap(), &[b"10".to_vec(), b"20".to_vec()][..]); + headers.append_raw("x-foo", b"bar".to_vec()); + assert_eq!(headers.get_raw("x-foo"), Some(&[b"bar".to_vec()][..])); + } + + #[test] + fn test_remove_raw() { + let mut headers = Headers::new(); + headers.set_raw("content-LENGTH", vec![b"20".to_vec()]); + headers.remove_raw("content-LENGTH"); + assert_eq!(headers.get_raw("Content-length"), None); + } + + #[test] + fn test_len() { + let mut headers = Headers::new(); + headers.set(ContentLength(10)); + assert_eq!(headers.len(), 1); + headers.set(ContentType(Mime(Text, Plain, vec![]))); + assert_eq!(headers.len(), 2); + // Redundant, should not increase count. 
+ headers.set(ContentLength(20)); + assert_eq!(headers.len(), 2); + } + + #[test] + fn test_clear() { + let mut headers = Headers::new(); + headers.set(ContentLength(10)); + headers.set(ContentType(Mime(Text, Plain, vec![]))); + assert_eq!(headers.len(), 2); + headers.clear(); + assert_eq!(headers.len(), 0); + } + + #[test] + fn test_iter() { + let mut headers = Headers::new(); + headers.set(ContentLength(11)); + for header in headers.iter() { + assert!(header.is::()); + assert_eq!(header.name(), ::header_name()); + assert_eq!(header.value(), Some(&ContentLength(11))); + assert_eq!(header.value_string(), "11".to_owned()); + } + } + + #[test] + fn test_header_view_value_string() { + let mut headers = Headers::new(); + headers.set_raw("foo", vec![b"one".to_vec(), b"two".to_vec()]); + for header in headers.iter() { + assert_eq!(header.name(), "foo"); + assert_eq!(header.value_string(), "one, two"); + } + } + + #[test] + fn test_eq() { + let mut headers1 = Headers::new(); + let mut headers2 = Headers::new(); + + assert_eq!(headers1, headers2); + + headers1.set(ContentLength(11)); + headers2.set(Host {hostname: "foo.bar".to_owned(), port: None}); + assert!(headers1 != headers2); + + headers1 = Headers::new(); + headers2 = Headers::new(); + + headers1.set(ContentLength(11)); + headers2.set(ContentLength(11)); + assert_eq!(headers1, headers2); + + headers1.set(ContentLength(10)); + assert!(headers1 != headers2); + + headers1 = Headers::new(); + headers2 = Headers::new(); + + headers1.set(Host { hostname: "foo.bar".to_owned(), port: None }); + headers1.set(ContentLength(11)); + headers2.set(ContentLength(11)); + assert!(headers1 != headers2); + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_new(b: &mut Bencher) { + b.iter(|| { + let mut h = Headers::new(); + h.set(ContentLength(11)); + h + }) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_from_raw(b: &mut Bencher) { + let raw = raw!(b"Content-Length: 10"); + b.iter(|| 
Headers::from_raw(&raw).unwrap()) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_get(b: &mut Bencher) { + let mut headers = Headers::new(); + headers.set(ContentLength(11)); + b.iter(|| assert_eq!(headers.get::(), Some(&ContentLength(11)))) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_get_miss(b: &mut Bencher) { + let headers = Headers::new(); + b.iter(|| assert!(headers.get::().is_none())) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_set(b: &mut Bencher) { + let mut headers = Headers::new(); + b.iter(|| headers.set(ContentLength(12))) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_has(b: &mut Bencher) { + let mut headers = Headers::new(); + headers.set(ContentLength(11)); + b.iter(|| assert!(headers.has::())) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_view_is(b: &mut Bencher) { + let mut headers = Headers::new(); + headers.set(ContentLength(11)); + let mut iter = headers.iter(); + let view = iter.next().unwrap(); + b.iter(|| assert!(view.is::())) + } + + #[cfg(feature = "nightly")] + #[bench] + fn bench_headers_fmt(b: &mut Bencher) { + let mut headers = Headers::new(); + headers.set(ContentLength(11)); + b.iter(|| headers.to_string()) + } +} diff --git a/vendor/hyper-0.10.16/src/header/parsing.rs b/vendor/hyper-0.10.16/src/header/parsing.rs new file mode 100644 index 0000000..0ffbdfc --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/parsing.rs @@ -0,0 +1,228 @@ +//! Utility functions for Header implementations. + +use std::str; +use std::str::FromStr; +use std::fmt::{self, Display}; +use url::percent_encoding; + +use header::shared::Charset; + +/// Reads a single raw string when parsing a header. +pub fn from_one_raw_str>(raw: &[S]) -> ::Result { + if raw.len() != 1 || unsafe { raw.get_unchecked(0) }.as_ref() == b"" { return Err(::Error::Header) } + // we JUST checked that raw.len() == 1, so raw[0] WILL exist. 
+ from_raw_str(unsafe { raw.get_unchecked(0) }.as_ref()) +} + +/// Reads a raw string into a value. +pub fn from_raw_str(raw: &[u8]) -> ::Result { + let s = try!(str::from_utf8(raw)); + T::from_str(s).or(Err(::Error::Header)) +} + +/// Reads a comma-delimited raw header into a Vec. +#[inline] +pub fn from_comma_delimited>(raw: &[S]) -> ::Result> { + let mut result = Vec::new(); + for s in raw { + let s = try!(str::from_utf8(s.as_ref())); + result.extend(s.split(',') + .filter_map(|x| match x.trim() { + "" => None, + y => Some(y) + }) + .filter_map(|x| x.parse().ok())) + } + Ok(result) +} + +/// Reads a comma-delimited raw header into a Vec. +#[inline] +pub fn from_comma_delimited_small>(raw: &[S]) -> ::Result> { + let mut result = smallvec::SmallVec::new(); + for s in raw { + let s = try!(str::from_utf8(s.as_ref())); + result.extend(s.split(',') + .filter_map(|x| match x.trim() { + "" => None, + y => Some(y) + }) + .filter_map(|x| x.parse().ok())) + } + Ok(result) +} + +/// Format an array into a comma-delimited string. +pub fn fmt_comma_delimited(f: &mut fmt::Formatter, parts: &[T]) -> fmt::Result { + for (i, part) in parts.iter().enumerate() { + if i != 0 { + try!(f.write_str(", ")); + } + try!(Display::fmt(part, f)); + } + Ok(()) +} + +/// An extended header parameter value (i.e., tagged with a character set and optionally, +/// a language), as defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). +#[derive(Clone, Debug, PartialEq)] +pub struct ExtendedValue { + /// The character set that is used to encode the `value` to a string. + pub charset: Charset, + /// The parameter value, as expressed in octets. + pub value: Vec, +} + +/// Parses extended header parameter values (`ext-value`), as defined in +/// [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). +/// +/// Extended values are denoted by parameter names that end with `*`. 
+/// +/// ## ABNF +/// ```plain +/// ext-value = charset "'" [ language ] "'" value-chars +/// ; like RFC 2231's +/// ; (see [RFC2231], Section 7) +/// +/// charset = "UTF-8" / "ISO-8859-1" / mime-charset +/// +/// mime-charset = 1*mime-charsetc +/// mime-charsetc = ALPHA / DIGIT +/// / "!" / "#" / "$" / "%" / "&" +/// / "+" / "-" / "^" / "_" / "`" +/// / "{" / "}" / "~" +/// ; as in Section 2.3 of [RFC2978] +/// ; except that the single quote is not included +/// ; SHOULD be registered in the IANA charset registry +/// +/// language = +/// +/// value-chars = *( pct-encoded / attr-char ) +/// +/// pct-encoded = "%" HEXDIG HEXDIG +/// ; see [RFC3986], Section 2.1 +/// +/// attr-char = ALPHA / DIGIT +/// / "!" / "#" / "$" / "&" / "+" / "-" / "." +/// / "^" / "_" / "`" / "|" / "~" +/// ; token except ( "*" / "'" / "%" ) +/// ``` +pub fn parse_extended_value(val: &str) -> ::Result { + + // Break into three pieces separated by the single-quote character + let mut parts = val.splitn(3,'\''); + + // Interpret the first piece as a Charset + let charset: Charset = match parts.next() { + None => return Err(::Error::Header), + Some(n) => try!(FromStr::from_str(n)), + }; + + // Ignore the second piece (language tag) + match parts.next() { + None => return Err(::Error::Header), + Some(_) => (), + }; + + // Interpret the third piece as a sequence of value characters + let value: Vec = match parts.next() { + None => return Err(::Error::Header), + Some(v) => percent_encoding::percent_decode(v.as_bytes()).collect(), + }; + + Ok(ExtendedValue { + charset: charset, + value: value, + }) +} + +define_encode_set! 
{ + /// This encode set is used for HTTP header values and is defined at + /// https://tools.ietf.org/html/rfc5987#section-3.2 + pub HTTP_VALUE = [percent_encoding::SIMPLE_ENCODE_SET] | { + ' ', '"', '%', '\'', '(', ')', '*', ',', '/', ':', ';', '<', '-', '>', '?', + '[', '\\', ']', '{', '}' + } +} + +impl Display for ExtendedValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let encoded_value = + percent_encoding::percent_encode(&self.value[..], HTTP_VALUE); + write!(f, "{}''{}", self.charset, encoded_value) + } +} + +#[cfg(test)] +mod tests { + use header::shared::Charset; + use super::{ExtendedValue, parse_extended_value}; + + #[test] + fn test_parse_extended_value_with_encoding_and_language_tag() { + let expected_language_tag = langtag!(en); + // RFC 5987, Section 3.2.2 + // Extended notation, using the Unicode character U+00A3 (POUND SIGN) + let result = parse_extended_value("iso-8859-1'en'%A3%20rates"); + assert!(result.is_ok()); + let extended_value = result.unwrap(); + assert_eq!(Charset::Iso_8859_1, extended_value.charset); + assert!(extended_value.language_tag.is_some()); + assert_eq!(expected_language_tag, extended_value.language_tag.unwrap()); + assert_eq!(vec![163, b' ', b'r', b'a', b't', b'e', b's'], extended_value.value); + } + + #[test] + fn test_parse_extended_value_with_encoding() { + // RFC 5987, Section 3.2.2 + // Extended notation, using the Unicode characters U+00A3 (POUND SIGN) + // and U+20AC (EURO SIGN) + let result = parse_extended_value("UTF-8''%c2%a3%20and%20%e2%82%ac%20rates"); + assert!(result.is_ok()); + let extended_value = result.unwrap(); + assert_eq!(Charset::Ext("UTF-8".to_string()), extended_value.charset); + assert!(extended_value.language_tag.is_none()); + assert_eq!(vec![194, 163, b' ', b'a', b'n', b'd', b' ', 226, 130, 172, b' ', b'r', b'a', b't', b'e', b's'], extended_value.value); + } + + #[test] + fn test_parse_extended_value_missing_language_tag_and_encoding() { + // From: 
https://greenbytes.de/tech/tc2231/#attwithfn2231quot2 + let result = parse_extended_value("foo%20bar.html"); + assert!(result.is_err()); + } + + #[test] + fn test_parse_extended_value_partially_formatted() { + let result = parse_extended_value("UTF-8'missing third part"); + assert!(result.is_err()); + } + + #[test] + fn test_parse_extended_value_partially_formatted_blank() { + let result = parse_extended_value("blank second part'"); + assert!(result.is_err()); + } + + #[test] + fn test_fmt_extended_value_with_encoding_and_language_tag() { + let extended_value = ExtendedValue { + charset: Charset::Iso_8859_1, + language_tag: Some("en".parse().expect("Could not parse language tag")), + value: vec![163, b' ', b'r', b'a', b't', b'e', b's'], + }; + assert_eq!("ISO-8859-1'en'%A3%20rates", format!("{}", extended_value)); + } + + #[test] + fn test_fmt_extended_value_with_encoding() { + let extended_value = ExtendedValue { + charset: Charset::Ext("UTF-8".to_string()), + language_tag: None, + value: vec![194, 163, b' ', b'a', b'n', b'd', b' ', 226, 130, 172, b' ', b'r', b'a', + b't', b'e', b's'], + }; + assert_eq!("UTF-8''%C2%A3%20and%20%E2%82%AC%20rates", + format!("{}", extended_value)); + } +} diff --git a/vendor/hyper-0.10.16/src/header/shared/charset.rs b/vendor/hyper-0.10.16/src/header/shared/charset.rs new file mode 100644 index 0000000..df2b92b --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/charset.rs @@ -0,0 +1,153 @@ +use std::fmt::{self, Display}; +use std::str::FromStr; + +#[allow(unused_imports)] +use std::ascii::AsciiExt; + +use self::Charset::*; + +/// A Mime charset. +/// +/// The string representation is normalised to upper case. 
+/// +/// See http://www.iana.org/assignments/character-sets/character-sets.xhtml +#[derive(Clone,Debug,PartialEq)] +#[allow(non_camel_case_types)] +pub enum Charset{ + /// US ASCII + Us_Ascii, + /// ISO-8859-1 + Iso_8859_1, + /// ISO-8859-2 + Iso_8859_2, + /// ISO-8859-3 + Iso_8859_3, + /// ISO-8859-4 + Iso_8859_4, + /// ISO-8859-5 + Iso_8859_5, + /// ISO-8859-6 + Iso_8859_6, + /// ISO-8859-7 + Iso_8859_7, + /// ISO-8859-8 + Iso_8859_8, + /// ISO-8859-9 + Iso_8859_9, + /// ISO-8859-10 + Iso_8859_10, + /// Shift_JIS + Shift_Jis, + /// EUC-JP + Euc_Jp, + /// ISO-2022-KR + Iso_2022_Kr, + /// EUC-KR + Euc_Kr, + /// ISO-2022-JP + Iso_2022_Jp, + /// ISO-2022-JP-2 + Iso_2022_Jp_2, + /// ISO-8859-6-E + Iso_8859_6_E, + /// ISO-8859-6-I + Iso_8859_6_I, + /// ISO-8859-8-E + Iso_8859_8_E, + /// ISO-8859-8-I + Iso_8859_8_I, + /// GB2312 + Gb2312, + /// Big5 + Big5, + /// KOI8-R + Koi8_R, + /// An arbitrary charset specified as a string + Ext(String) +} + +impl Charset { + fn name(&self) -> &str { + match *self { + Us_Ascii => "US-ASCII", + Iso_8859_1 => "ISO-8859-1", + Iso_8859_2 => "ISO-8859-2", + Iso_8859_3 => "ISO-8859-3", + Iso_8859_4 => "ISO-8859-4", + Iso_8859_5 => "ISO-8859-5", + Iso_8859_6 => "ISO-8859-6", + Iso_8859_7 => "ISO-8859-7", + Iso_8859_8 => "ISO-8859-8", + Iso_8859_9 => "ISO-8859-9", + Iso_8859_10 => "ISO-8859-10", + Shift_Jis => "Shift-JIS", + Euc_Jp => "EUC-JP", + Iso_2022_Kr => "ISO-2022-KR", + Euc_Kr => "EUC-KR", + Iso_2022_Jp => "ISO-2022-JP", + Iso_2022_Jp_2 => "ISO-2022-JP-2", + Iso_8859_6_E => "ISO-8859-6-E", + Iso_8859_6_I => "ISO-8859-6-I", + Iso_8859_8_E => "ISO-8859-8-E", + Iso_8859_8_I => "ISO-8859-8-I", + Gb2312 => "GB2312", + Big5 => "5", + Koi8_R => "KOI8-R", + Ext(ref s) => &s + } + } +} + +impl Display for Charset { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self.name()) + } +} + +impl FromStr for Charset { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + Ok(match s.to_ascii_uppercase().as_ref() { + 
"US-ASCII" => Us_Ascii, + "ISO-8859-1" => Iso_8859_1, + "ISO-8859-2" => Iso_8859_2, + "ISO-8859-3" => Iso_8859_3, + "ISO-8859-4" => Iso_8859_4, + "ISO-8859-5" => Iso_8859_5, + "ISO-8859-6" => Iso_8859_6, + "ISO-8859-7" => Iso_8859_7, + "ISO-8859-8" => Iso_8859_8, + "ISO-8859-9" => Iso_8859_9, + "ISO-8859-10" => Iso_8859_10, + "SHIFT-JIS" => Shift_Jis, + "EUC-JP" => Euc_Jp, + "ISO-2022-KR" => Iso_2022_Kr, + "EUC-KR" => Euc_Kr, + "ISO-2022-JP" => Iso_2022_Jp, + "ISO-2022-JP-2" => Iso_2022_Jp_2, + "ISO-8859-6-E" => Iso_8859_6_E, + "ISO-8859-6-I" => Iso_8859_6_I, + "ISO-8859-8-E" => Iso_8859_8_E, + "ISO-8859-8-I" => Iso_8859_8_I, + "GB2312" => Gb2312, + "BIG5" => Big5, + "KOI8-R" => Koi8_R, + s => Ext(s.to_owned()) + }) + } +} + +#[test] +fn test_parse() { + assert_eq!(Us_Ascii,"us-ascii".parse().unwrap()); + assert_eq!(Us_Ascii,"US-Ascii".parse().unwrap()); + assert_eq!(Us_Ascii,"US-ASCII".parse().unwrap()); + assert_eq!(Shift_Jis,"Shift-JIS".parse().unwrap()); + assert_eq!(Ext("ABCD".to_owned()),"abcd".parse().unwrap()); +} + +#[test] +fn test_display() { + assert_eq!("US-ASCII", format!("{}", Us_Ascii)); + assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned()))); +} diff --git a/vendor/hyper-0.10.16/src/header/shared/encoding.rs b/vendor/hyper-0.10.16/src/header/shared/encoding.rs new file mode 100644 index 0000000..0d618a1 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/encoding.rs @@ -0,0 +1,80 @@ +use std::fmt; +use std::str; + +/// A value to represent an encoding used in `Transfer-Encoding` +/// or `Accept-Encoding` header. +/// +/// bool is `x-`-prefixed. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct Encoding(pub EncodingType, pub String, pub bool); +impl Encoding { + #[allow(non_upper_case_globals)] + pub const Chunked: Encoding = Encoding(EncodingType::Chunked, String::new(), false); +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum EncodingType { + /// The `chunked` encoding. 
+ Chunked, + /// The `gzip` encoding. + Gzip, + /// The `deflate` encoding. + Deflate, + /// The `compress` encoding. + Compress, + /// The `identity` encoding. + Identity, + /// The `br` encoding. + Brotli, + /// The `bzip2` encoding. + Bzip2, + /// The `zstd` encoding. + Zstd, + /// See upper String. + Custom, +} + +impl fmt::Display for Encoding { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.2 { + f.write_str("x-")?; + } + f.write_str(match self.0 { + EncodingType::Chunked => "chunked", + EncodingType::Gzip => "gzip", + EncodingType::Deflate => "deflate", + EncodingType::Compress => "compress", + EncodingType::Identity => "identity", + EncodingType::Brotli => "br", + EncodingType::Bzip2 => "bzip2", + EncodingType::Zstd => "zstd", + EncodingType::Custom => self.1.as_ref(), + }) + } +} + +impl str::FromStr for Encoding { + type Err = ::Error; + fn from_str(mut s: &str) -> ::Result { + let x = s.starts_with("x-"); + if x { + s = &s[2..]; + } + let mut custom = String::new(); + let enc = match s { + "chunked" => EncodingType::Chunked, + "deflate" => EncodingType::Deflate, + "gzip" => EncodingType::Gzip, + "compress" => EncodingType::Compress, + "identity" => EncodingType::Identity, + "br" => EncodingType::Brotli, + "bzip2" => EncodingType::Bzip2, + "zstd" => EncodingType::Zstd, + _ => { + custom = s.to_owned(); + EncodingType::Custom + }, + }; + Ok(Encoding(enc, custom, x)) + } +} diff --git a/vendor/hyper-0.10.16/src/header/shared/entity.rs b/vendor/hyper-0.10.16/src/header/shared/entity.rs new file mode 100644 index 0000000..0d51d5c --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/entity.rs @@ -0,0 +1,215 @@ +use std::str::FromStr; +use std::fmt::{self, Display}; + +// check that each char in the slice is either: +// 1. %x21, or +// 2. in the range %x23 to %x7E, or +// 3. 
in the range %x80 to %xFF +fn check_slice_validity(slice: &str) -> bool { + slice.bytes().all(|c| + c == b'\x21' || (c >= b'\x23' && c <= b'\x7e') | (c >= b'\x80' && c <= b'\xff')) +} + +/// An entity tag, defined in [RFC7232](https://tools.ietf.org/html/rfc7232#section-2.3) +/// +/// An entity tag consists of a string enclosed by two literal double quotes. +/// Preceding the first double quote is an optional weakness indicator, +/// which always looks like `W/`. Examples for valid tags are `"xyzzy"` and `W/"xyzzy"`. +/// +/// # ABNF +/// ```plain +/// entity-tag = [ weak ] opaque-tag +/// weak = %x57.2F ; "W/", case-sensitive +/// opaque-tag = DQUOTE *etagc DQUOTE +/// etagc = %x21 / %x23-7E / obs-text +/// ; VCHAR except double quotes, plus obs-text +/// ``` +/// +/// # Comparison +/// To check if two entity tags are equivalent in an application always use the `strong_eq` or +/// `weak_eq` methods based on the context of the Tag. Only use `==` to check if two tags are +/// identical. +/// +/// The example below shows the results for a set of entity-tag pairs and +/// both the weak and strong comparison function results: +/// +/// | ETag 1 | ETag 2 | Strong Comparison | Weak Comparison | +/// |---------|---------|-------------------|-----------------| +/// | `W/"1"` | `W/"1"` | no match | match | +/// | `W/"1"` | `W/"2"` | no match | no match | +/// | `W/"1"` | `"1"` | no match | match | +/// | `"1"` | `"1"` | match | match | +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct EntityTag { + /// Weakness indicator for the tag + pub weak: bool, + /// The opaque string in between the DQUOTEs + tag: String +} + +impl EntityTag { + /// Constructs a new EntityTag. + /// # Panics + /// If the tag contains invalid characters. + pub fn new(weak: bool, tag: String) -> EntityTag { + assert!(check_slice_validity(&tag), "Invalid tag: {:?}", tag); + EntityTag { weak: weak, tag: tag } + } + + /// Constructs a new weak EntityTag. 
+ /// # Panics + /// If the tag contains invalid characters. + pub fn weak(tag: String) -> EntityTag { + EntityTag::new(true, tag) + } + + /// Constructs a new strong EntityTag. + /// # Panics + /// If the tag contains invalid characters. + pub fn strong(tag: String) -> EntityTag { + EntityTag::new(false, tag) + } + + /// Get the tag. + pub fn tag(&self) -> &str { + self.tag.as_ref() + } + + /// Set the tag. + /// # Panics + /// If the tag contains invalid characters. + pub fn set_tag(&mut self, tag: String) { + assert!(check_slice_validity(&tag), "Invalid tag: {:?}", tag); + self.tag = tag + } + + /// For strong comparison two entity-tags are equivalent if both are not weak and their + /// opaque-tags match character-by-character. + pub fn strong_eq(&self, other: &EntityTag) -> bool { + !self.weak && !other.weak && self.tag == other.tag + } + + /// For weak comparison two entity-tags are equivalent if their + /// opaque-tags match character-by-character, regardless of either or + /// both being tagged as "weak". + pub fn weak_eq(&self, other: &EntityTag) -> bool { + self.tag == other.tag + } + + /// The inverse of `EntityTag.strong_eq()`. + pub fn strong_ne(&self, other: &EntityTag) -> bool { + !self.strong_eq(other) + } + + /// The inverse of `EntityTag.weak_eq()`. + pub fn weak_ne(&self, other: &EntityTag) -> bool { + !self.weak_eq(other) + } +} + +impl Display for EntityTag { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.weak { + write!(f, "W/\"{}\"", self.tag) + } else { + write!(f, "\"{}\"", self.tag) + } + } +} + +impl FromStr for EntityTag { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + let length: usize = s.len(); + let slice = &s[..]; + // Early exits if it doesn't terminate in a DQUOTE. + if !slice.ends_with('"') { + return Err(::Error::Header); + } + // The etag is weak if its first char is not a DQUOTE. 
+ if slice.starts_with('"') && check_slice_validity(&slice[1..length-1]) { + // No need to check if the last char is a DQUOTE, + // we already did that above. + return Ok(EntityTag { weak: false, tag: slice[1..length-1].to_owned() }); + } else if slice.starts_with("W/\"") && check_slice_validity(&slice[3..length-1]) { + return Ok(EntityTag { weak: true, tag: slice[3..length-1].to_owned() }); + } + Err(::Error::Header) + } +} + +#[cfg(test)] +mod tests { + use super::EntityTag; + + #[test] + fn test_etag_parse_success() { + // Expected success + assert_eq!("\"foobar\"".parse::().unwrap(), + EntityTag::strong("foobar".to_owned())); + assert_eq!("\"\"".parse::().unwrap(), + EntityTag::strong("".to_owned())); + assert_eq!("W/\"weaktag\"".parse::().unwrap(), + EntityTag::weak("weaktag".to_owned())); + assert_eq!("W/\"\x65\x62\"".parse::().unwrap(), + EntityTag::weak("\x65\x62".to_owned())); + assert_eq!("W/\"\"".parse::().unwrap(), EntityTag::weak("".to_owned())); + } + + #[test] + fn test_etag_parse_failures() { + // Expected failures + assert!("no-dquotes".parse::().is_err()); + assert!("w/\"the-first-w-is-case-sensitive\"".parse::().is_err()); + assert!("".parse::().is_err()); + assert!("\"unmatched-dquotes1".parse::().is_err()); + assert!("unmatched-dquotes2\"".parse::().is_err()); + assert!("matched-\"dquotes\"".parse::().is_err()); + } + + #[test] + fn test_etag_fmt() { + assert_eq!(format!("{}", EntityTag::strong("foobar".to_owned())), "\"foobar\""); + assert_eq!(format!("{}", EntityTag::strong("".to_owned())), "\"\""); + assert_eq!(format!("{}", EntityTag::weak("weak-etag".to_owned())), "W/\"weak-etag\""); + assert_eq!(format!("{}", EntityTag::weak("\u{0065}".to_owned())), "W/\"\x65\""); + assert_eq!(format!("{}", EntityTag::weak("".to_owned())), "W/\"\""); + } + + #[test] + fn test_cmp() { + // | ETag 1 | ETag 2 | Strong Comparison | Weak Comparison | + // |---------|---------|-------------------|-----------------| + // | `W/"1"` | `W/"1"` | no match | match | 
+ // | `W/"1"` | `W/"2"` | no match | no match | + // | `W/"1"` | `"1"` | no match | match | + // | `"1"` | `"1"` | match | match | + let mut etag1 = EntityTag::weak("1".to_owned()); + let mut etag2 = EntityTag::weak("1".to_owned()); + assert!(!etag1.strong_eq(&etag2)); + assert!(etag1.weak_eq(&etag2)); + assert!(etag1.strong_ne(&etag2)); + assert!(!etag1.weak_ne(&etag2)); + + etag1 = EntityTag::weak("1".to_owned()); + etag2 = EntityTag::weak("2".to_owned()); + assert!(!etag1.strong_eq(&etag2)); + assert!(!etag1.weak_eq(&etag2)); + assert!(etag1.strong_ne(&etag2)); + assert!(etag1.weak_ne(&etag2)); + + etag1 = EntityTag::weak("1".to_owned()); + etag2 = EntityTag::strong("1".to_owned()); + assert!(!etag1.strong_eq(&etag2)); + assert!(etag1.weak_eq(&etag2)); + assert!(etag1.strong_ne(&etag2)); + assert!(!etag1.weak_ne(&etag2)); + + etag1 = EntityTag::strong("1".to_owned()); + etag2 = EntityTag::strong("1".to_owned()); + assert!(etag1.strong_eq(&etag2)); + assert!(etag1.weak_eq(&etag2)); + assert!(!etag1.strong_ne(&etag2)); + assert!(!etag1.weak_ne(&etag2)); + } +} diff --git a/vendor/hyper-0.10.16/src/header/shared/httpdate.rs b/vendor/hyper-0.10.16/src/header/shared/httpdate.rs new file mode 100644 index 0000000..1a2dee8 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/httpdate.rs @@ -0,0 +1,91 @@ +use std::str::FromStr; +use std::fmt::{self, Display}; + +use time; + +/// A `time::Time` with HTTP formatting and parsing +/// +// Prior to 1995, there were three different formats commonly used by +// servers to communicate timestamps. For compatibility with old +// implementations, all three are defined here. The preferred format is +// a fixed-length and single-zone subset of the date and time +// specification used by the Internet Message Format [RFC5322]. 
+// +// HTTP-date = IMF-fixdate / obs-date +// +// An example of the preferred format is +// +// Sun, 06 Nov 1994 08:49:37 GMT ; IMF-fixdate +// +// Examples of the two obsolete formats are +// +// Sunday, 06-Nov-94 08:49:37 GMT ; obsolete RFC 850 format +// Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format +// +// A recipient that parses a timestamp value in an HTTP header field +// MUST accept all three HTTP-date formats. When a sender generates a +// header field that contains one or more timestamps defined as +// HTTP-date, the sender MUST generate those timestamps in the +// IMF-fixdate format. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct HttpDate(pub time::Tm); + +impl FromStr for HttpDate { + type Err = ::Error; + fn from_str(s: &str) -> ::Result { + match time::strptime(s, "%a, %d %b %Y %T %Z").or_else(|_| { + time::strptime(s, "%A, %d-%b-%y %T %Z") + }).or_else(|_| { + time::strptime(s, "%c") + }) { + Ok(t) => Ok(HttpDate(t)), + Err(_) => Err(::Error::Header), + } + } +} + +impl Display for HttpDate { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0.to_utc().rfc822(), f) + } +} + +#[cfg(test)] +mod tests { + use time::Tm; + use super::HttpDate; + + const NOV_07: HttpDate = HttpDate(Tm { + tm_nsec: 0, + tm_sec: 37, + tm_min: 48, + tm_hour: 8, + tm_mday: 7, + tm_mon: 10, + tm_year: 94, + tm_wday: 0, + tm_isdst: 0, + tm_yday: 0, + tm_utcoff: 0, + }); + + #[test] + fn test_imf_fixdate() { + assert_eq!("Sun, 07 Nov 1994 08:48:37 GMT".parse::().unwrap(), NOV_07); + } + + #[test] + fn test_rfc_850() { + assert_eq!("Sunday, 07-Nov-94 08:48:37 GMT".parse::().unwrap(), NOV_07); + } + + #[test] + fn test_asctime() { + assert_eq!("Sun Nov 7 08:48:37 1994".parse::().unwrap(), NOV_07); + } + + #[test] + fn test_no_date() { + assert!("this-is-no-date".parse::().is_err()); + } +} diff --git a/vendor/hyper-0.10.16/src/header/shared/mod.rs b/vendor/hyper-0.10.16/src/header/shared/mod.rs new file mode 100644 
index 0000000..eca52c7 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/mod.rs @@ -0,0 +1,11 @@ +pub use self::charset::Charset; +pub use self::encoding::{Encoding, EncodingType}; +pub use self::entity::EntityTag; +pub use self::httpdate::HttpDate; +pub use self::quality_item::{Quality, QualityItem, qitem, q}; + +mod charset; +mod encoding; +mod entity; +mod httpdate; +mod quality_item; diff --git a/vendor/hyper-0.10.16/src/header/shared/quality_item.rs b/vendor/hyper-0.10.16/src/header/shared/quality_item.rs new file mode 100644 index 0000000..70089e4 --- /dev/null +++ b/vendor/hyper-0.10.16/src/header/shared/quality_item.rs @@ -0,0 +1,215 @@ +use std::cmp; +use std::default::Default; +use std::fmt; +use std::str; + +/// Represents a quality used in quality values. +/// +/// Can be created with the `q` function. +/// +/// # Implementation notes +/// +/// The quality value is defined as a number between 0 and 1 with three decimal places. This means +/// there are 1000 possible values. Since floating point numbers are not exact and the smallest +/// floating point data type (`f32`) consumes four bytes, hyper uses an `u16` value to store the +/// quality internally. For performance reasons you may set quality directly to a value between +/// 0 and 1000 e.g. `Quality(532)` matches the quality `q=0.532`. +/// +/// [RFC7231 Section 5.3.1](https://tools.ietf.org/html/rfc7231#section-5.3.1) +/// gives more information on quality values in HTTP header fields. 
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct Quality(pub u16); + +impl fmt::Display for Quality { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + 1000 => Ok(()), + 0 => f.write_str("; q=0"), + x => write!(f, "; q=0.{}", format!("{:03}", x).trim_right_matches('0')) + } + } +} + +impl Default for Quality { + fn default() -> Quality { + Quality(1000) + } +} + +/// Represents an item with a quality value as defined in +/// [RFC7231](https://tools.ietf.org/html/rfc7231#section-5.3.1). +#[derive(Clone, PartialEq, Debug)] +pub struct QualityItem { + /// The actual contents of the field. + pub item: T, + /// The quality (client or server preference) for the value. + pub quality: Quality, +} + +impl QualityItem { + /// Creates a new `QualityItem` from an item and a quality. + /// The item can be of any type. + /// The quality should be a value in the range [0, 1]. + pub fn new(item: T, quality: Quality) -> QualityItem { + QualityItem { + item: item, + quality: quality + } + } +} + +impl cmp::PartialOrd for QualityItem { + fn partial_cmp(&self, other: &QualityItem) -> Option { + self.quality.partial_cmp(&other.quality) + } +} + +impl fmt::Display for QualityItem { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}{}", self.item, format!("{}", self.quality)) + } +} + +impl str::FromStr for QualityItem { + type Err = ::Error; + fn from_str(s: &str) -> ::Result> { + // Set defaults used if parsing fails. 
+ let mut raw_item = s; + let mut quality = 1f32; + + let parts: Vec<&str> = s.rsplitn(2, ';').map(|x| x.trim()).collect(); + if parts.len() == 2 { + let start = &parts[0][0..2]; + if start == "q=" || start == "Q=" { + let q_part = &parts[0][2..parts[0].len()]; + if q_part.len() > 5 { + return Err(::Error::Header); + } + match q_part.parse::() { + Ok(q_value) => { + if 0f32 <= q_value && q_value <= 1f32 { + quality = q_value; + raw_item = parts[1]; + } else { + return Err(::Error::Header); + } + }, + Err(_) => return Err(::Error::Header), + } + } + } + match raw_item.parse::() { + // we already checked above that the quality is within range + Ok(item) => Ok(QualityItem::new(item, from_f32(quality))), + Err(_) => Err(::Error::Header), + } + } +} + +fn from_f32(f: f32) -> Quality { + // this function is only used internally. A check that `f` is within range + // should be done before calling this method. Just in case, this + // debug_assert should catch if we were forgetful + debug_assert!(f >= 0f32 && f <= 1f32, "q value must be between 0.0 and 1.0"); + Quality((f * 1000f32) as u16) +} + +/// Convinience function to wrap a value in a `QualityItem` +/// Sets `q` to the default 1.0 +pub fn qitem(item: T) -> QualityItem { + QualityItem::new(item, Default::default()) +} + +/// Convenience function to create a `Quality` from a float. 
+pub fn q(f: f32) -> Quality { + assert!(f >= 0f32 && f <= 1f32, "q value must be between 0.0 and 1.0"); + from_f32(f) +} + +#[cfg(test)] +mod tests { + use super::*; + use super::super::encoding::*; + + #[test] + fn test_quality_item_show1() { + let x = qitem(Encoding::Chunked); + assert_eq!(format!("{}", x), "chunked"); + } + #[test] + fn test_quality_item_show2() { + let x = QualityItem::new(Encoding::Chunked, Quality(1)); + assert_eq!(format!("{}", x), "chunked; q=0.001"); + } + #[test] + fn test_quality_item_show3() { + // Custom value + let x = QualityItem{ + item: "identity".parse::<Encoding>().unwrap(), + quality: Quality(500), + }; + assert_eq!(format!("{}", x), "identity; q=0.5"); + } + + #[test] + fn test_quality_item_from_str1() { + let x: ::Result<QualityItem<Encoding>> = "chunked".parse(); + assert_eq!(x.unwrap(), QualityItem{ item: Encoding::Chunked, quality: Quality(1000), }); + } + #[test] + fn test_quality_item_from_str2() { + let x: ::Result<QualityItem<Encoding>> = "chunked; q=1".parse(); + assert_eq!(x.unwrap(), QualityItem{ item: Encoding::Chunked, quality: Quality(1000), }); + } + #[test] + fn test_quality_item_from_str3() { + let x: ::Result<QualityItem<Encoding>> = "gzip; q=0.5".parse(); + assert_eq!(x.unwrap(), QualityItem{ item: "gzip".parse::<Encoding>().unwrap(), quality: Quality(500), }); + } + #[test] + fn test_quality_item_from_str4() { + let x: ::Result<QualityItem<Encoding>> = "gzip; q=0.273".parse(); + assert_eq!(x.unwrap(), QualityItem{ item: "gzip".parse::<Encoding>().unwrap(), quality: Quality(273), }); + } + #[test] + fn test_quality_item_from_str5() { + let x: ::Result<QualityItem<Encoding>> = "gzip; q=0.2739999".parse(); + assert!(x.is_err()); + } + #[test] + fn test_quality_item_from_str6() { + let x: ::Result<QualityItem<Encoding>> = "gzip; q=2".parse(); + assert!(x.is_err()); + } + #[test] + fn test_quality_item_ordering() { + let x: QualityItem<Encoding> = "gzip; q=0.5".parse().ok().unwrap(); + let y: QualityItem<Encoding> = "gzip; q=0.273".parse().ok().unwrap(); + let comparision_result: bool = x.gt(&y); + assert!(comparision_result) + } + + #[test] + fn test_quality() { + assert_eq!(q(0.5), Quality(500)); + } + + #[test] + fn test_quality2() { + assert_eq!(format!("{}", 
q(0.0)), "; q=0"); + } + + #[test] + #[should_panic] // FIXME - 32-bit msvc unwinding broken + #[cfg_attr(all(target_arch="x86", target_env="msvc"), ignore)] + fn test_quality_invalid() { + q(-1.0); + } + + #[test] + #[should_panic] // FIXME - 32-bit msvc unwinding broken + #[cfg_attr(all(target_arch="x86", target_env="msvc"), ignore)] + fn test_quality_invalid2() { + q(2.0); + } +} diff --git a/vendor/hyper-0.10.16/src/http/h1.rs b/vendor/hyper-0.10.16/src/http/h1.rs new file mode 100644 index 0000000..a685c9f --- /dev/null +++ b/vendor/hyper-0.10.16/src/http/h1.rs @@ -0,0 +1,623 @@ +//! Adapts the HTTP/1.1 implementation into the `HttpMessage` API. +use std::cmp::min; +use std::fmt; +use std::io::{self, Write, BufRead, Read}; + +use httparse; + +use buffer::BufReader; +use Error; +use header::{Headers}; +use method::{Method}; +use version::HttpVersion; +use version::HttpVersion::{Http10, Http11}; +use uri::RequestUri; + +use self::HttpReader::{SizedReader, ChunkedReader, EofReader, EmptyReader}; +use self::HttpWriter::{SizedWriter, ThroughWriter}; + +/// Readers to handle different Transfer-Encodings. +/// +/// If a message body does not include a Transfer-Encoding, it *should* +/// include a Content-Length header. +pub enum HttpReader { + /// A Reader used when a Content-Length header is passed with a positive integer. + SizedReader(R, u64), + /// A Reader used when Transfer-Encoding is `chunked`. + ChunkedReader(R, Option), + /// A Reader used for responses that don't indicate a length or chunked. + /// + /// Note: This should only used for `Response`s. It is illegal for a + /// `Request` to be made with both `Content-Length` and + /// `Transfer-Encoding: chunked` missing, as explained from the spec: + /// + /// > If a Transfer-Encoding header field is present in a response and + /// > the chunked transfer coding is not the final encoding, the + /// > message body length is determined by reading the connection until + /// > it is closed by the server. 
If a Transfer-Encoding header field + /// > is present in a request and the chunked transfer coding is not + /// > the final encoding, the message body length cannot be determined + /// > reliably; the server MUST respond with the 400 (Bad Request) + /// > status code and then close the connection. + EofReader(R), + /// A Reader used for messages that should never have a body. + /// + /// See https://tools.ietf.org/html/rfc7230#section-3.3.3 + EmptyReader(R), +} + +impl HttpReader { + + /// Unwraps this HttpReader and returns the underlying Reader. + pub fn into_inner(self) -> R { + match self { + SizedReader(r, _) => r, + ChunkedReader(r, _) => r, + EofReader(r) => r, + EmptyReader(r) => r, + } + } + + /// Gets a borrowed reference to the underlying Reader. + pub fn get_ref(&self) -> &R { + match *self { + SizedReader(ref r, _) => r, + ChunkedReader(ref r, _) => r, + EofReader(ref r) => r, + EmptyReader(ref r) => r, + } + } + + /// Gets a mutable reference to the underlying Reader. + pub fn get_mut(&mut self) -> &mut R { + match *self { + SizedReader(ref mut r, _) => r, + ChunkedReader(ref mut r, _) => r, + EofReader(ref mut r) => r, + EmptyReader(ref mut r) => r, + } + } +} + +impl fmt::Debug for HttpReader { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + SizedReader(_,rem) => write!(fmt, "SizedReader(remaining={:?})", rem), + ChunkedReader(_, None) => write!(fmt, "ChunkedReader(chunk_remaining=unknown)"), + ChunkedReader(_, Some(rem)) => write!(fmt, "ChunkedReader(chunk_remaining={:?})", rem), + EofReader(_) => write!(fmt, "EofReader"), + EmptyReader(_) => write!(fmt, "EmptyReader"), + } + } +} + +impl Read for HttpReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if buf.is_empty() { + return Ok(0); + } + match *self { + SizedReader(ref mut body, ref mut remaining) => { + trace!("Sized read, remaining={:?}", remaining); + if *remaining == 0 { + Ok(0) + } else { + let to_read = min(*remaining as usize, buf.len()); + let 
num = try!(body.read(&mut buf[..to_read])) as u64; + trace!("Sized read: {}", num); + if num > *remaining { + *remaining = 0; + } else if num == 0 { + return Err(io::Error::new(io::ErrorKind::Other, "early eof")); + } else { + *remaining -= num; + } + Ok(num as usize) + } + }, + ChunkedReader(ref mut body, ref mut opt_remaining) => { + let mut rem = match *opt_remaining { + Some(ref rem) => *rem, + // None means we don't know the size of the next chunk + None => try!(read_chunk_size(body)) + }; + trace!("Chunked read, remaining={:?}", rem); + + if rem == 0 { + if opt_remaining.is_none() { + try!(eat(body, LINE_ENDING.as_bytes())); + } + + *opt_remaining = Some(0); + + // chunk of size 0 signals the end of the chunked stream + // if the 0 digit was missing from the stream, it would + // be an InvalidInput error instead. + trace!("end of chunked"); + + return Ok(0) + } + + let to_read = min(rem as usize, buf.len()); + let count = try!(body.read(&mut buf[..to_read])) as u64; + + if count == 0 { + *opt_remaining = Some(0); + return Err(io::Error::new(io::ErrorKind::Other, "early eof")); + } + + rem -= count; + *opt_remaining = if rem > 0 { + Some(rem) + } else { + try!(eat(body, LINE_ENDING.as_bytes())); + None + }; + Ok(count as usize) + }, + EofReader(ref mut body) => { + let r = body.read(buf); + trace!("eofread: {:?}", r); + r + }, + EmptyReader(_) => Ok(0) + } + } +} + +fn eat(rdr: &mut R, bytes: &[u8]) -> io::Result<()> { + let mut buf = [0]; + for &b in bytes.iter() { + match try!(rdr.read(&mut buf)) { + 1 if buf[0] == b => (), + _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid characters found")), + } + } + Ok(()) +} + +/// Chunked chunks start with 1*HEXDIGIT, indicating the size of the chunk. +fn read_chunk_size(rdr: &mut R) -> io::Result { + macro_rules! 
byte ( + ($rdr:ident) => ({ + let mut buf = [0]; + match try!($rdr.read(&mut buf)) { + 1 => buf[0], + _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid chunk size line")), + + } + }) + ); + let mut size = 0u64; + let radix = 16; + let mut in_ext = false; + let mut in_chunk_size = true; + loop { + match byte!(rdr) { + b@b'0'...b'9' if in_chunk_size => { + size = size.checked_mul(radix).ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "too long"))?; + size += (b - b'0') as u64; + }, + b@b'a'...b'f' if in_chunk_size => { + size = size.checked_mul(radix).ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "too long"))?; + size += (b + 10 - b'a') as u64; + }, + b@b'A'...b'F' if in_chunk_size => { + size = size.checked_mul(radix).ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "too long"))?; + size += (b + 10 - b'A') as u64; + }, + CR => { + match byte!(rdr) { + LF => break, + _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid chunk size line")) + + } + }, + // If we weren't in the extension yet, the ";" signals its start + b';' if !in_ext => { + in_ext = true; + in_chunk_size = false; + }, + // "Linear white space" is ignored between the chunk size and the + // extension separator token (";") due to the "implied *LWS rule". + b'\t' | b' ' if !in_ext & !in_chunk_size => {}, + // LWS can follow the chunk size, but no more digits can come + b'\t' | b' ' if in_chunk_size => in_chunk_size = false, + // We allow any arbitrary octet once we are in the extension, since + // they all get ignored anyway. According to the HTTP spec, valid + // extensions would have a more strict syntax: + // (token ["=" (token | quoted-string)]) + // but we gain nothing by rejecting an otherwise valid chunk size. + ext if in_ext => { + trace!("chunk extension byte={}", ext); + }, + // Finally, if we aren't in the extension and we're reading any + // other octet, the chunk size line is invalid! 
+ _ => { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "Invalid chunk size line")); + } + } + } + trace!("chunk size={:?}", size); + Ok(size) +} + +/// Writers to handle different Transfer-Encodings. +pub enum HttpWriter { + /// A no-op Writer, used initially before Transfer-Encoding is determined. + ThroughWriter(W), + /// A Writer for when Content-Length is set. + /// + /// Enforces that the body is not longer than the Content-Length header. + SizedWriter(W, u64), +} + +impl HttpWriter { + /// Unwraps the HttpWriter and returns the underlying Writer. + #[inline] + pub fn into_inner(self) -> W { + match self { + ThroughWriter(w) => w, + SizedWriter(w, _) => w, + } + } + + /// Access the inner Writer. + #[inline] + pub fn get_ref(&self) -> &W { + match *self { + ThroughWriter(ref w) => w, + SizedWriter(ref w, _) => w, + } + } + + /// Access the inner Writer mutably. + /// + /// Warning: You should not write to this directly, as you can corrupt + /// the state. + #[inline] + pub fn get_mut(&mut self) -> &mut W { + match *self { + ThroughWriter(ref mut w) => w, + SizedWriter(ref mut w, _) => w, + } + } + + /// Ends the HttpWriter, and returns the underlying Writer. + /// + /// A final `write_all()` is called with an empty message, and then flushed. + /// The ChunkedWriter variant will use this to write the 0-sized last-chunk. + #[inline] + pub fn end(mut self) -> Result> { + match self.flush() { + Ok(..) 
=> Ok(self.into_inner()), + Err(e) => Err(EndError(e, self)) + } + } +} + +#[derive(Debug)] +pub struct EndError(io::Error, HttpWriter); + +impl From> for io::Error { + fn from(e: EndError) -> io::Error { + e.0 + } +} + +impl Write for HttpWriter { + #[inline] + fn write(&mut self, msg: &[u8]) -> io::Result { + match *self { + ThroughWriter(ref mut w) => w.write(msg), + SizedWriter(ref mut w, ref mut remaining) => { + let len = msg.len() as u64; + if len > *remaining { + let len = *remaining; + try!(w.write_all(&msg[..len as usize])); + *remaining = 0; + Ok(len as usize) + } else { + try!(w.write_all(msg)); + *remaining -= len; + Ok(len as usize) + } + }, + } + } + + #[inline] + fn flush(&mut self) -> io::Result<()> { + self.get_mut().flush() + } +} + +impl fmt::Debug for HttpWriter { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + ThroughWriter(_) => write!(fmt, "ThroughWriter"), + SizedWriter(_, rem) => write!(fmt, "SizedWriter(remaining={:?})", rem), + } + } +} + +const MAX_HEADERS: usize = 100; + +/// Parses a request into an Incoming message head. 
+#[inline] +pub fn parse_request(buf: &mut BufReader) -> ::Result> { + parse::(buf) +} + +fn parse, I>(rdr: &mut BufReader) -> ::Result> { + loop { + match try!(try_parse::(rdr)) { + httparse::Status::Complete((inc, len)) => { + rdr.consume(len); + return Ok(inc); + }, + _partial => () + } + let n = try!(rdr.read_into_buf()); + if n == 0 { + let buffered = rdr.get_buf().len(); + if buffered == ::buffer::MAX_BUFFER_SIZE { + return Err(Error::TooLarge); + } else { + return Err(Error::Io(io::Error::new( + io::ErrorKind::UnexpectedEof, + "end of stream before headers finished" + ))); + } + } + } +} + +fn try_parse, I>(rdr: &mut BufReader) -> TryParseResult { + let mut headers = [httparse::EMPTY_HEADER; MAX_HEADERS]; + let buf = rdr.get_buf(); + if buf.len() == 0 { + return Ok(httparse::Status::Partial); + } + trace!("try_parse({:?})", buf); + ::try_parse(&mut headers, buf) +} + +#[doc(hidden)] +trait TryParse { + type Subject; + fn try_parse<'a>(headers: &'a mut [httparse::Header<'a>], buf: &'a [u8]) -> + TryParseResult; +} + +type TryParseResult = Result, usize)>, Error>; + +impl<'a> TryParse for httparse::Request<'a, 'a> { + type Subject = (Method, RequestUri); + + fn try_parse<'b>(headers: &'b mut [httparse::Header<'b>], buf: &'b [u8]) -> + TryParseResult<(Method, RequestUri)> { + trace!("Request.try_parse([Header; {}], [u8; {}])", headers.len(), buf.len()); + let mut req = httparse::Request::new(headers); + Ok(match try!(req.parse(buf)) { + httparse::Status::Complete(len) => { + trace!("Request.try_parse Complete({})", len); + httparse::Status::Complete((Incoming { + version: if req.version.unwrap() == 1 { Http11 } else { Http10 }, + subject: ( + try!(req.method.unwrap().parse()), + try!(req.path.unwrap().parse()) + ), + headers: try!(Headers::from_raw(req.headers)) + }, len)) + }, + httparse::Status::Partial => httparse::Status::Partial + }) + } +} + +/// An Incoming Message head. Includes request/status line, and headers. 
+#[derive(Debug)] +pub struct Incoming { + /// HTTP version of the message. + pub version: HttpVersion, + /// Subject (request line or status line) of Incoming message. + pub subject: S, + /// Headers of the Incoming message. + pub headers: Headers +} + +/// The `\r` byte. +pub const CR: u8 = b'\r'; +/// The `\n` byte. +pub const LF: u8 = b'\n'; +/// The bytes `\r\n`. +pub const LINE_ENDING: &'static str = "\r\n"; + +#[cfg(test)] +mod tests { + use std::error::Error; + use std::io::{self, Read, Write}; + + + use buffer::BufReader; + use mock::MockStream; + use http::HttpMessage; + + use super::{read_chunk_size, parse_request, parse_response, Http11Message}; + + #[test] + fn test_write_sized() { + use std::str::from_utf8; + let mut w = super::HttpWriter::SizedWriter(Vec::new(), 8); + w.write_all(b"foo bar").unwrap(); + assert_eq!(w.write(b"baz").unwrap(), 1); + + let buf = w.end().unwrap(); + let s = from_utf8(buf.as_ref()).unwrap(); + assert_eq!(s, "foo barb"); + } + + #[test] + fn test_read_chunk_size() { + fn read(s: &str, result: u64) { + assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap(), result); + } + + fn read_err(s: &str) { + assert_eq!(read_chunk_size(&mut s.as_bytes()).unwrap_err().kind(), + io::ErrorKind::InvalidInput); + } + + read("1\r\n", 1); + read("01\r\n", 1); + read("0\r\n", 0); + read("00\r\n", 0); + read("A\r\n", 10); + read("a\r\n", 10); + read("Ff\r\n", 255); + read("Ff \r\n", 255); + // Missing LF or CRLF + read_err("F\rF"); + read_err("F"); + // Invalid hex digit + read_err("X\r\n"); + read_err("1X\r\n"); + read_err("-\r\n"); + read_err("-1\r\n"); + // Acceptable (if not fully valid) extensions do not influence the size + read("1;extension\r\n", 1); + read("a;ext name=value\r\n", 10); + read("1;extension;extension2\r\n", 1); + read("1;;; ;\r\n", 1); + read("2; extension...\r\n", 2); + read("3 ; extension=123\r\n", 3); + read("3 ;\r\n", 3); + read("3 ; \r\n", 3); + // Invalid extensions cause an error + read_err("1 invalid 
extension\r\n"); + read_err("1 A\r\n"); + read_err("1;no CRLF"); + // https://github.com/hyperium/hyper/security/advisories/GHSA-5h46-h7hh-c6x9 + read_err("f0000000000000003"); + } + + #[test] + fn test_read_sized_early_eof() { + let mut r = super::HttpReader::SizedReader(MockStream::with_input(b"foo bar"), 10); + let mut buf = [0u8; 10]; + assert_eq!(r.read(&mut buf).unwrap(), 7); + let e = r.read(&mut buf).unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::Other); + assert_eq!(e.description(), "early eof"); + } + + #[test] + fn test_read_chunked_early_eof() { + let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"\ + 9\r\n\ + foo bar\ + "), None); + + let mut buf = [0u8; 10]; + assert_eq!(r.read(&mut buf).unwrap(), 7); + let e = r.read(&mut buf).unwrap_err(); + assert_eq!(e.kind(), io::ErrorKind::Other); + assert_eq!(e.description(), "early eof"); + } + + #[test] + fn test_read_sized_zero_len_buf() { + let mut r = super::HttpReader::SizedReader(MockStream::with_input(b"foo bar"), 7); + let mut buf = [0u8; 0]; + assert_eq!(r.read(&mut buf).unwrap(), 0); + } + + #[test] + fn test_read_chunked_zero_len_buf() { + let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"\ + 7\r\n\ + foo bar\ + 0\r\n\r\n\ + "), None); + + let mut buf = [0u8; 0]; + assert_eq!(r.read(&mut buf).unwrap(), 0); + } + + #[test] + fn test_read_chunked_fully_consumes() { + let mut r = super::HttpReader::ChunkedReader(MockStream::with_input(b"0\r\n\r\n"), None); + let mut buf = [0; 1]; + assert_eq!(r.read(&mut buf).unwrap(), 0); + assert_eq!(r.read(&mut buf).unwrap(), 0); + + match r { + super::HttpReader::ChunkedReader(mut r, _) => assert_eq!(r.read(&mut buf).unwrap(), 0), + _ => unreachable!(), + } + } + + #[test] + fn test_message_get_incoming_invalid_content_length() { + let raw = MockStream::with_input( + b"HTTP/1.1 200 OK\r\nContent-Length: asdf\r\n\r\n"); + let mut msg = Http11Message::with_stream(Box::new(raw)); + assert!(msg.get_incoming().is_err()); + 
assert!(msg.close_connection().is_ok()); + } + + #[test] + fn test_parse_incoming() { + let mut raw = MockStream::with_input(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); + let mut buf = BufReader::new(&mut raw); + parse_request(&mut buf).unwrap(); + } + + #[test] + fn test_parse_raw_status() { + let mut raw = MockStream::with_input(b"HTTP/1.1 200 OK\r\n\r\n"); + let mut buf = BufReader::new(&mut raw); + let res = parse_response(&mut buf).unwrap(); + + assert_eq!(res.subject.1, "OK"); + + let mut raw = MockStream::with_input(b"HTTP/1.1 200 Howdy\r\n\r\n"); + let mut buf = BufReader::new(&mut raw); + let res = parse_response(&mut buf).unwrap(); + + assert_eq!(res.subject.1, "Howdy"); + } + + + #[test] + fn test_parse_tcp_closed() { + use std::io::ErrorKind; + use error::Error; + + let mut empty = MockStream::new(); + let mut buf = BufReader::new(&mut empty); + match parse_request(&mut buf) { + Err(Error::Io(ref e)) if e.kind() == ErrorKind::UnexpectedEof => (), + other => panic!("unexpected result: {:?}", other) + } + } + + #[cfg(feature = "nightly")] + use test::Bencher; + + #[cfg(feature = "nightly")] + #[bench] + fn bench_parse_incoming(b: &mut Bencher) { + let mut raw = MockStream::with_input(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n"); + let mut buf = BufReader::new(&mut raw); + b.iter(|| { + parse_request(&mut buf).unwrap(); + buf.get_mut().read.set_position(0); + }); + } +} diff --git a/vendor/hyper-0.10.16/src/http/mod.rs b/vendor/hyper-0.10.16/src/http/mod.rs new file mode 100644 index 0000000..56af9a9 --- /dev/null +++ b/vendor/hyper-0.10.16/src/http/mod.rs @@ -0,0 +1,35 @@ +//! Pieces pertaining to the HTTP message protocol. 
+use header::Connection; +use header::ConnectionOption::{KeepAlive, Close}; +use header::Headers; +use version::HttpVersion; +use version::HttpVersion::{Http10, Http11}; + +pub mod h1; + +#[inline] +pub fn should_keep_alive(version: HttpVersion, headers: &Headers) -> bool { + trace!("should_keep_alive( {:?}, {:?} )", version, headers.get::()); + match (version, headers.get::()) { + (Http10, None) => false, + (Http10, Some(conn)) if !conn.contains(&KeepAlive) => false, + (Http11, Some(conn)) if conn.contains(&Close) => false, + _ => true + } +} + +#[test] +fn test_should_keep_alive() { + let mut headers = Headers::new(); + + assert!(!should_keep_alive(Http10, &headers)); + assert!(should_keep_alive(Http11, &headers)); + + headers.set(Connection::close()); + assert!(!should_keep_alive(Http10, &headers)); + assert!(!should_keep_alive(Http11, &headers)); + + headers.set(Connection::keep_alive()); + assert!(should_keep_alive(Http10, &headers)); + assert!(should_keep_alive(Http11, &headers)); +} diff --git a/vendor/hyper-0.10.16/src/lib.rs b/vendor/hyper-0.10.16/src/lib.rs new file mode 100644 index 0000000..7bfb794 --- /dev/null +++ b/vendor/hyper-0.10.16/src/lib.rs @@ -0,0 +1,199 @@ +#![doc(html_root_url = "https://docs.rs/hyper/v0.10.16")] +//#![cfg_attr(test, deny(missing_docs))] +//#![cfg_attr(test, deny(warnings))] +#![cfg_attr(all(test, feature = "nightly"), feature(test))] +#![allow(useless_deprecated)] +#![allow(deprecated)] +#![allow(anonymous_parameters)] +#![allow(ellipsis_inclusive_range_patterns)] +#![allow(bare_trait_objects)] +#![allow(unused_must_use)] +#![allow(unused_variables)] + +//! # Hyper +//! +//! Hyper is a fast, modern HTTP implementation written in and for Rust. It +//! is a low-level typesafe abstraction over raw HTTP, providing an elegant +//! layer over "stringly-typed" HTTP. +//! +//! Hyper offers both a [Client](client/index.html) and a +//! [Server](server/index.html) which can be used to drive complex web +//! 
applications written entirely in Rust. +//! +//! ## Internal Design +//! +//! Hyper is designed as a relatively low-level wrapper over raw HTTP. It should +//! allow the implementation of higher-level abstractions with as little pain as +//! possible, and should not irrevocably hide any information from its users. +//! +//! ### Common Functionality +//! +//! Functionality and code shared between the Server and Client implementations +//! can be found in `src` directly - this includes `NetworkStream`s, `Method`s, +//! `StatusCode`, and so on. +//! +//! #### Methods +//! +//! Methods are represented as a single `enum` to remain as simple as possible. +//! Extension Methods are represented as raw `String`s. A method's safety and +//! idempotence can be accessed using the `safe` and `idempotent` methods. +//! +//! #### StatusCode +//! +//! Status codes are also represented as a single, exhaustive, `enum`. This +//! representation is efficient, typesafe, and ergonomic as it allows the use of +//! `match` to disambiguate known status codes. +//! +//! #### Headers +//! +//! Hyper's [header](header/index.html) representation is likely the most +//! complex API exposed by Hyper. +//! +//! Hyper's headers are an abstraction over an internal `HashMap` and provides a +//! typesafe API for interacting with headers that does not rely on the use of +//! "string-typing." +//! +//! Each HTTP header in Hyper has an associated type and implementation of the +//! `Header` trait, which defines an HTTP headers name as a string, how to parse +//! that header, and how to format that header. +//! +//! Headers are then parsed from the string representation lazily when the typed +//! representation of a header is requested and formatted back into their string +//! representation when headers are written back to the client. +//! +//! #### NetworkStream and NetworkAcceptor +//! +//! These are found in `src/net.rs` and define the interface that acceptors and +//! 
streams must fulfill for them to be used within Hyper. They are by and large +//! internal tools and you should only need to mess around with them if you want to +//! mock or replace `TcpStream` and `TcpAcceptor`. +//! +//! ### Server +//! +//! Server-specific functionality, such as `Request` and `Response` +//! representations, are found in in `src/server`. +//! +//! #### Handler + Server +//! +//! A `Handler` in Hyper accepts a `Request` and `Response`. This is where +//! user-code can handle each connection. The server accepts connections in a +//! task pool with a customizable number of threads, and passes the Request / +//! Response to the handler. +//! +//! #### Request +//! +//! An incoming HTTP Request is represented as a struct containing +//! a `Reader` over a `NetworkStream`, which represents the body, headers, a remote +//! address, an HTTP version, and a `Method` - relatively standard stuff. +//! +//! `Request` implements `Reader` itself, meaning that you can ergonomically get +//! the body out of a `Request` using standard `Reader` methods and helpers. +//! +//! #### Response +//! +//! An outgoing HTTP Response is also represented as a struct containing a `Writer` +//! over a `NetworkStream` which represents the Response body in addition to +//! standard items such as the `StatusCode` and HTTP version. `Response`'s `Writer` +//! implementation provides a streaming interface for sending data over to the +//! client. +//! +//! One of the traditional problems with representing outgoing HTTP Responses is +//! tracking the write-status of the Response - have we written the status-line, +//! the headers, the body, etc.? Hyper tracks this information statically using the +//! type system and prevents you, using the type system, from writing headers after +//! you have started writing to the body or vice versa. +//! +//! Hyper does this through a phantom type parameter in the definition of Response, +//! 
which tracks whether you are allowed to write to the headers or the body. This +//! phantom type can have two values `Fresh` or `Streaming`, with `Fresh` +//! indicating that you can write the headers and `Streaming` indicating that you +//! may write to the body, but not the headers. +//! +//! ### Client +//! +//! Client-specific functionality, such as `Request` and `Response` +//! representations, are found in `src/client`. +//! +//! #### Request +//! +//! An outgoing HTTP Request is represented as a struct containing a `Writer` over +//! a `NetworkStream` which represents the Request body in addition to the standard +//! information such as headers and the request method. +//! +//! Outgoing Requests track their write-status in almost exactly the same way as +//! outgoing HTTP Responses do on the Server, so we will defer to the explanation +//! in the documentation for server Response. +//! +//! Requests expose an efficient streaming interface instead of a builder pattern, +//! but they also provide the needed interface for creating a builder pattern over +//! the API exposed by core Hyper. +//! +//! #### Response +//! +//! Incoming HTTP Responses are represented as a struct containing a `Reader` over +//! a `NetworkStream` and contain headers, a status, and an http version. They +//! implement `Reader` and can be read to get the data out of a `Response`. +//! + +extern crate base64; +extern crate time; +#[macro_use] extern crate url; +extern crate unicase; +extern crate httparse; +extern crate num_cpus; +extern crate traitobject; +extern crate typeable; +extern crate crossbeam_channel; +extern crate smallvec; + +#[macro_use] +extern crate mime as mime_crate; + +macro_rules! trace { ($($i:tt)*) => { () } } +macro_rules! debug { ($($i:tt)*) => { () } } +macro_rules! info { ($($i:tt)*) => { () } } +macro_rules! warn { ($($i:tt)*) => { () } } +macro_rules! 
error { ($($i:tt)*) => { eprint!("[hyper] "); eprintln!($($i)*) } } + +#[cfg(all(test, feature = "nightly"))] +extern crate test; + + +pub use url::Url; +pub use error::{Result, Error}; +pub use method::Method::{Get, Head, Post, Delete}; +pub use status::StatusCode::{Ok, BadRequest, NotFound}; +pub use server::Server; + +macro_rules! todo( + ($($arg:tt)*) => (if cfg!(not(ndebug)) { + trace!("TODO: {:?}", format_args!($($arg)*)) + }) +); + +#[doc(hidden)] +pub mod buffer; +pub mod error; +pub mod method; +pub mod header; +pub mod http; +pub mod net; +pub mod server; +pub mod status; +pub mod uri; +pub mod version; + +/// Re-exporting the mime crate, for convenience. +pub mod mime { + pub use mime_crate::*; +} + + +fn _assert_types() { + fn _assert_send() {} + fn _assert_sync() {} + + _assert_send::(); + + _assert_sync::(); +} diff --git a/vendor/hyper-0.10.16/src/method.rs b/vendor/hyper-0.10.16/src/method.rs new file mode 100644 index 0000000..358a751 --- /dev/null +++ b/vendor/hyper-0.10.16/src/method.rs @@ -0,0 +1,196 @@ +//! The HTTP request method +use std::fmt; +use std::str::FromStr; +use std::convert::AsRef; + +use error::Error; +use self::Method::{Options, Get, Post, Put, Delete, Head, Trace, Connect, Patch, + Extension, DavCopy, DavMkcol, DavMove, DavPropfind, DavProppatch, DavLock}; + + +/// The Request Method (VERB) +/// +/// Currently includes 8 variants representing the 8 methods defined in +/// [RFC 7230](https://tools.ietf.org/html/rfc7231#section-4.1), plus PATCH, +/// and an Extension variant for all extensions. +/// +/// It may make sense to grow this to include all variants currently +/// registered with IANA, if they are at all common to use. 
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum Method {
    /// OPTIONS
    Options,
    /// GET
    Get,
    /// POST
    Post,
    /// PUT
    Put,
    /// DELETE
    Delete,
    /// HEAD
    Head,
    /// TRACE
    Trace,
    /// CONNECT
    Connect,
    /// PATCH
    Patch,
    /// WebDAV COPY
    DavCopy,
    /// WebDAV MKCOL
    DavMkcol,
    /// WebDAV MOVE
    DavMove,
    /// WebDAV PROPFIND
    DavPropfind,
    /// WebDAV PROPPATCH
    DavProppatch,
    /// WebDAV LOCK
    DavLock,
    /// Method extensions. An example would be `let m = Extension("FOO".to_string())`.
    Extension(String)
}

impl AsRef<str> for Method {
    fn as_ref(&self) -> &str {
        match *self {
            Options => "OPTIONS",
            Get => "GET",
            Post => "POST",
            Put => "PUT",
            Delete => "DELETE",
            Head => "HEAD",
            Trace => "TRACE",
            Connect => "CONNECT",
            Patch => "PATCH",
            DavCopy => "COPY",
            DavMkcol => "MKCOL",
            DavMove => "MOVE",
            DavPropfind => "PROPFIND",
            DavProppatch => "PROPPATCH",
            DavLock => "LOCK",
            Extension(ref s) => s.as_ref()
        }
    }
}

impl Method {
    /// Whether a method is considered "safe", meaning the request is
    /// essentially read-only.
    ///
    /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.1)
    /// for more words.
    pub fn safe(&self) -> bool {
        match *self {
            Get | Head | Options | Trace => true,
            _ => false
        }
    }

    /// Whether a method is considered "idempotent", meaning the request has
    /// the same result is executed multiple times.
    ///
    /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for
    /// more words.
    pub fn idempotent(&self) -> bool {
        // Every safe method is also idempotent per RFC 7231 §4.2.2.
        if self.safe() {
            true
        } else {
            match *self {
                Put | Delete => true,
                _ => false
            }
        }
    }
}

impl FromStr for Method {
    type Err = Error;
    fn from_str(s: &str) -> Result<Method, Error> {
        if s == "" {
            // An empty method token is the one string that is never valid;
            // any other unknown token becomes an Extension.
            Err(Error::Method)
        } else {
            Ok(match s {
                "OPTIONS" => Options,
                "GET" => Get,
                "POST" => Post,
                "PUT" => Put,
                "DELETE" => Delete,
                "HEAD" => Head,
                "TRACE" => Trace,
                "CONNECT" => Connect,
                "PATCH" => Patch,
                "COPY" => DavCopy,
                "MKCOL" => DavMkcol,
                "MOVE" => DavMove,
                "PROPFIND" => DavPropfind,
                "PROPPATCH" => DavProppatch,
                "LOCK" => DavLock,
                _ => Extension(s.to_owned())
            })
        }
    }
}

impl fmt::Display for Method {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Display and AsRef<str> agree by construction.
        fmt.write_str(self.as_ref())
    }
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::str::FromStr;
    use error::Error;
    use super::Method;
    use super::Method::{Get, Post, Put, Extension};

    #[test]
    fn test_safe() {
        assert_eq!(true, Get.safe());
        assert_eq!(false, Post.safe());
    }

    #[test]
    fn test_idempotent() {
        assert_eq!(true, Get.idempotent());
        assert_eq!(true, Put.idempotent());
        assert_eq!(false, Post.idempotent());
    }

    #[test]
    fn test_from_str() {
        assert_eq!(Get, FromStr::from_str("GET").unwrap());
        assert_eq!(Extension("TEST".to_owned()),
                   FromStr::from_str("TEST").unwrap());
        let x: Result<Method, Error> = FromStr::from_str("");
        if let Err(Error::Method) = x {
        } else {
            panic!("An empty method is invalid!")
        }
    }

    #[test]
    fn test_fmt() {
        assert_eq!("GET".to_owned(), format!("{}", Get));
        assert_eq!("TEST".to_owned(),
                   format!("{}", Extension("TEST".to_owned())));
    }

    #[test]
    fn test_hashable() {
        let mut counter: HashMap<Method, usize> = HashMap::new();
        counter.insert(Get, 1);
        assert_eq!(Some(&1), counter.get(&Get));
    }

    #[test]
    fn test_as_str() {
        assert_eq!(Get.as_ref(), "GET");
        assert_eq!(Post.as_ref(), "POST");
        assert_eq!(Put.as_ref(), "PUT");
assert_eq!(Extension("TEST".to_owned()).as_ref(), "TEST"); + } +} diff --git a/vendor/hyper-0.10.16/src/net.rs b/vendor/hyper-0.10.16/src/net.rs new file mode 100644 index 0000000..5e8c7f5 --- /dev/null +++ b/vendor/hyper-0.10.16/src/net.rs @@ -0,0 +1,638 @@ +//! A collection of traits abstracting over Listeners and Streams. +use std::any::{Any, TypeId}; +use std::fmt; +use std::io::{self, ErrorKind, Read, Write}; +use std::net::{SocketAddr, ToSocketAddrs, TcpStream, TcpListener, Shutdown}; +use std::mem; +use std::sync::Arc; + +use std::time::Duration; + +use typeable::Typeable; +use traitobject; + +/// The write-status indicating headers have not been written. +pub enum Fresh {} + +/// The write-status indicating headers have been written. +pub enum Streaming {} + +/// An abstraction to listen for connections on a certain port. +pub trait NetworkListener: Clone { + /// The stream produced for each connection. + type Stream: NetworkStream + Send + Clone; + + /// Returns an iterator of streams. + fn accept(&mut self) -> ::Result; + + /// Get the address this Listener ended up listening on. + fn local_addr(&mut self) -> io::Result; + + /// Returns an iterator over incoming connections. + fn incoming(&mut self) -> NetworkConnections { + NetworkConnections(self) + } + + /// Sets the read timeout for all streams that are accepted + fn set_read_timeout(&mut self, _: Option) { + // This default implementation is only here to prevent the addition of + // these methods from being a breaking change. They should be removed + // when the next breaking release is made. + warn!("Ignoring read timeout"); + } + + /// Sets the write timeout for all streams that are accepted + fn set_write_timeout(&mut self, _: Option) { + // This default implementation is only here to prevent the addition of + // these methods from being a breaking change. They should be removed + // when the next breaking release is made. 
        warn!("Ignoring write timeout");
    }
}

/// An iterator wrapper over a `NetworkAcceptor`.
pub struct NetworkConnections<'a, N: NetworkListener + 'a>(&'a mut N);

impl<'a, N: NetworkListener + 'a> Iterator for NetworkConnections<'a, N> {
    type Item = ::Result<N::Stream>;
    // Never returns None: an accept() error is yielded as Some(Err(..)),
    // so callers must decide themselves when to stop iterating.
    fn next(&mut self) -> Option<::Result<N::Stream>> {
        Some(self.0.accept())
    }
}

/// An abstraction over streams that a `Server` can utilize.
pub trait NetworkStream: Read + Write + Any + Send + Typeable {
    /// Get the remote address of the underlying connection.
    fn peer_addr(&mut self) -> io::Result<SocketAddr>;

    /// Set the maximum time to wait for a read to complete.
    fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()>;

    /// Set the maximum time to wait for a write to complete.
    fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()>;

    /// This will be called when Stream should no longer be kept alive.
    #[inline]
    fn close(&mut self, _how: Shutdown) -> io::Result<()> {
        Ok(())
    }

    // Unsure about name and implementation...

    #[doc(hidden)]
    fn set_previous_response_expected_no_content(&mut self, _expected: bool) { }

    #[doc(hidden)]
    fn previous_response_expected_no_content(&self) -> bool {
        false
    }
}

/// A connector creates a NetworkStream.
pub trait NetworkConnector {
    /// Type of `Stream` to create
    type Stream: Into<Box<NetworkStream + Send>>;

    /// Connect to a remote address.
+ fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result; +} + +impl From for Box { + fn from(s: T) -> Box { + Box::new(s) + } +} + +impl fmt::Debug for Box { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.pad("Box") + } +} + +impl NetworkStream { + unsafe fn downcast_ref_unchecked(&self) -> &T { + mem::transmute(traitobject::data(self)) + } + + unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { + mem::transmute(traitobject::data_mut(self)) + } + + unsafe fn downcast_unchecked(self: Box) -> Box { + let raw: *mut NetworkStream = mem::transmute(self); + mem::transmute(traitobject::data_mut(raw)) + } +} + +impl NetworkStream { + /// Is the underlying type in this trait object a `T`? + #[inline] + pub fn is(&self) -> bool { + (*self).get_type() == TypeId::of::() + } + + /// If the underlying type is `T`, get a reference to the contained data. + #[inline] + pub fn downcast_ref(&self) -> Option<&T> { + if self.is::() { + Some(unsafe { self.downcast_ref_unchecked() }) + } else { + None + } + } + + /// If the underlying type is `T`, get a mutable reference to the contained + /// data. + #[inline] + pub fn downcast_mut(&mut self) -> Option<&mut T> { + if self.is::() { + Some(unsafe { self.downcast_mut_unchecked() }) + } else { + None + } + } + + /// If the underlying type is `T`, extract it. + #[inline] + pub fn downcast(self: Box) + -> Result, Box> { + if self.is::() { + Ok(unsafe { self.downcast_unchecked() }) + } else { + Err(self) + } + } +} + +impl NetworkStream + Send { + unsafe fn downcast_ref_unchecked(&self) -> &T { + mem::transmute(traitobject::data(self)) + } + + unsafe fn downcast_mut_unchecked(&mut self) -> &mut T { + mem::transmute(traitobject::data_mut(self)) + } + + unsafe fn downcast_unchecked(self: Box) -> Box { + let raw: *mut NetworkStream = mem::transmute(self); + mem::transmute(traitobject::data_mut(raw)) + } +} + +impl NetworkStream + Send { + /// Is the underlying type in this trait object a `T`? 
+ #[inline] + pub fn is(&self) -> bool { + (*self).get_type() == TypeId::of::() + } + + /// If the underlying type is `T`, get a reference to the contained data. + #[inline] + pub fn downcast_ref(&self) -> Option<&T> { + if self.is::() { + Some(unsafe { self.downcast_ref_unchecked() }) + } else { + None + } + } + + /// If the underlying type is `T`, get a mutable reference to the contained + /// data. + #[inline] + pub fn downcast_mut(&mut self) -> Option<&mut T> { + if self.is::() { + Some(unsafe { self.downcast_mut_unchecked() }) + } else { + None + } + } + + /// If the underlying type is `T`, extract it. + #[inline] + pub fn downcast(self: Box) + -> Result, Box> { + if self.is::() { + Ok(unsafe { self.downcast_unchecked() }) + } else { + Err(self) + } + } +} + +/// A `NetworkListener` for `HttpStream`s. +#[derive(Clone)] +pub struct HttpListener { + listener: Arc, + + read_timeout : Option, + write_timeout: Option, +} + +impl From for HttpListener { + fn from(listener: TcpListener) -> HttpListener { + HttpListener { + listener: Arc::new(listener), + + read_timeout : None, + write_timeout: None, + } + } +} + +impl HttpListener { + /// Start listening to an address over HTTP. 
+ pub fn new(addr: To) -> ::Result { + Ok(HttpListener::from(try!(TcpListener::bind(addr)))) + } +} + +impl NetworkListener for HttpListener { + type Stream = HttpStream; + + #[inline] + fn accept(&mut self) -> ::Result { + let stream = HttpStream(try!(self.listener.accept()).0); + try!(stream.set_read_timeout(self.read_timeout)); + try!(stream.set_write_timeout(self.write_timeout)); + Ok(stream) + } + + #[inline] + fn local_addr(&mut self) -> io::Result { + self.listener.local_addr() + } + + fn set_read_timeout(&mut self, duration: Option) { + self.read_timeout = duration; + } + + fn set_write_timeout(&mut self, duration: Option) { + self.write_timeout = duration; + } +} + +#[cfg(windows)] +impl ::std::os::windows::io::AsRawSocket for HttpListener { + fn as_raw_socket(&self) -> ::std::os::windows::io::RawSocket { + self.listener.as_raw_socket() + } +} + +#[cfg(windows)] +impl ::std::os::windows::io::FromRawSocket for HttpListener { + unsafe fn from_raw_socket(sock: ::std::os::windows::io::RawSocket) -> HttpListener { + HttpListener::from(TcpListener::from_raw_socket(sock)) + } +} + +#[cfg(unix)] +impl ::std::os::unix::io::AsRawFd for HttpListener { + fn as_raw_fd(&self) -> ::std::os::unix::io::RawFd { + self.listener.as_raw_fd() + } +} + +#[cfg(unix)] +impl ::std::os::unix::io::FromRawFd for HttpListener { + unsafe fn from_raw_fd(fd: ::std::os::unix::io::RawFd) -> HttpListener { + HttpListener::from(TcpListener::from_raw_fd(fd)) + } +} + +/// A wrapper around a `TcpStream`. 
/// A plain-TCP `NetworkStream`; a thin newtype over `std::net::TcpStream`.
pub struct HttpStream(pub TcpStream);

impl Clone for HttpStream {
    #[inline]
    fn clone(&self) -> HttpStream {
        // try_clone() duplicates the OS handle; both clones share one socket.
        // NOTE(review): the unwrap() means clone() panics if the OS refuses
        // to duplicate the handle — kept as-is from upstream.
        HttpStream(self.0.try_clone().unwrap())
    }
}

impl fmt::Debug for HttpStream {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("HttpStream(_)")
    }
}

impl Read for HttpStream {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
}

impl Write for HttpStream {
    #[inline]
    fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
        self.0.write(msg)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        self.0.flush()
    }
}

#[cfg(windows)]
impl ::std::os::windows::io::AsRawSocket for HttpStream {
    fn as_raw_socket(&self) -> ::std::os::windows::io::RawSocket {
        self.0.as_raw_socket()
    }
}

#[cfg(windows)]
impl ::std::os::windows::io::FromRawSocket for HttpStream {
    unsafe fn from_raw_socket(sock: ::std::os::windows::io::RawSocket) -> HttpStream {
        HttpStream(TcpStream::from_raw_socket(sock))
    }
}

#[cfg(unix)]
impl ::std::os::unix::io::AsRawFd for HttpStream {
    fn as_raw_fd(&self) -> ::std::os::unix::io::RawFd {
        self.0.as_raw_fd()
    }
}

#[cfg(unix)]
impl ::std::os::unix::io::FromRawFd for HttpStream {
    unsafe fn from_raw_fd(fd: ::std::os::unix::io::RawFd) -> HttpStream {
        HttpStream(TcpStream::from_raw_fd(fd))
    }
}

impl NetworkStream for HttpStream {
    #[inline]
    fn peer_addr(&mut self) -> io::Result<SocketAddr> {
        self.0.peer_addr()
    }

    #[inline]
    fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_read_timeout(dur)
    }

    #[inline]
    fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.0.set_write_timeout(dur)
    }

    #[inline]
    fn close(&mut self, how: Shutdown) -> io::Result<()> {
        match self.0.shutdown(how) {
            Ok(_) => Ok(()),
            // Shutting down an already-dead socket is not an error here;
            // see https://github.com/hyperium/hyper/issues/508
            Err(ref e) if e.kind() == ErrorKind::NotConnected => Ok(()),
            err => err
        }
    }
}

/// A connector that will produce HttpStreams.
#[derive(Debug, Clone, Default)]
pub struct HttpConnector;

impl NetworkConnector for HttpConnector {
    type Stream = HttpStream;

    fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result<HttpStream> {
        let addr = &(host, port);
        // Only the plain "http" scheme is supported; anything else (notably
        // "https") must go through HttpsConnector instead.
        Ok(try!(match scheme {
            "http" => {
                debug!("http scheme");
                Ok(HttpStream(try!(TcpStream::connect(addr))))
            },
            _ => {
                Err(io::Error::new(io::ErrorKind::InvalidInput,
                                "Invalid scheme for Http"))
            }
        }))
    }
}

/// A closure as a connector used to generate `TcpStream`s per request
///
/// # Example
///
/// Basic example:
///
/// ```no_run
/// Client::with_connector(|addr: &str, port: u16, scheme: &str| {
///     TcpStream::connect(&(addr, port))
/// });
/// ```
///
/// Example using `TcpBuilder` from the net2 crate if you want to configure your source socket:
///
/// ```no_run
/// Client::with_connector(|addr: &str, port: u16, scheme: &str| {
///     let b = try!(TcpBuilder::new_v4());
///     try!(b.bind("127.0.0.1:0"));
///     b.connect(&(addr, port))
/// });
/// ```
impl<F> NetworkConnector for F where F: Fn(&str, u16, &str) -> io::Result<TcpStream> {
    type Stream = HttpStream;

    fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result<HttpStream> {
        Ok(HttpStream(try!((*self)(host, port, scheme))))
    }
}

/// An abstraction to allow any SSL implementation to be used with client-side HttpsStreams.
pub trait SslClient<T: NetworkStream + Send + Clone = HttpStream> {
    /// The protected stream.
    type Stream: NetworkStream + Send + Clone;
    /// Wrap a client stream with SSL.
    fn wrap_client(&self, stream: T, host: &str) -> ::Result<Self::Stream>;
}

/// An abstraction to allow any SSL implementation to be used with server-side HttpsStreams.
pub trait SslServer<T: NetworkStream + Send + Clone = HttpStream> {
    /// The protected stream.
    type Stream: NetworkStream + Send + Clone;
    /// Wrap a server stream with SSL.
    fn wrap_server(&self, stream: T) -> ::Result<Self::Stream>;
}

/// A stream over the HTTP protocol, possibly protected by SSL.
#[derive(Debug, Clone)]
pub enum HttpsStream<S: NetworkStream> {
    /// A plain text stream.
+ Http(HttpStream), + /// A stream protected by SSL. + Https(S) +} + +impl Read for HttpsStream { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> io::Result { + match *self { + HttpsStream::Http(ref mut s) => s.read(buf), + HttpsStream::Https(ref mut s) => s.read(buf) + } + } +} + +impl Write for HttpsStream { + #[inline] + fn write(&mut self, msg: &[u8]) -> io::Result { + match *self { + HttpsStream::Http(ref mut s) => s.write(msg), + HttpsStream::Https(ref mut s) => s.write(msg) + } + } + + #[inline] + fn flush(&mut self) -> io::Result<()> { + match *self { + HttpsStream::Http(ref mut s) => s.flush(), + HttpsStream::Https(ref mut s) => s.flush() + } + } +} + +impl NetworkStream for HttpsStream { + #[inline] + fn peer_addr(&mut self) -> io::Result { + match *self { + HttpsStream::Http(ref mut s) => s.peer_addr(), + HttpsStream::Https(ref mut s) => s.peer_addr() + } + } + + #[inline] + fn set_read_timeout(&self, dur: Option) -> io::Result<()> { + match *self { + HttpsStream::Http(ref inner) => inner.0.set_read_timeout(dur), + HttpsStream::Https(ref inner) => inner.set_read_timeout(dur) + } + } + + #[inline] + fn set_write_timeout(&self, dur: Option) -> io::Result<()> { + match *self { + HttpsStream::Http(ref inner) => inner.0.set_write_timeout(dur), + HttpsStream::Https(ref inner) => inner.set_write_timeout(dur) + } + } + + #[inline] + fn close(&mut self, how: Shutdown) -> io::Result<()> { + match *self { + HttpsStream::Http(ref mut s) => s.close(how), + HttpsStream::Https(ref mut s) => s.close(how) + } + } +} + +/// A Http Listener over SSL. +#[derive(Clone)] +pub struct HttpsListener { + listener: HttpListener, + ssl: S, +} + +impl HttpsListener { + /// Start listening to an address over HTTPS. + pub fn new(addr: To, ssl: S) -> ::Result> { + HttpListener::new(addr).map(|l| HttpsListener { + listener: l, + ssl: ssl + }) + } + + /// Construct an HttpsListener from a bound `TcpListener`. 
+ pub fn with_listener(listener: HttpListener, ssl: S) -> HttpsListener { + HttpsListener { + listener: listener, + ssl: ssl + } + } +} + +impl NetworkListener for HttpsListener { + type Stream = S::Stream; + + #[inline] + fn accept(&mut self) -> ::Result { + self.listener.accept().and_then(|s| self.ssl.wrap_server(s)) + } + + #[inline] + fn local_addr(&mut self) -> io::Result { + self.listener.local_addr() + } + + fn set_read_timeout(&mut self, duration: Option) { + self.listener.set_read_timeout(duration) + } + + fn set_write_timeout(&mut self, duration: Option) { + self.listener.set_write_timeout(duration) + } +} + +/// A connector that can protect HTTP streams using SSL. +#[derive(Debug, Default)] +pub struct HttpsConnector { + ssl: S, + connector: C, +} + +impl HttpsConnector { + /// Create a new connector using the provided SSL implementation. + pub fn new(s: S) -> HttpsConnector { + HttpsConnector::with_connector(s, HttpConnector) + } +} + +impl HttpsConnector { + /// Create a new connector using the provided SSL implementation. 
+ pub fn with_connector(s: S, connector: C) -> HttpsConnector { + HttpsConnector { ssl: s, connector: connector } + } +} + +impl> NetworkConnector for HttpsConnector { + type Stream = HttpsStream; + + fn connect(&self, host: &str, port: u16, scheme: &str) -> ::Result { + let stream = try!(self.connector.connect(host, port, "http")); + if scheme == "https" { + debug!("https scheme"); + self.ssl.wrap_client(stream, host).map(HttpsStream::Https) + } else { + Ok(HttpsStream::Http(stream)) + } + } +} + + +#[doc(hidden)] +pub type DefaultConnector = HttpConnector; + +#[cfg(test)] +mod tests { + use mock::MockStream; + use super::{NetworkStream}; + + #[test] + fn test_downcast_box_stream() { + // FIXME: Use Type ascription + let stream: Box = Box::new(MockStream::new()); + + let mock = stream.downcast::().ok().unwrap(); + assert_eq!(mock, Box::new(MockStream::new())); + } + + #[test] + fn test_downcast_unchecked_box_stream() { + // FIXME: Use Type ascription + let stream: Box = Box::new(MockStream::new()); + + let mock = unsafe { stream.downcast_unchecked::() }; + assert_eq!(mock, Box::new(MockStream::new())); + } +} + diff --git a/vendor/hyper-0.10.16/src/server/listener.rs b/vendor/hyper-0.10.16/src/server/listener.rs new file mode 100644 index 0000000..fe666bf --- /dev/null +++ b/vendor/hyper-0.10.16/src/server/listener.rs @@ -0,0 +1,111 @@ +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::Duration; +use std::sync::Arc; +use std::thread; + +use net::NetworkListener; + + +pub struct ListenerPool { + acceptor: A +} + +impl ListenerPool { + /// Create a thread pool to manage the acceptor. + pub fn new(acceptor: A) -> ListenerPool { + ListenerPool { acceptor: acceptor } + } + + /// Runs the acceptor pool. Blocks until the acceptors are closed. + /// + /// ## Panics + /// + /// Panics if max_threads == 0. 
+ pub fn accept(mut self, work: F, max_threads: usize) + where F: Fn(A::Stream) + Send + Sync + 'static { + assert!(max_threads != 0, "Can't accept on 0 threads."); + + let work = Arc::new(work); + let live_threads = Arc::new(AtomicUsize::new(0)); + let free_threads = Arc::new(AtomicUsize::new(0)); + let (send, recv) = crossbeam_channel::bounded(20); + + loop { + let msg = match self.acceptor.accept() { + Ok(stream) => stream, + Err(e) => { + info!("Connection failed: {}", e); + continue; + } + }; + + let free = free_threads.load(Ordering::Acquire); + let live = live_threads.load(Ordering::SeqCst); + // eprintln!("free = {}, live = {}", free, live); + if (live == 0 || free == 0) && live != max_threads { + spawn_with::(recv.clone(), work.clone(), live_threads.clone(), free_threads.clone(), msg); + } else { + let _ = send.send(msg); + } + } + } +} + +fn spawn_with(recv: crossbeam_channel::Receiver, work: Arc, live_threads: Arc, free_threads: Arc, first: A::Stream) +where A: NetworkListener + Send + 'static, + F: Fn(::Stream) + Send + Sync + 'static { + thread::spawn(move || { + let thread_id = live_threads.fetch_add(1, Ordering::SeqCst); + let _sentinel = LiveSentinel { live_threads }; + + let mut _free_sentinel = FreeSentinel { free_threads: &free_threads, subbed: true }; + work(first); + _free_sentinel.unsub(); + + loop { + let stream = match if thread_id == 0 { + recv.recv().ok() // infallible + } else { + recv.recv_timeout(Duration::from_secs((thread_id * 5).min(300) as u64)).ok() + } { + None => return, + Some(stream) => stream, + }; + + _free_sentinel.sub(); + work(stream); + _free_sentinel.unsub(); + } + }); +} + +struct LiveSentinel { + live_threads: Arc, +} +impl Drop for LiveSentinel { + fn drop(&mut self) { + self.live_threads.fetch_sub(1, Ordering::SeqCst); + } +} + +struct FreeSentinel<'t> { + free_threads: &'t Arc, + subbed: bool, +} +impl<'t> FreeSentinel<'t> { + fn sub(&mut self) { + self.free_threads.fetch_sub(1, Ordering::AcqRel); + self.subbed = 
true; + } + fn unsub(&mut self) { + self.free_threads.fetch_add(1, Ordering::AcqRel); + self.subbed = false; + } +} +impl<'t> Drop for FreeSentinel<'t> { + fn drop(&mut self) { + if !self.subbed { + self.free_threads.fetch_sub(1, Ordering::AcqRel); + } + } +} diff --git a/vendor/hyper-0.10.16/src/server/mod.rs b/vendor/hyper-0.10.16/src/server/mod.rs new file mode 100644 index 0000000..5454cbe --- /dev/null +++ b/vendor/hyper-0.10.16/src/server/mod.rs @@ -0,0 +1,494 @@ +//! HTTP Server +//! +//! # Server +//! +//! A `Server` is created to listen on port, parse HTTP requests, and hand +//! them off to a `Handler`. By default, the Server will listen across multiple +//! threads, but that can be configured to a single thread if preferred. +//! +//! # Handling requests +//! +//! You must pass a `Handler` to the Server that will handle requests. There is +//! a default implementation for `fn`s and closures, allowing you pass one of +//! those easily. +//! +//! +//! ```no_run +//! use hyper::server::{Server, Request, Response}; +//! +//! fn hello(req: Request, res: Response) { +//! // handle things here +//! } +//! +//! Server::http("0.0.0.0:0").unwrap().handle(hello).unwrap(); +//! ``` +//! +//! As with any trait, you can also define a struct and implement `Handler` +//! directly on your own type, and pass that to the `Server` instead. +//! +//! ```no_run +//! use std::sync::Mutex; +//! use std::sync::mpsc::{channel, Sender}; +//! use hyper::server::{Handler, Server, Request, Response}; +//! +//! struct SenderHandler { +//! sender: Mutex> +//! } +//! +//! impl Handler for SenderHandler { +//! fn handle(&self, req: Request, res: Response) { +//! self.sender.lock().unwrap().send("start").unwrap(); +//! } +//! } +//! +//! +//! let (tx, rx) = channel(); +//! Server::http("0.0.0.0:0").unwrap().handle(SenderHandler { +//! sender: Mutex::new(tx) +//! }).unwrap(); +//! ``` +//! +//! Since the `Server` will be listening on multiple threads, the `Handler` +//! 
must implement `Sync`: any mutable state must be synchronized. +//! +//! ```no_run +//! use std::sync::atomic::{AtomicUsize, Ordering}; +//! use hyper::server::{Server, Request, Response}; +//! +//! let counter = AtomicUsize::new(0); +//! Server::http("0.0.0.0:0").unwrap().handle(move |req: Request, res: Response| { +//! counter.fetch_add(1, Ordering::Relaxed); +//! }).unwrap(); +//! ``` +//! +//! # The `Request` and `Response` pair +//! +//! A `Handler` receives a pair of arguments, a `Request` and a `Response`. The +//! `Request` includes access to the `method`, `uri`, and `headers` of the +//! incoming HTTP request. It also implements `std::io::Read`, in order to +//! read any body, such as with `POST` or `PUT` messages. +//! +//! Likewise, the `Response` includes ways to set the `status` and `headers`, +//! and implements `std::io::Write` to allow writing the response body. +//! +//! ```no_run +//! use std::io; +//! use hyper::server::{Server, Request, Response}; +//! use hyper::status::StatusCode; +//! +//! Server::http("0.0.0.0:0").unwrap().handle(|mut req: Request, mut res: Response| { +//! match req.method { +//! hyper::Post => { +//! io::copy(&mut req, &mut res.start().unwrap()).unwrap(); +//! }, +//! _ => *res.status_mut() = StatusCode::MethodNotAllowed +//! } +//! }).unwrap(); +//! ``` +//! +//! ## An aside: Write Status +//! +//! The `Response` uses a phantom type parameter to determine its write status. +//! What does that mean? In short, it ensures you never write a body before +//! adding all headers, and never add a header after writing some of the body. +//! +//! This is often done in most implementations by include a boolean property +//! on the response, such as `headers_written`, checking that each time the +//! body has something to write, so as to make sure the headers are sent once, +//! and only once. But this has 2 downsides: +//! +//! 1. You are typically never notified that your late header is doing nothing. +//! 2. 
There's a runtime cost to checking on every write. +//! +//! Instead, hyper handles this statically, or at compile-time. A +//! `Response` includes a `headers_mut()` method, allowing you add more +//! headers. It also does not implement `Write`, so you can't accidentally +//! write early. Once the "head" of the response is correct, you can "send" it +//! out by calling `start` on the `Response`. This will return a new +//! `Response` object, that no longer has `headers_mut()`, but does +//! implement `Write`. +use std::fmt; +use std::io::{self, ErrorKind, BufWriter, Write}; +use std::net::{SocketAddr, ToSocketAddrs, Shutdown}; +use std::thread::{self, JoinHandle}; +use std::time::Duration; + +use num_cpus; + +pub use self::request::Request; +pub use self::response::Response; + +pub use net::{Fresh, Streaming}; + +use Error; +use buffer::BufReader; +use header::{Headers, Expect, Connection}; +use http; +use method::Method; +use net::{NetworkListener, NetworkStream, HttpListener, HttpsListener, SslServer}; +use status::StatusCode; +use uri::RequestUri; +use version::HttpVersion::Http11; + +use self::listener::ListenerPool; + +pub mod request; +pub mod response; + +mod listener; + +/// A server can listen on a TCP socket. +/// +/// Once listening, it will create a `Request`/`Response` pair for each +/// incoming connection, and hand them to the provided handler. +#[derive(Debug)] +pub struct Server { + listener: L, + timeouts: Timeouts, +} + +#[derive(Clone, Copy, Debug)] +struct Timeouts { + read: Option, + keep_alive: Option, +} + +impl Default for Timeouts { + fn default() -> Timeouts { + Timeouts { + read: None, + keep_alive: Some(Duration::from_secs(5)) + } + } +} + +impl Server { + /// Creates a new server with the provided handler. + #[inline] + pub fn new(listener: L) -> Server { + Server { + listener: listener, + timeouts: Timeouts::default() + } + } + + /// Controls keep-alive for this server. 
+ /// + /// The timeout duration passed will be used to determine how long + /// to keep the connection alive before dropping it. + /// + /// Passing `None` will disable keep-alive. + /// + /// Default is enabled with a 5 second timeout. + #[inline] + pub fn keep_alive(&mut self, timeout: Option) { + self.timeouts.keep_alive = timeout; + } + + /// Sets the read timeout for all Request reads. + pub fn set_read_timeout(&mut self, dur: Option) { + self.listener.set_read_timeout(dur); + self.timeouts.read = dur; + } + + /// Sets the write timeout for all Response writes. + pub fn set_write_timeout(&mut self, dur: Option) { + self.listener.set_write_timeout(dur); + } + + /// Get the address that the server is listening on. + pub fn local_addr(&mut self) -> io::Result { + self.listener.local_addr() + } +} + +impl Server { + /// Creates a new server that will handle `HttpStream`s. + pub fn http(addr: To) -> ::Result> { + HttpListener::new(addr).map(Server::new) + } +} + +impl Server> { + /// Creates a new server that will handle `HttpStream`s over SSL. + /// + /// You can use any SSL implementation, as long as implements `hyper::net::Ssl`. + pub fn https(addr: A, ssl: S) -> ::Result>> { + HttpsListener::new(addr, ssl).map(Server::new) + } +} + +impl Server { + /// Binds to a socket and starts handling connections. + pub fn handle(self, handler: H) -> ::Result { + self.handle_threads(handler, num_cpus::get() * 5 / 4) + } + + /// Binds to a socket and starts handling connections with the provided + /// number of threads. 
+ pub fn handle_threads(self, handler: H, + threads: usize) -> ::Result { + handle(self, handler, threads) + } +} + +fn handle(mut server: Server, handler: H, threads: usize) -> ::Result +where H: Handler + 'static, L: NetworkListener + Send + 'static { + let socket = try!(server.listener.local_addr()); + + debug!("threads = {:?}", threads); + let pool = ListenerPool::new(server.listener); + let worker = Worker::new(handler, server.timeouts); + let work = move |mut stream| worker.handle_connection(&mut stream); + + let guard = thread::spawn(move || pool.accept(work, threads)); + + Ok(Listening { + _guard: Some(guard), + socket: socket, + }) +} + +struct Worker { + handler: H, + timeouts: Timeouts, +} + +impl Worker { + fn new(handler: H, timeouts: Timeouts) -> Worker { + Worker { + handler: handler, + timeouts: timeouts, + } + } + + fn handle_connection(&self, stream: &mut S) where S: NetworkStream + Clone { + debug!("Incoming stream"); + + self.handler.on_connection_start(); + + let addr = match stream.peer_addr() { + Ok(addr) => addr, + Err(e) => { + info!("Peer Name error: {:?}", e); + return; + } + }; + + let stream2: &mut NetworkStream = &mut stream.clone(); + let mut rdr = BufReader::new(stream2); + let mut wrt = BufWriter::new(stream); + + while self.keep_alive_loop(&mut rdr, &mut wrt, addr) { + if let Err(e) = self.set_read_timeout(*rdr.get_ref(), self.timeouts.keep_alive) { + info!("set_read_timeout keep_alive {:?}", e); + break; + } + } + + self.handler.on_connection_end(); + + debug!("keep_alive loop ending for {}", addr); + + if let Err(e) = rdr.get_mut().close(Shutdown::Both) { + info!("failed to close stream: {}", e); + } + } + + fn set_read_timeout(&self, s: &NetworkStream, timeout: Option) -> io::Result<()> { + s.set_read_timeout(timeout) + } + + fn keep_alive_loop(&self, rdr: &mut BufReader<&mut NetworkStream>, + wrt: &mut W, addr: SocketAddr) -> bool { + let req = match Request::new(rdr, addr) { + Ok(req) => req, + Err(Error::Io(ref e)) if 
e.kind() == ErrorKind::ConnectionAborted => { + trace!("tcp closed, cancelling keep-alive loop"); + return false; + } + Err(Error::Io(e)) => { + debug!("ioerror in keepalive loop = {:?}", e); + return false; + } + Err(e) => { + //TODO: send a 400 response + info!("request error = {:?}", e); + return false; + } + }; + + if !self.handle_expect(&req, wrt) { + return false; + } + + if let Err(e) = req.set_read_timeout(self.timeouts.read) { + info!("set_read_timeout {:?}", e); + return false; + } + + let mut keep_alive = self.timeouts.keep_alive.is_some() && + http::should_keep_alive(req.version, &req.headers); + let version = req.version; + let mut res_headers = Headers::new(); + if !keep_alive { + res_headers.set(Connection::close()); + } + { + let mut res = Response::new(wrt, &mut res_headers); + res.version = version; + self.handler.handle(req, res); + } + + // if the request was keep-alive, we need to check that the server agrees + // if it wasn't, then the server cannot force it to be true anyways + if keep_alive { + keep_alive = http::should_keep_alive(version, &res_headers); + } + + debug!("keep_alive = {:?} for {}", keep_alive, addr); + keep_alive + } + + fn handle_expect(&self, req: &Request, wrt: &mut W) -> bool { + if req.version == Http11 && req.headers.get() == Some(&Expect::Continue) { + let status = self.handler.check_continue((&req.method, &req.uri, &req.headers)); + match write!(wrt, "{} {}\r\n\r\n", Http11, status).and_then(|_| wrt.flush()) { + Ok(..) => (), + Err(e) => { + info!("error writing 100-continue: {:?}", e); + return false; + } + } + + if status != StatusCode::Continue { + debug!("non-100 status ({}) for Expect 100 request", status); + return false; + } + } + + true + } +} + +/// A listening server, which can later be closed. +pub struct Listening { + _guard: Option>, + /// The socket addresses that the server is bound to. 
+    pub socket: SocketAddr,
+}
+
+impl fmt::Debug for Listening {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Listening {{ socket: {:?} }}", self.socket)
+    }
+}
+
+impl Drop for Listening {
+    fn drop(&mut self) {
+        // Join the acceptor thread if `close()` was never called.
+        // NOTE(review): the acceptor loop in listener.rs never returns on its
+        // own, so dropping a `Listening` without a prior `close()` looks like
+        // it blocks forever -- TODO confirm this is intended (the server
+        // normally lives for the whole process).
+        let _ = self._guard.take().map(|g| g.join());
+    }
+}
+
+impl Listening {
+    /// Warning: This function doesn't work. The server remains listening after you called
+    /// it. See https://github.com/hyperium/hyper/issues/338 for more details.
+    ///
+    /// Stop the server from listening to its socket address.
+    pub fn close(&mut self) -> ::Result<()> {
+        // Detaches the acceptor thread (drops the JoinHandle) so the
+        // subsequent Drop of `Listening` no longer tries to join it.
+        let _ = self._guard.take();
+        debug!("closing server");
+        Ok(())
+    }
+}
+
+/// A handler that can handle incoming requests for a server.
+pub trait Handler: Sync + Send {
+    /// Receives a `Request`/`Response` pair, and should perform some action on them.
+    ///
+    /// This could be reading from the request, and writing to the response.
+    fn handle<'a, 'k>(&'a self, Request<'a, 'k>, Response<'a, Fresh>);
+
+    /// Called when a Request includes a `Expect: 100-continue` header.
+    ///
+    /// By default, this will always immediately respond with a `StatusCode::Continue`,
+    /// but can be overridden with custom behavior.
+ fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { + StatusCode::Continue + } + + /// This is run after a connection is received, on a per-connection basis (not a + /// per-request basis, as a connection with keep-alive may handle multiple + /// requests) + fn on_connection_start(&self) { } + + /// This is run before a connection is closed, on a per-connection basis (not a + /// per-request basis, as a connection with keep-alive may handle multiple + /// requests) + fn on_connection_end(&self) { } +} + +impl Handler for F where F: Fn(Request, Response), F: Sync + Send { + fn handle<'a, 'k>(&'a self, req: Request<'a, 'k>, res: Response<'a, Fresh>) { + self(req, res) + } +} + +#[cfg(test)] +mod tests { + use header::Headers; + use method::Method; + use mock::MockStream; + use status::StatusCode; + use uri::RequestUri; + + use super::{Request, Response, Fresh, Handler, Worker}; + + #[test] + fn test_check_continue_default() { + let mut mock = MockStream::with_input(b"\ + POST /upload HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Expect: 100-continue\r\n\ + Content-Length: 10\r\n\ + \r\n\ + 1234567890\ + "); + + fn handle(_: Request, res: Response) { + res.start().unwrap().end().unwrap(); + } + + Worker::new(handle, Default::default()).handle_connection(&mut mock); + let cont = b"HTTP/1.1 100 Continue\r\n\r\n"; + assert_eq!(&mock.write[..cont.len()], cont); + let res = b"HTTP/1.1 200 OK\r\n"; + assert_eq!(&mock.write[cont.len()..cont.len() + res.len()], res); + } + + #[test] + fn test_check_continue_reject() { + struct Reject; + impl Handler for Reject { + fn handle<'a, 'k>(&'a self, _: Request<'a, 'k>, res: Response<'a, Fresh>) { + res.start().unwrap().end().unwrap(); + } + + fn check_continue(&self, _: (&Method, &RequestUri, &Headers)) -> StatusCode { + StatusCode::ExpectationFailed + } + } + + let mut mock = MockStream::with_input(b"\ + POST /upload HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Expect: 100-continue\r\n\ + Content-Length: 
10\r\n\ + \r\n\ + 1234567890\ + "); + + Worker::new(Reject, Default::default()).handle_connection(&mut mock); + assert_eq!(mock.write, &b"HTTP/1.1 417 Expectation Failed\r\n\r\n"[..]); + } +} diff --git a/vendor/hyper-0.10.16/src/server/request.rs b/vendor/hyper-0.10.16/src/server/request.rs new file mode 100644 index 0000000..c632d27 --- /dev/null +++ b/vendor/hyper-0.10.16/src/server/request.rs @@ -0,0 +1,307 @@ +//! Server Requests +//! +//! These are requests that a `hyper::Server` receives, and include its method, +//! target URI, headers, and message body. +use std::io::{self, Read}; +use std::net::SocketAddr; +use std::time::Duration; + +use buffer::BufReader; +use net::NetworkStream; +use version::{HttpVersion}; +use method::Method; +use header::{Headers, ContentLength, TransferEncoding}; +use http::h1::{self, Incoming, HttpReader}; +use http::h1::HttpReader::{SizedReader, ChunkedReader, EmptyReader}; +use uri::RequestUri; + +/// A request bundles several parts of an incoming `NetworkStream`, given to a `Handler`. +pub struct Request<'a, 'b: 'a> { + /// The IP address of the remote connection. + pub remote_addr: SocketAddr, + /// The `Method`, such as `Get`, `Post`, etc. + pub method: Method, + /// The headers of the incoming request. + pub headers: Headers, + /// The target request-uri for this request. + pub uri: RequestUri, + /// The version of HTTP for this request. + pub version: HttpVersion, + body: HttpReader<&'a mut BufReader<&'b mut NetworkStream>> +} + + +impl<'a, 'b: 'a> Request<'a, 'b> { + /// Create a new Request, reading the StartLine and Headers so they are + /// immediately useful. 
+    pub fn new(stream: &'a mut BufReader<&'b mut NetworkStream>, addr: SocketAddr)
+               -> ::Result<Request<'a, 'b>> {
+
+        let Incoming { version, subject: (method, uri), headers } = try!(h1::parse_request(stream));
+        debug!("Request Line: {:?} {:?} {:?}", method, uri, version);
+        debug!("{:?}", headers);
+
+        // Decide how the message body is framed (RFC 7230 §3.3.3): an
+        // explicit Content-Length wins, then a Transfer-Encoding header
+        // selects chunked decoding, otherwise the request has no body.
+        let body = if headers.has::<ContentLength>() {
+            match headers.get::<ContentLength>() {
+                Some(&ContentLength(len)) => SizedReader(stream, len),
+                None => unreachable!()
+            }
+        } else if headers.has::<TransferEncoding>() {
+            // FIX: this arm previously began with
+            // `todo!("check for Transfer-Encoding: chunked")`, which panics on
+            // *every* request carrying a Transfer-Encoding header before the
+            // reader is ever built -- the chunked-request tests at the bottom
+            // of this file would abort their worker thread. Chunked is the
+            // only transfer coding this server decodes, so hand the stream to
+            // the chunked reader directly.
+            // NOTE(review): a stricter implementation would verify that the
+            // final listed coding is `chunked` and reject the request
+            // otherwise -- TODO confirm against http::h1::HttpReader.
+            ChunkedReader(stream, None)
+        } else {
+            EmptyReader(stream)
+        };
+
+        Ok(Request {
+            remote_addr: addr,
+            method: method,
+            uri: uri,
+            headers: headers,
+            version: version,
+            body: body
+        })
+    }
+
+    /// Set the read timeout of the underlying NetworkStream.
+    #[inline]
+    pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
+        self.body.get_ref().get_ref().set_read_timeout(timeout)
+    }
+
+    /// Get a reference to the underlying `NetworkStream`.
+    #[inline]
+    pub fn downcast_ref<T: NetworkStream>(&self) -> Option<&T> {
+        self.body.get_ref().get_ref().downcast_ref()
+    }
+
+    /// Get a reference to the underlying Ssl stream, if connected
+    /// over HTTPS.
+    ///
+    /// This is actually just an alias for `downcast_ref`.
+    #[inline]
+    pub fn ssl<T: NetworkStream>(&self) -> Option<&T> {
+        self.downcast_ref()
+    }
+
+    /// Deconstruct a Request into its constituent parts.
+ #[inline] + pub fn deconstruct(self) -> (SocketAddr, Method, Headers, + RequestUri, HttpVersion, + HttpReader<&'a mut BufReader<&'b mut NetworkStream>>) { + (self.remote_addr, self.method, self.headers, + self.uri, self.version, self.body) + } +} + +impl<'a, 'b> Read for Request<'a, 'b> { + #[inline] + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.body.read(buf) + } +} + +#[cfg(test)] +mod tests { + use buffer::BufReader; + use header::{Host, TransferEncoding, Encoding}; + use net::NetworkStream; + use mock::MockStream; + use super::Request; + + use std::io::{self, Read}; + use std::net::SocketAddr; + + fn sock(s: &str) -> SocketAddr { + s.parse().unwrap() + } + + fn read_to_string(mut req: Request) -> io::Result { + let mut s = String::new(); + try!(req.read_to_string(&mut s)); + Ok(s) + } + + #[test] + fn test_get_empty_body() { + let mut mock = MockStream::with_input(b"\ + GET / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + \r\n\ + I'm a bad request.\r\n\ + "); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + assert_eq!(read_to_string(req).unwrap(), "".to_owned()); + } + + #[test] + fn test_get_with_body() { + let mut mock = MockStream::with_input(b"\ + GET / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Content-Length: 19\r\n\ + \r\n\ + I'm a good request.\r\n\ + "); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + assert_eq!(read_to_string(req).unwrap(), "I'm a good request.".to_owned()); + } + + #[test] + fn test_head_empty_body() { + let mut mock = MockStream::with_input(b"\ + HEAD / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + \r\n\ + I'm a bad request.\r\n\ + "); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = 
BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + assert_eq!(read_to_string(req).unwrap(), "".to_owned()); + } + + #[test] + fn test_post_empty_body() { + let mut mock = MockStream::with_input(b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + \r\n\ + I'm a bad request.\r\n\ + "); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + assert_eq!(read_to_string(req).unwrap(), "".to_owned()); + } + + #[test] + fn test_parse_chunked_request() { + let mut mock = MockStream::with_input(b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 1\r\n\ + q\r\n\ + 2\r\n\ + we\r\n\ + 2\r\n\ + rt\r\n\ + 0\r\n\ + \r\n" + ); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + + // The headers are correct? + match req.headers.get::() { + Some(host) => { + assert_eq!("example.domain", host.hostname); + }, + None => panic!("Host header expected!"), + }; + match req.headers.get::() { + Some(encodings) => { + assert_eq!(1, encodings.len()); + assert_eq!(Encoding::Chunked, encodings[0]); + } + None => panic!("Transfer-Encoding: chunked expected!"), + }; + // The content is correctly read? + assert_eq!(read_to_string(req).unwrap(), "qwert".to_owned()); + } + + /// Tests that when a chunk size is not a valid radix-16 number, an error + /// is returned. 
+ #[test] + fn test_invalid_chunk_size_not_hex_digit() { + let mut mock = MockStream::with_input(b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + X\r\n\ + 1\r\n\ + 0\r\n\ + \r\n" + ); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + + assert!(read_to_string(req).is_err()); + } + + /// Tests that when a chunk size contains an invalid extension, an error is + /// returned. + #[test] + fn test_invalid_chunk_size_extension() { + let mut mock = MockStream::with_input(b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 1 this is an invalid extension\r\n\ + 1\r\n\ + 0\r\n\ + \r\n" + ); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + + assert!(read_to_string(req).is_err()); + } + + /// Tests that when a valid extension that contains a digit is appended to + /// the chunk size, the chunk is correctly read. + #[test] + fn test_chunk_size_with_extension() { + let mut mock = MockStream::with_input(b"\ + POST / HTTP/1.1\r\n\ + Host: example.domain\r\n\ + Transfer-Encoding: chunked\r\n\ + \r\n\ + 1;this is an extension with a digit 1\r\n\ + 1\r\n\ + 0\r\n\ + \r\n" + ); + + // FIXME: Use Type ascription + let mock: &mut NetworkStream = &mut mock; + let mut stream = BufReader::new(mock); + + let req = Request::new(&mut stream, sock("127.0.0.1:80")).unwrap(); + + assert_eq!(read_to_string(req).unwrap(), "1".to_owned()); + } + +} diff --git a/vendor/hyper-0.10.16/src/server/response.rs b/vendor/hyper-0.10.16/src/server/response.rs new file mode 100644 index 0000000..4d45bff --- /dev/null +++ b/vendor/hyper-0.10.16/src/server/response.rs @@ -0,0 +1,399 @@ +//! Server Responses +//! +//! 
These are responses sent by a `hyper::Server` to clients, after +//! receiving a request. +use std::any::{Any, TypeId}; +use std::marker::PhantomData; +use std::mem; +use std::io::{self, Write}; +use std::ptr; +use std::thread; + +use time::now_utc; + +use header; +use http::h1::{LINE_ENDING, HttpWriter}; +use http::h1::HttpWriter::{ThroughWriter, SizedWriter}; +use status; +use net::{Fresh, Streaming}; +use version; + + +/// The outgoing half for a Tcp connection, created by a `Server` and given to a `Handler`. +/// +/// The default `StatusCode` for a `Response` is `200 OK`. +/// +/// There is a `Drop` implementation for `Response` that will automatically +/// write the head and flush the body, if the handler has not already done so, +/// so that the server doesn't accidentally leave dangling requests. +#[derive(Debug)] +pub struct Response<'a, W: Any = Fresh> { + /// The HTTP version of this response. + pub version: version::HttpVersion, + // Stream the Response is writing to, not accessible through UnwrittenResponse + body: HttpWriter<&'a mut (Write + 'a)>, + // The status code for the request. + status: status::StatusCode, + // The outgoing headers on this response. + headers: &'a mut header::Headers, + + _writing: PhantomData +} + +impl<'a, W: Any> Response<'a, W> { + /// The status of this response. + #[inline] + pub fn status(&self) -> status::StatusCode { self.status } + + /// The headers of this response. + #[inline] + pub fn headers(&self) -> &header::Headers { &*self.headers } + + /// Construct a Response from its constituent parts. + #[inline] + pub fn construct(version: version::HttpVersion, + body: HttpWriter<&'a mut (Write + 'a)>, + status: status::StatusCode, + headers: &'a mut header::Headers) -> Response<'a, Fresh> { + Response { + status: status, + version: version, + body: body, + headers: headers, + _writing: PhantomData, + } + } + + /// Deconstruct this Response into its constituent parts. 
+    #[inline]
+    pub fn deconstruct(self) -> (version::HttpVersion, HttpWriter<&'a mut (Write + 'a)>,
+                                 status::StatusCode, &'a mut header::Headers) {
+        // SAFETY: `Response` has a Drop impl, so its fields cannot simply be
+        // moved out. `ptr::read` duplicates the non-Copy fields, and the
+        // immediate `mem::forget(self)` prevents Drop from ever running, so
+        // each duplicated value keeps exactly one live owner and no
+        // double-drop can occur. Nothing between the reads and the forget
+        // can panic.
+        unsafe {
+            let parts = (
+                self.version,
+                ptr::read(&self.body),
+                self.status,
+                ptr::read(&self.headers)
+            );
+            mem::forget(self);
+            parts
+        }
+    }
+
+    // Serialises the status line and headers into `self.body`, returning the
+    // body framing (a byte count) derived from the status and Content-Length.
+    fn write_head(&mut self) -> io::Result {
+        debug!("writing head: {:?} {:?}", self.version, self.status);
+        try!(write!(&mut self.body, "{} {}\r\n", self.version, self.status));
+
+        // Stamp a Date header unless the handler already provided one.
+        if !self.headers.has::() {
+            self.headers.set(header::Date(header::HttpDate(now_utc())));
+        }
+
+        let body_type = match self.status {
+            // Statuses that forbid a message body are framed as zero bytes.
+            status::StatusCode::NoContent | status::StatusCode::NotModified => Body(0),
+            c if c.class() == status::StatusClass::Informational => Body(0),
+            _ => if let Some(cl) = self.headers.get::() {
+                Body(**cl)
+            } else {
+                // NOTE(review): this vendored copy removed hyper's chunked
+                // response writer (only ThroughWriter/SizedWriter are
+                // imported above), so a response without a Content-Length
+                // header panics here. Presumably every handler in this tree
+                // sets Content-Length -- TODO confirm: the tests at the
+                // bottom of this file still expect Transfer-Encoding framing.
+                panic!("Body::Chunked");
+            }
+        };
+
+        debug!("headers [\n{:?}]", self.headers);
+        try!(write!(&mut self.body, "{}{}", self.headers, LINE_ENDING));
+
+        Ok(body_type)
+    }
+}
+
+impl<'a> Response<'a, Fresh> {
+    /// Creates a new Response that can be used to write to a network stream.
+    #[inline]
+    pub fn new(stream: &'a mut (Write + 'a), headers: &'a mut header::Headers) ->
+            Response<'a, Fresh> {
+        Response {
+            status: status::StatusCode::Ok,
+            version: version::HttpVersion::Http11,
+            headers: headers,
+            body: ThroughWriter(stream),
+            _writing: PhantomData,
+        }
+    }
+
+    /// Writes the body and ends the response.
+    ///
+    /// This is a shortcut method for when you have a response with a fixed
+    /// size, and would only need a single `write` call normally.
+ /// + /// # Example + /// + /// ``` + /// # use hyper::server::Response; + /// fn handler(res: Response) { + /// res.send(b"Hello World!").unwrap(); + /// } + /// ``` + /// + /// The above is the same, but shorter, than the longer: + /// + /// ``` + /// # use hyper::server::Response; + /// use std::io::Write; + /// use hyper::header::ContentLength; + /// fn handler(mut res: Response) { + /// let body = b"Hello World!"; + /// res.headers_mut().set(ContentLength(body.len() as u64)); + /// let mut res = res.start().unwrap(); + /// res.write_all(body).unwrap(); + /// } + /// ``` + #[inline] + pub fn send(self, body: &[u8]) -> io::Result<()> { + self.headers.set(header::ContentLength(body.len() as u64)); + let mut stream = try!(self.start()); + try!(stream.writer().write_all(body)); + stream.end() + } + + /// Consume this Response, writing the Headers and Status and + /// creating a Response + pub fn start(mut self) -> io::Result> { + let body_type = try!(self.write_head()); + let (version, body, status, headers) = self.deconstruct(); + let stream = SizedWriter(body.into_inner(), body_type.0); + + // "copy" to change the phantom type + Ok(Response { + version: version, + body: stream, + status: status, + headers: headers, + _writing: PhantomData, + }) + } + /// Get a mutable reference to the status. + #[inline] + pub fn status_mut(&mut self) -> &mut status::StatusCode { &mut self.status } + + /// Get a mutable reference to the Headers. + #[inline] + pub fn headers_mut(&mut self) -> &mut header::Headers { self.headers } +} + + +impl<'a> Response<'a, Streaming> { + /// Flushes all writing of a response to the client. 
+ #[inline] + pub fn end(self) -> io::Result<()> { + trace!("ending"); + let (_, body, _, _) = self.deconstruct(); + try!(body.end()); + Ok(()) + } +} + +impl<'a> Response<'a, Streaming> { + pub fn writer(&mut self) -> &mut HttpWriter<&'a mut (Write + 'a)> { + &mut self.body + } +} + +#[derive(PartialEq, Debug)] +struct Body(u64); + +impl<'a, T: Any> Drop for Response<'a, T> { + fn drop(&mut self) { + if TypeId::of::() == TypeId::of::() { + if thread::panicking() { + self.status = status::StatusCode::InternalServerError; + if self.headers.get::().is_none() { + self.headers.set(header::ContentLength(0)); + } + } + + let mut body = match self.write_head() { + Ok(Body(len)) => SizedWriter(self.body.get_mut(), len), + Err(e) => { + debug!("error dropping request: {:?}", e); + return; + } + }; + end(&mut body); + } else { + end(&mut self.body); + }; + + + #[inline] + fn end(w: &mut W) { + match w.write(&[]) { + Ok(_) => match w.flush() { + Ok(_) => debug!("drop successful"), + Err(e) => debug!("error dropping request: {:?}", e) + }, + Err(e) => debug!("error dropping request: {:?}", e) + } + } + } +} + +#[cfg(test)] +mod tests { + use header::Headers; + use mock::MockStream; + use super::Response; + + macro_rules! lines { + ($s:ident = $($line:pat),+) => ({ + let s = String::from_utf8($s.write).unwrap(); + let mut lines = s.split_terminator("\r\n"); + + $( + match lines.next() { + Some($line) => (), + other => panic!("line mismatch: {:?} != {:?}", other, stringify!($line)) + } + )+ + + assert_eq!(lines.next(), None); + }) + } + + #[test] + fn test_fresh_start() { + let mut headers = Headers::new(); + let mut stream = MockStream::new(); + { + let res = Response::new(&mut stream, &mut headers); + res.start().unwrap().deconstruct(); + } + + lines! 
{ stream = + "HTTP/1.1 200 OK", + _date, + _transfer_encoding, + "" + } + } + + #[test] + fn test_streaming_end() { + let mut headers = Headers::new(); + let mut stream = MockStream::new(); + { + let res = Response::new(&mut stream, &mut headers); + res.start().unwrap().end().unwrap(); + } + + lines! { stream = + "HTTP/1.1 200 OK", + _date, + _transfer_encoding, + "", + "0", + "" // empty zero body + } + } + + #[test] + fn test_fresh_drop() { + use status::StatusCode; + let mut headers = Headers::new(); + let mut stream = MockStream::new(); + { + let mut res = Response::new(&mut stream, &mut headers); + *res.status_mut() = StatusCode::NotFound; + } + + lines! { stream = + "HTTP/1.1 404 Not Found", + _date, + _transfer_encoding, + "", + "0", + "" // empty zero body + } + } + + // x86 windows msvc does not support unwinding + // See https://github.com/rust-lang/rust/issues/25869 + #[cfg(not(all(windows, target_arch="x86", target_env="msvc")))] + #[test] + fn test_fresh_drop_panicing() { + use std::thread; + use std::sync::{Arc, Mutex}; + + use status::StatusCode; + + let stream = MockStream::new(); + let stream = Arc::new(Mutex::new(stream)); + let inner_stream = stream.clone(); + let join_handle = thread::spawn(move || { + let mut headers = Headers::new(); + let mut stream = inner_stream.lock().unwrap(); + let mut res = Response::new(&mut *stream, &mut headers); + *res.status_mut() = StatusCode::NotFound; + + panic!("inside") + }); + + assert!(join_handle.join().is_err()); + + let stream = match stream.lock() { + Err(poisoned) => poisoned.into_inner().clone(), + Ok(_) => unreachable!() + }; + + lines! 
{ stream = + "HTTP/1.1 500 Internal Server Error", + _date, + _transfer_encoding, + "", + "0", + "" // empty zero body + } + } + + + #[test] + fn test_streaming_drop() { + use std::io::Write; + use status::StatusCode; + let mut headers = Headers::new(); + let mut stream = MockStream::new(); + { + let mut res = Response::new(&mut stream, &mut headers); + *res.status_mut() = StatusCode::NotFound; + let mut stream = res.start().unwrap(); + stream.write_all(b"foo").unwrap(); + } + + lines! { stream = + "HTTP/1.1 404 Not Found", + _date, + _transfer_encoding, + "", + "3", + "foo", + "0", + "" // empty zero body + } + } + + #[test] + fn test_no_content() { + use status::StatusCode; + let mut headers = Headers::new(); + let mut stream = MockStream::new(); + { + let mut res = Response::new(&mut stream, &mut headers); + *res.status_mut() = StatusCode::NoContent; + res.start().unwrap(); + } + + lines! { stream = + "HTTP/1.1 204 No Content", + _date, + "" + } + } +} diff --git a/vendor/hyper-0.10.16/src/status.rs b/vendor/hyper-0.10.16/src/status.rs new file mode 100644 index 0000000..5435182 --- /dev/null +++ b/vendor/hyper-0.10.16/src/status.rs @@ -0,0 +1,748 @@ +//! HTTP status codes +use std::fmt; +use std::cmp::Ordering; + +// shamelessly lifted from Teepee. I tried a few schemes, this really +// does seem like the best. Improved scheme to support arbitrary status codes. + +/// An HTTP status code (`status-code` in RFC 7230 et al.). +/// +/// This enum contains all common status codes and an Unregistered +/// extension variant. It allows status codes in the range [0, 65535], as any +/// `u16` integer may be used as a status code for XHR requests. It is +/// recommended to only use values between [100, 599], since only these are +/// defined as valid status codes with a status class by HTTP. +/// +/// If you encounter a status code that you do not know how to deal with, you +/// should treat it as the `x00` status code—e.g. for code 123, treat it as +/// 100 (Continue). 
This can be achieved with +/// `self.class().default_code()`: +/// +/// ```rust +/// # use hyper::status::StatusCode; +/// let status = StatusCode::Unregistered(123); +/// assert_eq!(status.class().default_code(), StatusCode::Continue); +/// ``` +/// +/// IANA maintain the [Hypertext Transfer Protocol (HTTP) Status Code +/// Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) which is +/// the source for this enum (with one exception, 418 I'm a teapot, which is +/// inexplicably not in the register). +#[derive(Debug, Hash)] +pub enum StatusCode { + /// 100 Continue + /// [[RFC7231, Section 6.2.1](https://tools.ietf.org/html/rfc7231#section-6.2.1)] + Continue, + /// 101 Switching Protocols + /// [[RFC7231, Section 6.2.2](https://tools.ietf.org/html/rfc7231#section-6.2.2)] + SwitchingProtocols, + /// 102 Processing + /// [[RFC2518](https://tools.ietf.org/html/rfc2518)] + Processing, + + /// 200 OK + /// [[RFC7231, Section 6.3.1](https://tools.ietf.org/html/rfc7231#section-6.3.1)] + Ok, + /// 201 Created + /// [[RFC7231, Section 6.3.2](https://tools.ietf.org/html/rfc7231#section-6.3.2)] + Created, + /// 202 Accepted + /// [[RFC7231, Section 6.3.3](https://tools.ietf.org/html/rfc7231#section-6.3.3)] + Accepted, + /// 203 Non-Authoritative Information + /// [[RFC7231, Section 6.3.4](https://tools.ietf.org/html/rfc7231#section-6.3.4)] + NonAuthoritativeInformation, + /// 204 No Content + /// [[RFC7231, Section 6.3.5](https://tools.ietf.org/html/rfc7231#section-6.3.5)] + NoContent, + /// 205 Reset Content + /// [[RFC7231, Section 6.3.6](https://tools.ietf.org/html/rfc7231#section-6.3.6)] + ResetContent, + /// 206 Partial Content + /// [[RFC7233, Section 4.1](https://tools.ietf.org/html/rfc7233#section-4.1)] + PartialContent, + /// 207 Multi-Status + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + MultiStatus, + /// 208 Already Reported + /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] + AlreadyReported, + + /// 226 IM Used + 
/// [[RFC3229](https://tools.ietf.org/html/rfc3229)] + ImUsed, + + /// 300 Multiple Choices + /// [[RFC7231, Section 6.4.1](https://tools.ietf.org/html/rfc7231#section-6.4.1)] + MultipleChoices, + /// 301 Moved Permanently + /// [[RFC7231, Section 6.4.2](https://tools.ietf.org/html/rfc7231#section-6.4.2)] + MovedPermanently, + /// 302 Found + /// [[RFC7231, Section 6.4.3](https://tools.ietf.org/html/rfc7231#section-6.4.3)] + Found, + /// 303 See Other + /// [[RFC7231, Section 6.4.4](https://tools.ietf.org/html/rfc7231#section-6.4.4)] + SeeOther, + /// 304 Not Modified + /// [[RFC7232, Section 4.1](https://tools.ietf.org/html/rfc7232#section-4.1)] + NotModified, + /// 305 Use Proxy + /// [[RFC7231, Section 6.4.5](https://tools.ietf.org/html/rfc7231#section-6.4.5)] + UseProxy, + /// 307 Temporary Redirect + /// [[RFC7231, Section 6.4.7](https://tools.ietf.org/html/rfc7231#section-6.4.7)] + TemporaryRedirect, + /// 308 Permanent Redirect + /// [[RFC7238](https://tools.ietf.org/html/rfc7238)] + PermanentRedirect, + + /// 400 Bad Request + /// [[RFC7231, Section 6.5.1](https://tools.ietf.org/html/rfc7231#section-6.5.1)] + BadRequest, + /// 401 Unauthorized + /// [[RFC7235, Section 3.1](https://tools.ietf.org/html/rfc7235#section-3.1)] + Unauthorized, + /// 402 Payment Required + /// [[RFC7231, Section 6.5.2](https://tools.ietf.org/html/rfc7231#section-6.5.2)] + PaymentRequired, + /// 403 Forbidden + /// [[RFC7231, Section 6.5.3](https://tools.ietf.org/html/rfc7231#section-6.5.3)] + Forbidden, + /// 404 Not Found + /// [[RFC7231, Section 6.5.4](https://tools.ietf.org/html/rfc7231#section-6.5.4)] + NotFound, + /// 405 Method Not Allowed + /// [[RFC7231, Section 6.5.5](https://tools.ietf.org/html/rfc7231#section-6.5.5)] + MethodNotAllowed, + /// 406 Not Acceptable + /// [[RFC7231, Section 6.5.6](https://tools.ietf.org/html/rfc7231#section-6.5.6)] + NotAcceptable, + /// 407 Proxy Authentication Required + /// [[RFC7235, Section 
3.2](https://tools.ietf.org/html/rfc7235#section-3.2)] + ProxyAuthenticationRequired, + /// 408 Request Timeout + /// [[RFC7231, Section 6.5.7](https://tools.ietf.org/html/rfc7231#section-6.5.7)] + RequestTimeout, + /// 409 Conflict + /// [[RFC7231, Section 6.5.8](https://tools.ietf.org/html/rfc7231#section-6.5.8)] + Conflict, + /// 410 Gone + /// [[RFC7231, Section 6.5.9](https://tools.ietf.org/html/rfc7231#section-6.5.9)] + Gone, + /// 411 Length Required + /// [[RFC7231, Section 6.5.10](https://tools.ietf.org/html/rfc7231#section-6.5.10)] + LengthRequired, + /// 412 Precondition Failed + /// [[RFC7232, Section 4.2](https://tools.ietf.org/html/rfc7232#section-4.2)] + PreconditionFailed, + /// 413 Payload Too Large + /// [[RFC7231, Section 6.5.11](https://tools.ietf.org/html/rfc7231#section-6.5.11)] + PayloadTooLarge, + /// 414 URI Too Long + /// [[RFC7231, Section 6.5.12](https://tools.ietf.org/html/rfc7231#section-6.5.12)] + UriTooLong, + /// 415 Unsupported Media Type + /// [[RFC7231, Section 6.5.13](https://tools.ietf.org/html/rfc7231#section-6.5.13)] + UnsupportedMediaType, + /// 416 Range Not Satisfiable + /// [[RFC7233, Section 4.4](https://tools.ietf.org/html/rfc7233#section-4.4)] + RangeNotSatisfiable, + /// 417 Expectation Failed + /// [[RFC7231, Section 6.5.14](https://tools.ietf.org/html/rfc7231#section-6.5.14)] + ExpectationFailed, + /// 418 I'm a teapot + /// [curiously, not registered by IANA, but [RFC2324](https://tools.ietf.org/html/rfc2324)] + ImATeapot, + + /// 421 Misdirected Request + /// [RFC7540, Section 9.1.2](http://tools.ietf.org/html/rfc7540#section-9.1.2) + MisdirectedRequest, + /// 422 Unprocessable Entity + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + UnprocessableEntity, + /// 423 Locked + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + Locked, + /// 424 Failed Dependency + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + FailedDependency, + + /// 426 Upgrade Required + /// [[RFC7231, Section 
6.5.15](https://tools.ietf.org/html/rfc7231#section-6.5.15)] + UpgradeRequired, + + /// 428 Precondition Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + PreconditionRequired, + /// 429 Too Many Requests + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + TooManyRequests, + + /// 431 Request Header Fields Too Large + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + RequestHeaderFieldsTooLarge, + + /// 451 Unavailable For Legal Reasons + /// [[RFC7725](http://tools.ietf.org/html/rfc7725)] + UnavailableForLegalReasons, + + /// 500 Internal Server Error + /// [[RFC7231, Section 6.6.1](https://tools.ietf.org/html/rfc7231#section-6.6.1)] + InternalServerError, + /// 501 Not Implemented + /// [[RFC7231, Section 6.6.2](https://tools.ietf.org/html/rfc7231#section-6.6.2)] + NotImplemented, + /// 502 Bad Gateway + /// [[RFC7231, Section 6.6.3](https://tools.ietf.org/html/rfc7231#section-6.6.3)] + BadGateway, + /// 503 Service Unavailable + /// [[RFC7231, Section 6.6.4](https://tools.ietf.org/html/rfc7231#section-6.6.4)] + ServiceUnavailable, + /// 504 Gateway Timeout + /// [[RFC7231, Section 6.6.5](https://tools.ietf.org/html/rfc7231#section-6.6.5)] + GatewayTimeout, + /// 505 HTTP Version Not Supported + /// [[RFC7231, Section 6.6.6](https://tools.ietf.org/html/rfc7231#section-6.6.6)] + HttpVersionNotSupported, + /// 506 Variant Also Negotiates + /// [[RFC2295](https://tools.ietf.org/html/rfc2295)] + VariantAlsoNegotiates, + /// 507 Insufficient Storage + /// [[RFC4918](https://tools.ietf.org/html/rfc4918)] + InsufficientStorage, + /// 508 Loop Detected + /// [[RFC5842](https://tools.ietf.org/html/rfc5842)] + LoopDetected, + + /// 510 Not Extended + /// [[RFC2774](https://tools.ietf.org/html/rfc2774)] + NotExtended, + /// 511 Network Authentication Required + /// [[RFC6585](https://tools.ietf.org/html/rfc6585)] + NetworkAuthenticationRequired, + + /// A status code not in the IANA HTTP status code registry or very well known + // `ImATeapot` is 
not registered. + Unregistered(u16), +} + +impl StatusCode { + + #[doc(hidden)] + pub fn from_u16(n: u16) -> StatusCode { + match n { + 100 => StatusCode::Continue, + 101 => StatusCode::SwitchingProtocols, + 102 => StatusCode::Processing, + 200 => StatusCode::Ok, + 201 => StatusCode::Created, + 202 => StatusCode::Accepted, + 203 => StatusCode::NonAuthoritativeInformation, + 204 => StatusCode::NoContent, + 205 => StatusCode::ResetContent, + 206 => StatusCode::PartialContent, + 207 => StatusCode::MultiStatus, + 208 => StatusCode::AlreadyReported, + 226 => StatusCode::ImUsed, + 300 => StatusCode::MultipleChoices, + 301 => StatusCode::MovedPermanently, + 302 => StatusCode::Found, + 303 => StatusCode::SeeOther, + 304 => StatusCode::NotModified, + 305 => StatusCode::UseProxy, + 307 => StatusCode::TemporaryRedirect, + 308 => StatusCode::PermanentRedirect, + 400 => StatusCode::BadRequest, + 401 => StatusCode::Unauthorized, + 402 => StatusCode::PaymentRequired, + 403 => StatusCode::Forbidden, + 404 => StatusCode::NotFound, + 405 => StatusCode::MethodNotAllowed, + 406 => StatusCode::NotAcceptable, + 407 => StatusCode::ProxyAuthenticationRequired, + 408 => StatusCode::RequestTimeout, + 409 => StatusCode::Conflict, + 410 => StatusCode::Gone, + 411 => StatusCode::LengthRequired, + 412 => StatusCode::PreconditionFailed, + 413 => StatusCode::PayloadTooLarge, + 414 => StatusCode::UriTooLong, + 415 => StatusCode::UnsupportedMediaType, + 416 => StatusCode::RangeNotSatisfiable, + 417 => StatusCode::ExpectationFailed, + 418 => StatusCode::ImATeapot, + 421 => StatusCode::MisdirectedRequest, + 422 => StatusCode::UnprocessableEntity, + 423 => StatusCode::Locked, + 424 => StatusCode::FailedDependency, + 426 => StatusCode::UpgradeRequired, + 428 => StatusCode::PreconditionRequired, + 429 => StatusCode::TooManyRequests, + 431 => StatusCode::RequestHeaderFieldsTooLarge, + 451 => StatusCode::UnavailableForLegalReasons, + 500 => StatusCode::InternalServerError, + 501 => 
StatusCode::NotImplemented, + 502 => StatusCode::BadGateway, + 503 => StatusCode::ServiceUnavailable, + 504 => StatusCode::GatewayTimeout, + 505 => StatusCode::HttpVersionNotSupported, + 506 => StatusCode::VariantAlsoNegotiates, + 507 => StatusCode::InsufficientStorage, + 508 => StatusCode::LoopDetected, + 510 => StatusCode::NotExtended, + 511 => StatusCode::NetworkAuthenticationRequired, + _ => StatusCode::Unregistered(n), + } + } + + #[doc(hidden)] + pub fn to_u16(&self) -> u16 { + match *self { + StatusCode::Continue => 100, + StatusCode::SwitchingProtocols => 101, + StatusCode::Processing => 102, + StatusCode::Ok => 200, + StatusCode::Created => 201, + StatusCode::Accepted => 202, + StatusCode::NonAuthoritativeInformation => 203, + StatusCode::NoContent => 204, + StatusCode::ResetContent => 205, + StatusCode::PartialContent => 206, + StatusCode::MultiStatus => 207, + StatusCode::AlreadyReported => 208, + StatusCode::ImUsed => 226, + StatusCode::MultipleChoices => 300, + StatusCode::MovedPermanently => 301, + StatusCode::Found => 302, + StatusCode::SeeOther => 303, + StatusCode::NotModified => 304, + StatusCode::UseProxy => 305, + StatusCode::TemporaryRedirect => 307, + StatusCode::PermanentRedirect => 308, + StatusCode::BadRequest => 400, + StatusCode::Unauthorized => 401, + StatusCode::PaymentRequired => 402, + StatusCode::Forbidden => 403, + StatusCode::NotFound => 404, + StatusCode::MethodNotAllowed => 405, + StatusCode::NotAcceptable => 406, + StatusCode::ProxyAuthenticationRequired => 407, + StatusCode::RequestTimeout => 408, + StatusCode::Conflict => 409, + StatusCode::Gone => 410, + StatusCode::LengthRequired => 411, + StatusCode::PreconditionFailed => 412, + StatusCode::PayloadTooLarge => 413, + StatusCode::UriTooLong => 414, + StatusCode::UnsupportedMediaType => 415, + StatusCode::RangeNotSatisfiable => 416, + StatusCode::ExpectationFailed => 417, + StatusCode::ImATeapot => 418, + StatusCode::MisdirectedRequest => 421, + StatusCode::UnprocessableEntity 
=> 422, + StatusCode::Locked => 423, + StatusCode::FailedDependency => 424, + StatusCode::UpgradeRequired => 426, + StatusCode::PreconditionRequired => 428, + StatusCode::TooManyRequests => 429, + StatusCode::RequestHeaderFieldsTooLarge => 431, + StatusCode::UnavailableForLegalReasons => 451, + StatusCode::InternalServerError => 500, + StatusCode::NotImplemented => 501, + StatusCode::BadGateway => 502, + StatusCode::ServiceUnavailable => 503, + StatusCode::GatewayTimeout => 504, + StatusCode::HttpVersionNotSupported => 505, + StatusCode::VariantAlsoNegotiates => 506, + StatusCode::InsufficientStorage => 507, + StatusCode::LoopDetected => 508, + StatusCode::NotExtended => 510, + StatusCode::NetworkAuthenticationRequired => 511, + StatusCode::Unregistered(n) => n, + } + } + + /// Get the standardised `reason-phrase` for this status code. + /// + /// This is mostly here for servers writing responses, but could potentially have application + /// at other times. + /// + /// The reason phrase is defined as being exclusively for human readers. You should avoid + /// deriving any meaning from it at all costs. + /// + /// Bear in mind also that in HTTP/2.0 the reason phrase is abolished from transmission, and so + /// this canonical reason phrase really is the only reason phrase you’ll find. 
+ pub fn canonical_reason(&self) -> Option<&'static str> { + match *self { + StatusCode::Continue => Some("Continue"), + StatusCode::SwitchingProtocols => Some("Switching Protocols"), + StatusCode::Processing => Some("Processing"), + + StatusCode::Ok => Some("OK"), + StatusCode::Created => Some("Created"), + StatusCode::Accepted => Some("Accepted"), + StatusCode::NonAuthoritativeInformation => Some("Non-Authoritative Information"), + StatusCode::NoContent => Some("No Content"), + StatusCode::ResetContent => Some("Reset Content"), + StatusCode::PartialContent => Some("Partial Content"), + StatusCode::MultiStatus => Some("Multi-Status"), + StatusCode::AlreadyReported => Some("Already Reported"), + + StatusCode::ImUsed => Some("IM Used"), + + StatusCode::MultipleChoices => Some("Multiple Choices"), + StatusCode::MovedPermanently => Some("Moved Permanently"), + StatusCode::Found => Some("Found"), + StatusCode::SeeOther => Some("See Other"), + StatusCode::NotModified => Some("Not Modified"), + StatusCode::UseProxy => Some("Use Proxy"), + + StatusCode::TemporaryRedirect => Some("Temporary Redirect"), + StatusCode::PermanentRedirect => Some("Permanent Redirect"), + + StatusCode::BadRequest => Some("Bad Request"), + StatusCode::Unauthorized => Some("Unauthorized"), + StatusCode::PaymentRequired => Some("Payment Required"), + StatusCode::Forbidden => Some("Forbidden"), + StatusCode::NotFound => Some("Not Found"), + StatusCode::MethodNotAllowed => Some("Method Not Allowed"), + StatusCode::NotAcceptable => Some("Not Acceptable"), + StatusCode::ProxyAuthenticationRequired => Some("Proxy Authentication Required"), + StatusCode::RequestTimeout => Some("Request Timeout"), + StatusCode::Conflict => Some("Conflict"), + StatusCode::Gone => Some("Gone"), + StatusCode::LengthRequired => Some("Length Required"), + StatusCode::PreconditionFailed => Some("Precondition Failed"), + StatusCode::PayloadTooLarge => Some("Payload Too Large"), + StatusCode::UriTooLong => Some("URI Too Long"), + 
StatusCode::UnsupportedMediaType => Some("Unsupported Media Type"), + StatusCode::RangeNotSatisfiable => Some("Range Not Satisfiable"), + StatusCode::ExpectationFailed => Some("Expectation Failed"), + StatusCode::ImATeapot => Some("I'm a teapot"), + + StatusCode::MisdirectedRequest => Some("Misdirected Request"), + StatusCode::UnprocessableEntity => Some("Unprocessable Entity"), + StatusCode::Locked => Some("Locked"), + StatusCode::FailedDependency => Some("Failed Dependency"), + + StatusCode::UpgradeRequired => Some("Upgrade Required"), + + StatusCode::PreconditionRequired => Some("Precondition Required"), + StatusCode::TooManyRequests => Some("Too Many Requests"), + + StatusCode::RequestHeaderFieldsTooLarge => Some("Request Header Fields Too Large"), + + StatusCode::UnavailableForLegalReasons => Some("Unavailable For Legal Reasons"), + + StatusCode::InternalServerError => Some("Internal Server Error"), + StatusCode::NotImplemented => Some("Not Implemented"), + StatusCode::BadGateway => Some("Bad Gateway"), + StatusCode::ServiceUnavailable => Some("Service Unavailable"), + StatusCode::GatewayTimeout => Some("Gateway Timeout"), + StatusCode::HttpVersionNotSupported => Some("HTTP Version Not Supported"), + StatusCode::VariantAlsoNegotiates => Some("Variant Also Negotiates"), + StatusCode::InsufficientStorage => Some("Insufficient Storage"), + StatusCode::LoopDetected => Some("Loop Detected"), + + StatusCode::NotExtended => Some("Not Extended"), + StatusCode::NetworkAuthenticationRequired => Some("Network Authentication Required"), + StatusCode::Unregistered(..) => None + } + } + + /// Determine the class of a status code, based on its first digit. 
+ pub fn class(&self) -> StatusClass { + match self.to_u16() { + 100...199 => StatusClass::Informational, + 200...299 => StatusClass::Success, + 300...399 => StatusClass::Redirection, + 400...499 => StatusClass::ClientError, + 500...599 => StatusClass::ServerError, + _ => StatusClass::NoClass, + } + } + + /// Check if class is Informational. + pub fn is_informational(&self) -> bool { + self.class() == StatusClass::Informational + } + + /// Check if class is Success. + pub fn is_success(&self) -> bool { + self.class() == StatusClass::Success + } + + /// Check if class is Redirection. + pub fn is_redirection(&self) -> bool { + self.class() == StatusClass::Redirection + } + + /// Check if class is ClientError. + pub fn is_client_error(&self) -> bool { + self.class() == StatusClass::ClientError + } + + /// Check if class is ServerError. + pub fn is_server_error(&self) -> bool { + self.class() == StatusClass::ServerError + } + + /// Check if class is NoClass + pub fn is_strange_status(&self) -> bool { + self.class() == StatusClass::NoClass + } +} + +impl Copy for StatusCode {} + +/// Formats the status code, *including* the canonical reason. 
+/// +/// ```rust +/// # use hyper::status::StatusCode::{ImATeapot, Unregistered}; +/// assert_eq!(format!("{}", ImATeapot), "418 I'm a teapot"); +/// assert_eq!(format!("{}", Unregistered(123)), +/// "123 "); +/// ``` +impl fmt::Display for StatusCode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.to_u16(), + self.canonical_reason().unwrap_or("")) + } +} + +impl PartialEq for StatusCode { + #[inline] + fn eq(&self, other: &StatusCode) -> bool { + self.to_u16() == other.to_u16() + } +} + +impl Eq for StatusCode {} + +impl Clone for StatusCode { + #[inline] + fn clone(&self) -> StatusCode { + *self + } +} + +impl PartialOrd for StatusCode { + #[inline] + fn partial_cmp(&self, other: &StatusCode) -> Option { + self.to_u16().partial_cmp(&(other.to_u16())) + } +} + +impl Ord for StatusCode { + #[inline] + fn cmp(&self, other: &StatusCode) -> Ordering { + if *self < *other { + Ordering::Less + } else if *self > *other { + Ordering::Greater + } else { + Ordering::Equal + } + } +} + +/// The class of an HTTP `status-code`. +/// +/// [RFC 7231, section 6 (Response Status Codes)](https://tools.ietf.org/html/rfc7231#section-6): +/// +/// > The first digit of the status-code defines the class of response. +/// > The last two digits do not have any categorization role. +/// +/// And: +/// +/// > HTTP status codes are extensible. HTTP clients are not required to +/// > understand the meaning of all registered status codes, though such +/// > understanding is obviously desirable. However, a client MUST +/// > understand the class of any status code, as indicated by the first +/// > digit, and treat an unrecognized status code as being equivalent to +/// > the x00 status code of that class, with the exception that a +/// > recipient MUST NOT cache a response with an unrecognized status code. 
+/// > +/// > For example, if an unrecognized status code of 471 is received by a +/// > client, the client can assume that there was something wrong with its +/// > request and treat the response as if it had received a 400 (Bad +/// > Request) status code. The response message will usually contain a +/// > representation that explains the status. +/// +/// This can be used in cases where a status code’s meaning is unknown, also, +/// to get the appropriate *category* of status. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Copy)] +pub enum StatusClass { + /// 1xx (Informational): The request was received, continuing process + Informational, + + /// 2xx (Success): The request was successfully received, understood, and accepted + Success, + + /// 3xx (Redirection): Further action needs to be taken in order to complete the request + Redirection, + + /// 4xx (Client Error): The request contains bad syntax or cannot be fulfilled + ClientError, + + /// 5xx (Server Error): The server failed to fulfill an apparently valid request + ServerError, + + /// A status code lower than 100 or higher than 599. These codes do not belong to any class. + NoClass, +} + +impl StatusClass { + /// Get the default status code for the class. + /// + /// This produces the x00 status code; thus, for `ClientError` (4xx), for + /// example, this will produce `BadRequest` (400): + /// + /// ```rust + /// # use hyper::status::StatusClass::ClientError; + /// # use hyper::status::StatusCode::BadRequest; + /// assert_eq!(ClientError.default_code(), BadRequest); + /// ``` + /// + /// The use for this is outlined in [RFC 7231, section 6 (Response Status + /// Codes)](https://tools.ietf.org/html/rfc7231#section-6): + /// + /// > HTTP status codes are extensible. HTTP clients are not required to + /// > understand the meaning of all registered status codes, though such + /// > understanding is obviously desirable. 
However, a client MUST + /// > understand the class of any status code, as indicated by the first + /// > digit, and treat an unrecognized status code as being equivalent to + /// > the x00 status code of that class, with the exception that a + /// > recipient MUST NOT cache a response with an unrecognized status code. + /// > + /// > For example, if an unrecognized status code of 471 is received by a + /// > client, the client can assume that there was something wrong with its + /// > request and treat the response as if it had received a 400 (Bad + /// > Request) status code. The response message will usually contain a + /// > representation that explains the status. + /// + /// This is demonstrated thusly: + /// + /// ```rust + /// # use hyper::status::StatusCode::{Unregistered, BadRequest}; + /// // Suppose we have received this status code. + /// // You will never directly create an unregistered status code. + /// let status = Unregistered(471); + /// + /// // Uh oh! Don’t know what to do with it. + /// // Let’s fall back to the default: + /// let status = status.class().default_code(); + /// + /// // And look! That is 400 Bad Request. + /// assert_eq!(status, BadRequest); + /// // So now let’s treat it as that. + /// ``` + /// All status codes that do not map to an existing status class are matched + /// by a `NoClass` variant that resolves to 200 (Ok) as default code. + /// This is a common handling for unknown status codes in major browsers. 
+ pub fn default_code(&self) -> StatusCode { + match *self { + StatusClass::Informational => StatusCode::Continue, + StatusClass::Success => StatusCode::Ok, + StatusClass::Redirection => StatusCode::MultipleChoices, + StatusClass::ClientError => StatusCode::BadRequest, + StatusClass::ServerError => StatusCode::InternalServerError, + StatusClass::NoClass => StatusCode::Ok, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use super::StatusCode::*; + + // Check that the following entities are properly inter-connected: + // - numerical code + // - status code + // - default code (for the given status code) + // - canonical reason + fn validate(num: u16, status_code: StatusCode, default_code: StatusCode, reason: Option<&str>) { + assert_eq!(StatusCode::from_u16(num), status_code); + assert_eq!(status_code.to_u16(), num); + assert_eq!(status_code.class().default_code(), default_code); + assert_eq!(status_code.canonical_reason(), reason); + } + + #[test] + fn test_status_code() { + validate(99, Unregistered(99), Ok, None); + + validate(100, Continue, Continue, Some("Continue")); + validate(101, SwitchingProtocols, Continue, Some("Switching Protocols")); + validate(102, Processing, Continue, Some("Processing")); + + validate(200, Ok, Ok, Some("OK")); + validate(201, Created, Ok, Some("Created")); + validate(202, Accepted, Ok, Some("Accepted")); + validate(203, NonAuthoritativeInformation, Ok, Some("Non-Authoritative Information")); + validate(204, NoContent, Ok, Some("No Content")); + validate(205, ResetContent, Ok, Some("Reset Content")); + validate(206, PartialContent, Ok, Some("Partial Content")); + validate(207, MultiStatus, Ok, Some("Multi-Status")); + validate(208, AlreadyReported, Ok, Some("Already Reported")); + validate(226, ImUsed, Ok, Some("IM Used")); + + validate(300, MultipleChoices, MultipleChoices, Some("Multiple Choices")); + validate(301, MovedPermanently, MultipleChoices, Some("Moved Permanently")); + validate(302, Found, MultipleChoices, 
Some("Found")); + validate(303, SeeOther, MultipleChoices, Some("See Other")); + validate(304, NotModified, MultipleChoices, Some("Not Modified")); + validate(305, UseProxy, MultipleChoices, Some("Use Proxy")); + validate(307, TemporaryRedirect, MultipleChoices, Some("Temporary Redirect")); + validate(308, PermanentRedirect, MultipleChoices, Some("Permanent Redirect")); + + validate(400, BadRequest, BadRequest, Some("Bad Request")); + validate(401, Unauthorized, BadRequest, Some("Unauthorized")); + validate(402, PaymentRequired, BadRequest, Some("Payment Required")); + validate(403, Forbidden, BadRequest, Some("Forbidden")); + validate(404, NotFound, BadRequest, Some("Not Found")); + validate(405, MethodNotAllowed, BadRequest, Some("Method Not Allowed")); + validate(406, NotAcceptable, BadRequest, Some("Not Acceptable")); + validate(407, ProxyAuthenticationRequired, BadRequest, + Some("Proxy Authentication Required")); + validate(408, RequestTimeout, BadRequest, Some("Request Timeout")); + validate(409, Conflict, BadRequest, Some("Conflict")); + validate(410, Gone, BadRequest, Some("Gone")); + validate(411, LengthRequired, BadRequest, Some("Length Required")); + validate(412, PreconditionFailed, BadRequest, Some("Precondition Failed")); + validate(413, PayloadTooLarge, BadRequest, Some("Payload Too Large")); + validate(414, UriTooLong, BadRequest, Some("URI Too Long")); + validate(415, UnsupportedMediaType, BadRequest, Some("Unsupported Media Type")); + validate(416, RangeNotSatisfiable, BadRequest, Some("Range Not Satisfiable")); + validate(417, ExpectationFailed, BadRequest, Some("Expectation Failed")); + validate(418, ImATeapot, BadRequest, Some("I'm a teapot")); + validate(421, MisdirectedRequest, BadRequest, Some("Misdirected Request")); + validate(422, UnprocessableEntity, BadRequest, Some("Unprocessable Entity")); + validate(423, Locked, BadRequest, Some("Locked")); + validate(424, FailedDependency, BadRequest, Some("Failed Dependency")); + validate(426, 
UpgradeRequired, BadRequest, Some("Upgrade Required")); + validate(428, PreconditionRequired, BadRequest, Some("Precondition Required")); + validate(429, TooManyRequests, BadRequest, Some("Too Many Requests")); + validate(431, RequestHeaderFieldsTooLarge, BadRequest, + Some("Request Header Fields Too Large")); + validate(451, UnavailableForLegalReasons, BadRequest, + Some("Unavailable For Legal Reasons")); + + validate(500, InternalServerError, InternalServerError, Some("Internal Server Error")); + validate(501, NotImplemented, InternalServerError, Some("Not Implemented")); + validate(502, BadGateway, InternalServerError, Some("Bad Gateway")); + validate(503, ServiceUnavailable, InternalServerError, Some("Service Unavailable")); + validate(504, GatewayTimeout, InternalServerError, Some("Gateway Timeout")); + validate(505, HttpVersionNotSupported, InternalServerError, + Some("HTTP Version Not Supported")); + validate(506, VariantAlsoNegotiates, InternalServerError, Some("Variant Also Negotiates")); + validate(507, InsufficientStorage, InternalServerError, Some("Insufficient Storage")); + validate(508, LoopDetected, InternalServerError, Some("Loop Detected")); + validate(510, NotExtended, InternalServerError, Some("Not Extended")); + validate(511, NetworkAuthenticationRequired, InternalServerError, + Some("Network Authentication Required")); + + } +} diff --git a/vendor/hyper-0.10.16/src/uri.rs b/vendor/hyper-0.10.16/src/uri.rs new file mode 100644 index 0000000..2642375 --- /dev/null +++ b/vendor/hyper-0.10.16/src/uri.rs @@ -0,0 +1,110 @@ +//! HTTP RequestUris +use std::fmt::{Display, self}; +use std::str::FromStr; +use url::Url; +use url::ParseError as UrlError; + +use Error; + +/// The Request-URI of a Request's StartLine. +/// +/// From Section 5.3, Request Target: +/// > Once an inbound connection is obtained, the client sends an HTTP +/// > request message (Section 3) with a request-target derived from the +/// > target URI. 
There are four distinct formats for the request-target, +/// > depending on both the method being requested and whether the request +/// > is to a proxy. +/// > +/// > ```notrust +/// > request-target = origin-form +/// > / absolute-form +/// > / authority-form +/// > / asterisk-form +/// > ``` +#[derive(Debug, PartialEq, Clone)] +pub enum RequestUri { + /// The most common request target, an absolute path and optional query. + /// + /// For example, the line `GET /where?q=now HTTP/1.1` would parse the URI + /// as `AbsolutePath("/where?q=now".to_string())`. + AbsolutePath(String), + + /// An absolute URI. Used in conjunction with proxies. + /// + /// > When making a request to a proxy, other than a CONNECT or server-wide + /// > OPTIONS request (as detailed below), a client MUST send the target + /// > URI in absolute-form as the request-target. + /// + /// An example StartLine with an `AbsoluteUri` would be + /// `GET http://www.example.org/pub/WWW/TheProject.html HTTP/1.1`. + AbsoluteUri(Url), + + /// The authority form is only for use with `CONNECT` requests. + /// + /// An example StartLine: `CONNECT www.example.com:80 HTTP/1.1`. + Authority(String), + + /// The star is used to target the entire server, instead of a specific resource. + /// + /// This is only used for a server-wide `OPTIONS` request. 
+ Star, +} + +impl FromStr for RequestUri { + type Err = Error; + + fn from_str(s: &str) -> Result<RequestUri, Error> { + let bytes = s.as_bytes(); + if bytes.is_empty() { + Err(Error::Uri(UrlError::RelativeUrlWithoutBase)) + } else if bytes == b"*" { + Ok(RequestUri::Star) + } else if bytes.starts_with(b"/") { + Ok(RequestUri::AbsolutePath(s.to_owned())) + } else if bytes.contains(&b'/') { + Ok(RequestUri::AbsoluteUri(try!(Url::parse(s)))) + } else { + let mut temp = "http://".to_owned(); + temp.push_str(s); + // A successful parse of "http://" + s proves s is a valid authority (host[:port]); keep the raw string. + try!(Url::parse(&temp[..])); + Ok(RequestUri::Authority(s.to_owned())) + } + } +} + +impl Display for RequestUri { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + RequestUri::AbsolutePath(ref path) => f.write_str(path), + RequestUri::AbsoluteUri(ref url) => write!(f, "{}", url), + RequestUri::Authority(ref path) => f.write_str(path), + RequestUri::Star => f.write_str("*") + } + } +} + +#[test] +fn test_uri_fromstr() { + fn read(s: &str, result: RequestUri) { + assert_eq!(s.parse::<RequestUri>().unwrap(), result); + } + + read("*", RequestUri::Star); + read("http://hyper.rs/", RequestUri::AbsoluteUri(Url::parse("http://hyper.rs/").unwrap())); + read("hyper.rs", RequestUri::Authority("hyper.rs".to_owned())); + read("/", RequestUri::AbsolutePath("/".to_owned())); +} + +#[test] +fn test_uri_display() { + fn assert_display(expected_string: &str, request_uri: RequestUri) { + assert_eq!(expected_string, format!("{}", request_uri)); + } + + assert_display("*", RequestUri::Star); + assert_display("http://hyper.rs/", RequestUri::AbsoluteUri(Url::parse("http://hyper.rs/").unwrap())); + assert_display("hyper.rs", RequestUri::Authority("hyper.rs".to_owned())); + assert_display("/", RequestUri::AbsolutePath("/".to_owned())); + +} diff --git a/vendor/hyper-0.10.16/src/version.rs b/vendor/hyper-0.10.16/src/version.rs new file mode 100644 index 0000000..a7f12f2 --- /dev/null +++ b/vendor/hyper-0.10.16/src/version.rs @@ -0,0 +1,53 @@ +//!
HTTP Versions enum +//! +//! Instead of relying on typo-prone Strings, use expected HTTP versions as +//! the `HttpVersion` enum. +use std::fmt; +use std::str::FromStr; + +use error::Error; +use self::HttpVersion::{Http09, Http10, Http11, Http20}; + +/// Represents a version of the HTTP spec. +#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)] +pub enum HttpVersion { + /// `HTTP/0.9` + Http09, + /// `HTTP/1.0` + Http10, + /// `HTTP/1.1` + Http11, + /// `HTTP/2.0` + Http20, +} + +impl fmt::Display for HttpVersion { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.write_str(self.as_ref()) + } +} + +impl AsRef<str> for HttpVersion { + fn as_ref(&self) -> &str { + match *self { + Http09 => "HTTP/0.9", + Http10 => "HTTP/1.0", + Http11 => "HTTP/1.1", + Http20 => "HTTP/2.0", + } + } +} + +impl FromStr for HttpVersion { + type Err = Error; + + fn from_str(s: &str) -> Result<HttpVersion, Error> { + match s { + "HTTP/0.9" => Ok(Http09), + "HTTP/1.0" => Ok(Http10), + "HTTP/1.1" => Ok(Http11), + "HTTP/2.0" => Ok(Http20), + _ => Err(Error::Version), + } + } +} diff --git a/vendor/iron-0.6.1/.cargo_vcs_info.json b/vendor/iron-0.6.1/.cargo_vcs_info.json new file mode 100644 index 0000000..9f7c7bf --- /dev/null +++ b/vendor/iron-0.6.1/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": "9e44ab85185e167841756613772632147b0bf892" + } +} diff --git a/vendor/iron-0.6.1/.gitignore b/vendor/iron-0.6.1/.gitignore new file mode 100644 index 0000000..2ef14d1 --- /dev/null +++ b/vendor/iron-0.6.1/.gitignore @@ -0,0 +1,30 @@ +deps/ +.DS_Store +*~ +*# +*.o +*.so +*.swp +*.dylib +*.dSYM +*.dll +*.rlib +*.dummy +*.exe +*-test +/src/generated/mimes.txt +/src/generated/mimegen +/src/response/mimes/mod.rs +/bin/main +/bin/test-internal +/bin/test-external +/doc/ +/target/ +/build/ +/.rust/ +watch.sh +rusti.sh +/examples/* +!/examples/*.rs +Cargo.lock +.cargo \ No newline at end of file diff --git a/vendor/iron-0.6.1/.travis.yml b/vendor/iron-0.6.1/.travis.yml new file mode 100644
index 0000000..7545cbe --- /dev/null +++ b/vendor/iron-0.6.1/.travis.yml @@ -0,0 +1,15 @@ +language: rust +rust: + - stable + - nightly +sudo: false +script: + - cargo build + - cargo test + - cargo build --features native-tls-example + - cargo test --features native-tls-example +env: + global: + secure: DUE2yG7/ASacYARIs7nysUAUhK86AqwE/PdQ3j+D5dqzxs3IOMSOcc7PA1r2w3FkXd52rENCYqKz2iReniJn4fG5S3Q+NbcfaYkhS/6P1y0sQB8yIIVeBRf/Bo2bR2P5TRh+piYWDmqYLUvsQ0+DpQ78MRA6HSxz7gCKpkZS4Y4= +after_success: 'curl https://raw.githubusercontent.com/iron/build-doc/master/build-doc.sh | sh ' + diff --git a/vendor/iron-0.6.1/CONTRIBUTING.md b/vendor/iron-0.6.1/CONTRIBUTING.md new file mode 100644 index 0000000..4ef5980 --- /dev/null +++ b/vendor/iron-0.6.1/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# Contributing + +### Overview + +* Fork iron to your own account +* Create a feature branch, namespaced by. + * bug/... + * feat/... + * test/... + * doc/... + * refactor/... +* Make commits to your feature branch. Prefix each commit like so: + * (feat) Added a new feature + * (fix) Fixed inconsistent tests [Fixes #0] + * (refactor) ... + * (cleanup) ... + * (test) ... + * (doc) ... +* Make a pull request with your changes directly to master. Include a + description of your changes. +* Wait for one of the reviewers to look at your code and either merge it or + give feedback which you should adapt to. + +#### Thank you for contributing! 
diff --git a/vendor/iron-0.6.1/Cargo.toml b/vendor/iron-0.6.1/Cargo.toml new file mode 100644 index 0000000..d2e0e74 --- /dev/null +++ b/vendor/iron-0.6.1/Cargo.toml @@ -0,0 +1,49 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "iron" +version = "0.6.1" +authors = ["Jonathan Reem ", "Zach Pomerantz ", "Michael Sproul ", "Patrick Tran "] +description = "Extensible, Concurrency Focused Web Development in Rust." +documentation = "http://ironframework.io/doc/iron/" +readme = "README.md" +license = "MIT" +repository = "https://github.com/iron/iron" + +[lib] +name = "iron" +path = "src/lib.rs" +[dependencies.hyper] +version = "0.10" + +[dependencies.hyper-native-tls] +version = "0.3" +optional = true + +[dependencies.modifier] +version = "0.1" + +[dependencies.num_cpus] +version = "1.0" + +[dependencies.url] +version = "1.1" +[dev-dependencies.mime] +version = "0.2" + +[dev-dependencies.time] +version = "0.1" + +[features] +default = [] +native-tls-example = ["hyper-native-tls"] diff --git a/vendor/iron-0.6.1/Cargo.toml.orig b/vendor/iron-0.6.1/Cargo.toml.orig new file mode 100644 index 0000000..10513fe --- /dev/null +++ b/vendor/iron-0.6.1/Cargo.toml.orig @@ -0,0 +1,38 @@ +[package] + +name = "iron" +version = "0.6.1" +description = "Extensible, Concurrency Focused Web Development in Rust." 
+readme = "README.md" +repository = "https://github.com/iron/iron" +documentation = "http://ironframework.io/doc/iron/" +license = "MIT" +authors = [ + "Jonathan Reem ", + "Zach Pomerantz ", + "Michael Sproul ", + "Patrick Tran " +] + +[lib] +name = "iron" +path = "src/lib.rs" + +[features] +default = [] +native-tls-example = ["hyper-native-tls"] + +[dependencies] +typemap = "0.3" +url = "1.1" +plugin = "0.2" +mime_guess = "1.8.1" +modifier = "0.1" +log = "0.3" +num_cpus = "1.0" +hyper = "0.10" +hyper-native-tls = { version = "0.3", optional = true } + +[dev-dependencies] +time = "0.1" +mime = "0.2" diff --git a/vendor/iron-0.6.1/LICENSE b/vendor/iron-0.6.1/LICENSE new file mode 100644 index 0000000..de88bcd --- /dev/null +++ b/vendor/iron-0.6.1/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Iron Core Team + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/iron-0.6.1/README.md b/vendor/iron-0.6.1/README.md new file mode 100644 index 0000000..f191754 --- /dev/null +++ b/vendor/iron-0.6.1/README.md @@ -0,0 +1,219 @@ +Iron +==== + +[![Build Status](https://secure.travis-ci.org/iron/iron.svg?branch=master)](https://travis-ci.org/iron/iron) +[![Crates.io Status](http://meritbadge.herokuapp.com/iron)](https://crates.io/crates/iron) +[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/iron/iron/master/LICENSE) + +> Extensible, Concurrency Focused Web Development in Rust. + +## Response Timer Example + +```rust +extern crate iron; +extern crate time; + +use iron::prelude::*; +use iron::{BeforeMiddleware, AfterMiddleware, typemap}; +use time::precise_time_ns; + +struct ResponseTime; + +impl typemap::Key for ResponseTime { type Value = u64; } + +impl BeforeMiddleware for ResponseTime { + fn before(&self, req: &mut Request) -> IronResult<()> { + req.extensions.insert::(precise_time_ns()); + Ok(()) + } +} + +impl AfterMiddleware for ResponseTime { + fn after(&self, req: &mut Request, res: Response) -> IronResult { + let delta = precise_time_ns() - *req.extensions.get::().unwrap(); + println!("Request took: {} ms", (delta as f64) / 1000000.0); + Ok(res) + } +} + +fn hello_world(_: &mut Request) -> IronResult { + Ok(Response::with((iron::status::Ok, "Hello World"))) +} + +fn main() { + let mut chain = Chain::new(hello_world); + chain.link_before(ResponseTime); + chain.link_after(ResponseTime); + Iron::new(chain).http("localhost:3000").unwrap(); +} +``` + +## Overview + +Iron is a high level web framework built in and for Rust, built on +[hyper](https://github.com/hyperium/hyper). Iron is designed to take advantage +of Rust's greatest features - its excellent type system and its principled +approach to ownership in both single threaded and multi threaded contexts. 
+ +Iron is highly concurrent and can scale horizontally on more machines behind a +load balancer or by running more threads on a more powerful machine. Iron +avoids the bottlenecks encountered in highly concurrent code by avoiding shared +writes and locking in the core framework. + +Iron is 100% safe code: + +```sh +$ rg unsafe src | wc + 0 0 0 +``` + +## Philosophy + +Iron is meant to be as extensible and pluggable as possible; Iron's core is +concentrated and avoids unnecessary features by leaving them to middleware, +plugins, and modifiers. + +Middleware, Plugins, and Modifiers are the main ways to extend Iron with new +functionality. Most extensions that would be provided by middleware in other +web frameworks are instead addressed by the much simpler Modifier and Plugin +systems. + +Modifiers allow external code to manipulate Requests and Response in an ergonomic +fashion, allowing third-party extensions to get the same treatment as modifiers +defined in Iron itself. Plugins allow for lazily-evaluated, automatically cached +extensions to Requests and Responses, perfect for parsing, accessing, and +otherwise lazily manipulating an http connection. + +Middleware are only used when it is necessary to modify the control flow of a +Request flow, hijack the entire handling of a Request, check an incoming +Request, or to do final post-processing. This covers areas such as routing, +mounting, static asset serving, final template rendering, authentication, and +logging. + +Iron comes with only basic modifiers for setting the status, body, and various +headers, and the infrastructure for creating modifiers, plugins, and +middleware. No plugins or middleware are bundled with Iron. 
+ +## Performance + +Iron averages [72,000+ requests per second for hello world](https://github.com/iron/iron/wiki/How-to-Benchmark-hello.rs-Example) +and is mostly IO-bound, spending over 70% of its time in the kernel send-ing or +recv-ing data.\* + +\* *Numbers from profiling on my OS X machine, your mileage may vary.* + +## Core Extensions + +Iron aims to fill a void in the Rust web stack - a high level framework that is +*extensible* and makes organizing complex server code easy. + +Extensions are painless to build. Some important ones are: + +Middleware: +- [Routing](https://github.com/iron/router) +- [Mounting](https://github.com/iron/mount) +- [Static File Serving](https://github.com/iron/staticfile) +- [Logging](https://github.com/iron/logger) + +Plugins: +- [JSON Body Parsing](https://github.com/iron/body-parser) +- [URL Encoded Data Parsing](https://github.com/iron/urlencoded) +- [All-In-One (JSON, URL, & Form Data) Parameter Parsing](https://github.com/iron/params) + +Both: +- [Shared Memory (also used for Plugin configuration)](https://github.com/iron/persistent) +- [Sessions](https://github.com/iron/iron-sessionstorage) + +This allows for extremely flexible and powerful setups and allows nearly all +of Iron's features to be swappable - you can even change the middleware +resolution algorithm by swapping in your own `Chain`. + +\* Due to the rapidly evolving state of the Rust ecosystem, not everything +builds all the time. Please be patient and file issues for breaking builds, +we're doing our best. + +## Underlying HTTP Implementation + +Iron is based on and uses [`hyper`](https://github.com/hyperium/hyper) as its +HTTP implementation, and lifts several types from it, including its header +representation, status, and other core HTTP types. It is usually unnecessary to +use `hyper` directly when using Iron, since Iron provides a facade over +`hyper`'s core facilities, but it is sometimes necessary to depend on it as +well. 
+ + + +## Installation + +If you're using `Cargo`, just add Iron to your `Cargo.toml`: + +```toml +[dependencies.iron] +version = "*" +``` + +## [Documentation](http://ironframework.io/doc/iron) + +The documentation is hosted [online](http://ironframework.io/doc/iron) and +auto-updated with each successful release. You can also use `cargo doc` to +build a local copy. + +## [Examples](/examples) + +Check out the [examples](/examples) directory! + +You can run an individual example using `cargo run --example example-name`. +Note that for benchmarking you should make sure to use the `--release` flag, +which will cause cargo to compile the entire toolchain with optimizations. +Without `--release` you will get truly sad numbers. + +## Getting Help + +Feel free to ask questions as github issues in this or other related repos. + +The best place to get immediate help is on IRC, on any of these channels on the +mozilla network: + +- `#rust-webdev` +- `#iron` +- `#rust` + +One of the maintainers or contributors is usually around and can probably help. +We encourage you to stop by and say hi and tell us what you're using Iron for, +even if you don't have any questions. It's invaluable to hear feedback from users +and always nice to hear if someone is using the framework we've worked on. + +## Maintainers + +Jonathan Reem ([reem](https://github.com/reem)) is the core maintainer and +author of Iron. 
+ +Commit Distribution (as of `8e55759`): + +``` +Jonathan Reem (415) +Zach Pomerantz (123) +Michael Sproul (9) +Patrick Tran (5) +Corey Richardson (4) +Bryce Fisher-Fleig (3) +Barosl Lee (2) +Christoph Burgdorf (2) +da4c30ff (2) +arathunku (1) +Cengiz Can (1) +Darayus (1) +Eduardo Bautista (1) +Mehdi Avdi (1) +Michael Sierks (1) +Nerijus Arlauskas (1) +SuprDewd (1) +``` + +## License + +MIT + diff --git a/vendor/iron-0.6.1/circle.yml b/vendor/iron-0.6.1/circle.yml new file mode 100644 index 0000000..c277e06 --- /dev/null +++ b/vendor/iron-0.6.1/circle.yml @@ -0,0 +1,3 @@ +dependencies: + pre: + - rm ~/.gitconfig diff --git a/vendor/iron-0.6.1/examples/404.rs b/vendor/iron-0.6.1/examples/404.rs new file mode 100644 index 0000000..3e8252b --- /dev/null +++ b/vendor/iron-0.6.1/examples/404.rs @@ -0,0 +1,11 @@ +extern crate iron; + +use iron::prelude::*; +use iron::status; + +fn main() { + Iron::new(|_: &mut Request| { + Ok(Response::with(status::NotFound)) + }).http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/around.rs b/vendor/iron-0.6.1/examples/around.rs new file mode 100644 index 0000000..c6c297e --- /dev/null +++ b/vendor/iron-0.6.1/examples/around.rs @@ -0,0 +1,67 @@ +extern crate iron; +extern crate time; + +use iron::prelude::*; +use iron::{Handler, AroundMiddleware}; +use iron::status; + +enum LoggerMode { + Silent, + Tiny, + Large +} + +struct Logger { + mode: LoggerMode +} + +struct LoggerHandler { logger: Logger, handler: H } + +impl Logger { + fn new(mode: LoggerMode) -> Logger { + Logger { mode: mode } + } + + fn log(&self, req: &Request, res: Result<&Response, &IronError>, time: u64) { + match self.mode { + LoggerMode::Silent => {}, + LoggerMode::Tiny => println!("Req: {:?}\nRes: {:?}\nTook: {}", req, res, time), + LoggerMode::Large => println!("Request: {:?}\nResponse: {:?}\nResponse-Time: {}", req, res, time) + } + } +} + +impl Handler for LoggerHandler { + fn handle(&self, req: &mut Request) -> IronResult { + let entry = 
::time::precise_time_ns(); + let res = self.handler.handle(req); + self.logger.log(req, res.as_ref(), ::time::precise_time_ns() - entry); + res + } +} + +impl AroundMiddleware for Logger { + fn around(self, handler: Box) -> Box { + Box::new(LoggerHandler { + logger: self, + handler: handler + }) as Box + } +} + +fn hello_world(_: &mut Request) -> IronResult { + Ok(Response::with((status::Ok, "Hello World!"))) +} + +fn main() { + let tiny = Iron::new(Logger::new(LoggerMode::Tiny).around(Box::new(hello_world))); + let silent = Iron::new(Logger::new(LoggerMode::Silent).around(Box::new(hello_world))); + let large = Iron::new(Logger::new(LoggerMode::Large).around(Box::new(hello_world))); + + let _tiny_listening = tiny.http("localhost:2000").unwrap(); + let _silent_listening = silent.http("localhost:3000").unwrap(); + let _large_listening = large.http("localhost:4000").unwrap(); + + println!("Servers listening on 2000, 3000, and 4000"); +} + diff --git a/vendor/iron-0.6.1/examples/content_type.rs b/vendor/iron-0.6.1/examples/content_type.rs new file mode 100644 index 0000000..93f4cce --- /dev/null +++ b/vendor/iron-0.6.1/examples/content_type.rs @@ -0,0 +1,46 @@ +#[macro_use] +extern crate mime; +extern crate iron; + +use std::env; + +use iron::prelude::*; +use iron::headers::ContentType; +use iron::status; + +// All these variants do the same thing, with more or less options for customization. 
+ +fn variant1(_: &mut Request) -> IronResult { + Ok(Response::with((ContentType::json().0, status::Ok, "{}"))) +} + +fn variant2(_: &mut Request) -> IronResult { + use iron::mime; + let content_type = "application/json".parse::().unwrap(); + Ok(Response::with((content_type, status::Ok, "{}"))) +} + +fn variant3(_: &mut Request) -> IronResult { + let content_type = mime!(Application/Json); + Ok(Response::with((content_type, status::Ok, "{}"))) +} + +fn variant4(_: &mut Request) -> IronResult { + use iron::mime; + let content_type = mime::Mime(iron::mime::TopLevel::Application, iron::mime::SubLevel::Json, vec![]); + Ok(Response::with((content_type, status::Ok, "{}"))) +} + +fn main() { + let args: Vec = env::args().collect(); + let variant_index = if args.len() > 1 { args[1].parse().unwrap() } else { 1 }; + let handler = match variant_index { + 1 => variant1, + 2 => variant2, + 3 => variant3, + 4 => variant4, + _ => panic!("No such variant"), + }; + println!("Using variant{}", variant_index); + Iron::new(handler).http("localhost:3000").unwrap(); +} diff --git a/vendor/iron-0.6.1/examples/echo.rs b/vendor/iron-0.6.1/examples/echo.rs new file mode 100644 index 0000000..e7a0e89 --- /dev/null +++ b/vendor/iron-0.6.1/examples/echo.rs @@ -0,0 +1,24 @@ +// An example that echoes the body of the request back as the response. +// +// Shows how to read the request body with error handling and how to return a +// response. See `helper_macros` example for a different way to handle errors. 
+ +extern crate iron; + +use std::io::Read; + +use iron::prelude::*; +use iron::status; + +fn echo(request: &mut Request) -> IronResult { + let mut body = Vec::new(); + request + .body + .read_to_end(&mut body) + .map_err(|e| IronError::new(e, (status::InternalServerError, "Error reading request")))?; + Ok(Response::with((status::Ok, body))) +} + +fn main() { + Iron::new(echo).http("localhost:3000").unwrap(); +} diff --git a/vendor/iron-0.6.1/examples/error.rs b/vendor/iron-0.6.1/examples/error.rs new file mode 100644 index 0000000..60667b3 --- /dev/null +++ b/vendor/iron-0.6.1/examples/error.rs @@ -0,0 +1,53 @@ +extern crate iron; +extern crate time; + +use iron::prelude::*; +use iron::{Handler, BeforeMiddleware}; +use iron::status; + +use std::error::Error; +use std::fmt::{self, Debug}; + +struct ErrorHandler; +struct ErrorProducer; + +#[derive(Debug)] +struct StringError(String); + +impl fmt::Display for StringError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl Error for StringError { + fn description(&self) -> &str { &*self.0 } +} + +impl Handler for ErrorHandler { + fn handle(&self, _: &mut Request) -> IronResult { + // This is never called! + // + // If a BeforeMiddleware returns an error through Err(...), + // and it is not handled by a subsequent BeforeMiddleware in + // the chain, the main handler is not invoked. + Ok(Response::new()) + } +} + +impl BeforeMiddleware for ErrorProducer { + fn before(&self, _: &mut Request) -> IronResult<()> { + Err(IronError::new(StringError("Error".to_string()), status::BadRequest)) + } +} + +fn main() { + // Handler is attached here. + let mut chain = Chain::new(ErrorHandler); + + // Link our error maker. 
+ chain.link_before(ErrorProducer); + + Iron::new(chain).http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/error_recovery.rs b/vendor/iron-0.6.1/examples/error_recovery.rs new file mode 100644 index 0000000..4e41be0 --- /dev/null +++ b/vendor/iron-0.6.1/examples/error_recovery.rs @@ -0,0 +1,99 @@ +// This example illustrates the error flow of a Request in the middleware Chain. +// Here is the chain used and the path of the request through the middleware pieces: +// +// Normal Flow : __[ErrorProducer::before]__ [ErrorRecover::before] __[handle::HelloWorldHandler]__[ErrorProducer::after]__ [ErrorRecover::after] __ ... +// Error Flow : [ErrorProducer::catch ] |__[ErrorRecover::catch ]__| [ErrorProducer::catch] |__[ErrorRecover::catch]__| +// +// --------------- BEFORE MIDDLEWARE ----------------- || --------- HANDLER -------- || ---------------- AFTER MIDDLEWARE -------------- + +extern crate iron; + +use iron::prelude::*; +use iron::status; +use iron::{Handler, BeforeMiddleware, AfterMiddleware}; + +use std::error::Error; +use std::fmt::{self, Debug}; + +struct HelloWorldHandler; +struct ErrorProducer; +struct ErrorRecover; + +#[derive(Debug)] +struct StringError(String); + +impl fmt::Display for StringError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl Error for StringError { + fn description(&self) -> &str { &*self.0 } +} + +impl Handler for HelloWorldHandler { + fn handle(&self, _: &mut Request) -> IronResult { + // This will be called since we are in the normal flow before reaching the Handler. + // However, the AfterMiddleware chain will override the Response. + println!("The HelloWorldHandler has been called !"); + Ok(Response::with((status::Ok, "Hello world !"))) + } +} + +impl BeforeMiddleware for ErrorProducer { + fn before(&self, _: &mut Request) -> IronResult<()> { + // The error produced here switches to the error flow. 
+ // The catch method of following middleware pieces will be called. + // The Handler will be skipped unless the error is handled by another middleware piece. + // IronError::error tells the next middleware what went wrong. + // IronError::response is the Response that will be sent back to the client if this error is not handled. + // Here status::BadRequest acts as modifier, thus we can put more there than just a status. + Err(IronError::new(StringError("Error in ErrorProducer BeforeMiddleware".to_string()), status::BadRequest)) + } +} + +impl AfterMiddleware for ErrorProducer { + fn after(&self, _: &mut Request, _: Response) -> IronResult { + // The behavior here is the same as in ErrorProducer::before. + // The previous response (from the Handler) is discarded and replaced with a new response (created from the modifier). + Err(IronError::new(StringError("Error in ErrorProducer AfterMiddleware".to_string()), (status::BadRequest, "Response created in ErrorProducer"))) + } +} + +impl BeforeMiddleware for ErrorRecover { + fn catch(&self, _: &mut Request, err: IronError) -> IronResult<()> { + // We can use the IronError from previous middleware to decide what to do. + // Returning Ok() from a catch method resumes the normal flow and + // passes the Request forward to the next middleware piece in the chain (here the HelloWorldHandler). + println!("{} caught in ErrorRecover BeforeMiddleware.", err.error); + match err.response.status { + Some(status::BadRequest) => Ok(()), + _ => Err(err) + } + } +} + +impl AfterMiddleware for ErrorRecover { + fn catch(&self, _: &mut Request, err: IronError) -> IronResult { + // Just like in the BeforeMiddleware, we can return Ok(Response) here to return to the normal flow. + // In this case, ErrorRecover is the last middleware in the chain + // and the Response created in the ErrorProducer is modified and sent back to the client. 
+ println!("{} caught in ErrorRecover AfterMiddleware.", err.error); + match err.response.status { + Some(status::BadRequest) => Ok(err.response.set(status::Ok)), + _ => Err(err) + } + } +} + +fn main() { + let mut chain = Chain::new(HelloWorldHandler); + chain.link_before(ErrorProducer); + chain.link_before(ErrorRecover); + + chain.link_after(ErrorProducer); + chain.link_after(ErrorRecover); + + Iron::new(chain).http("localhost:3000").unwrap(); +} diff --git a/vendor/iron-0.6.1/examples/get_set_headers.rs b/vendor/iron-0.6.1/examples/get_set_headers.rs new file mode 100644 index 0000000..588d212 --- /dev/null +++ b/vendor/iron-0.6.1/examples/get_set_headers.rs @@ -0,0 +1,41 @@ +extern crate iron; + +use iron::{Iron, Request, Response, IronResult, AfterMiddleware, Chain}; + +struct DefaultContentType; +impl AfterMiddleware for DefaultContentType { + // This is run for every requests, AFTER all handlers have been executed + fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { + if resp.headers.get::() == None { + // Set a standard header + resp.headers.set(iron::headers::ContentType::plaintext()); + } + Ok(resp) + } +} + + + +fn info(req: &mut Request) -> IronResult { + // Get a header using a standard iron::headers + let ua = match req.headers.get::() { + Some(ua_header) => format!("User Agent: {}\n", ua_header), + None => "No User Agent.\n".to_string(), + }; + // Get a non-standard header using the raw header + let x_forwarded_for = match req.headers.get_raw("X-Forwarded-For") { + Some(proxies) => format!("Proxies: {}\n", std::str::from_utf8(&proxies[0]).unwrap()), + None => "No proxy.\n".to_string(), + }; + let body = format!("{}{}\n", ua, x_forwarded_for); + + Ok(Response::with((iron::status::Ok, body))) +} + +fn main() { + let mut chain = Chain::new(info); + chain.link_after(DefaultContentType); + Iron::new(chain) + .http(format!("localhost:{}", 3000)) + .unwrap(); +} diff --git a/vendor/iron-0.6.1/examples/hello.rs 
b/vendor/iron-0.6.1/examples/hello.rs new file mode 100644 index 0000000..b179a1a --- /dev/null +++ b/vendor/iron-0.6.1/examples/hello.rs @@ -0,0 +1,11 @@ +extern crate iron; + +use iron::prelude::*; +use iron::status; + +fn main() { + Iron::new(|_: &mut Request| { + Ok(Response::with((status::Ok, "Hello world!"))) + }).http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/hello_custom_config.rs b/vendor/iron-0.6.1/examples/hello_custom_config.rs new file mode 100644 index 0000000..0760d9e --- /dev/null +++ b/vendor/iron-0.6.1/examples/hello_custom_config.rs @@ -0,0 +1,21 @@ +extern crate iron; + +use std::time::Duration; + +use iron::prelude::*; +use iron::status; +use iron::Timeouts; + +fn main() { + let mut iron = Iron::new(|_: &mut Request| { + Ok(Response::with((status::Ok, "Hello world!"))) + }); + iron.threads = 8; + iron.timeouts = Timeouts { + keep_alive: Some(Duration::from_secs(10)), + read: Some(Duration::from_secs(10)), + write: Some(Duration::from_secs(10)) + }; + iron.http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/helper_macros.rs b/vendor/iron-0.6.1/examples/helper_macros.rs new file mode 100644 index 0000000..7bf4fa3 --- /dev/null +++ b/vendor/iron-0.6.1/examples/helper_macros.rs @@ -0,0 +1,31 @@ +//! A simple demonstration how iron's helper macros make e.g. IO-intensive code easier to write. +#[macro_use] extern crate iron; + +use std::io; +use std::fs; + +use iron::prelude::*; +use iron::status; +use iron::method; + +fn main() { + Iron::new(|req: &mut Request| { + Ok(match req.method { + method::Get => { + // It's not a server error if the file doesn't exist yet. Therefore we use + // `iexpect`, to return Ok(...) instead of Err(...) if the file doesn't exist. + let f = iexpect!(fs::File::open("foo.txt").ok(), (status::Ok, "")); + Response::with((status::Ok, f)) + }, + method::Put => { + // If creating the file fails, something is messed up on our side. 
We probably want + // to log the error, so we use `itry` instead of `iexpect`. + let mut f = itry!(fs::File::create("foo.txt")); + itry!(io::copy(&mut req.body, &mut f)); + Response::with(status::Created) + }, + _ => Response::with(status::BadRequest) + }) + }).http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/https.rs b/vendor/iron-0.6.1/examples/https.rs new file mode 100644 index 0000000..8868cce --- /dev/null +++ b/vendor/iron-0.6.1/examples/https.rs @@ -0,0 +1,41 @@ +// This requires running with: +// +// ```bash +// cargo run --example https --features native-tls-example +// ``` +// +// Generate an identity like so: +// +// ```bash +// openssl req -x509 -newkey rsa:4096 -nodes -keyout localhost.key -out localhost.crt -days 3650 +// openssl pkcs12 -export -out identity.p12 -inkey localhost.key -in localhost.crt -password pass:mypass +// +// ``` + +extern crate iron; +#[cfg(feature = "native-tls-example")] +extern crate hyper_native_tls; + +#[cfg(feature = "native-tls-example")] +fn main() { + // Avoid unused errors due to conditional compilation ('native-tls-example' feature is not default) + use hyper_native_tls::NativeTlsServer; + use iron::{Iron, Request, Response}; + use iron::status; + use std::result::Result; + + let ssl = NativeTlsServer::new("identity.p12", "mypass").unwrap(); + + match Iron::new(|_: &mut Request| { + Ok(Response::with((status::Ok, "Hello world!"))) + }).https("127.0.0.1:3000", ssl) { + Result::Ok(listening) => println!("{:?}", listening), + Result::Err(err) => panic!("{:?}", err), + } + // curl -vvvv https://127.0.0.1:3000/ -k +} + +#[cfg(not(feature = "native-tls-example"))] +fn main() { + // We need to do this to make sure `cargo test` passes. 
+} diff --git a/vendor/iron-0.6.1/examples/redirect.rs b/vendor/iron-0.6.1/examples/redirect.rs new file mode 100644 index 0000000..c60b02d --- /dev/null +++ b/vendor/iron-0.6.1/examples/redirect.rs @@ -0,0 +1,14 @@ +extern crate iron; + +use iron::prelude::*; +use iron::modifiers::Redirect; +use iron::{Url, status}; + +fn main() { + let url = Url::parse("http://rust-lang.org").unwrap(); + + Iron::new(move |_: &mut Request | { + Ok(Response::with((status::Found, Redirect(url.clone())))) + }).http("localhost:3000").unwrap(); +} + diff --git a/vendor/iron-0.6.1/examples/simple_routing.rs b/vendor/iron-0.6.1/examples/simple_routing.rs new file mode 100644 index 0000000..2404fdd --- /dev/null +++ b/vendor/iron-0.6.1/examples/simple_routing.rs @@ -0,0 +1,52 @@ +// This example shows how to create a basic router that maps url to different handlers. +// If you're looking for real routing middleware, check https://github.com/iron/router + +extern crate iron; + +use std::collections::HashMap; + +use iron::prelude::*; +use iron::Handler; +use iron::status; + +struct Router { + // Routes here are simply matched with the url path. 
+ routes: HashMap> +} + +impl Router { + fn new() -> Self { + Router { routes: HashMap::new() } + } + + fn add_route(&mut self, path: String, handler: H) where H: Handler { + self.routes.insert(path, Box::new(handler)); + } +} + +impl Handler for Router { + fn handle(&self, req: &mut Request) -> IronResult { + match self.routes.get(&req.url.path().join("/")) { + Some(handler) => handler.handle(req), + None => Ok(Response::with(status::NotFound)) + } + } +} + +fn main() { + let mut router = Router::new(); + + router.add_route("hello".to_string(), |_: &mut Request| { + Ok(Response::with((status::Ok, "Hello world !"))) + }); + + router.add_route("hello/again".to_string(), |_: &mut Request| { + Ok(Response::with((status::Ok, "Hello again !"))) + }); + + router.add_route("error".to_string(), |_: &mut Request| { + Ok(Response::with(status::BadRequest)) + }); + + Iron::new(router).http("localhost:3000").unwrap(); +} diff --git a/vendor/iron-0.6.1/examples/time.rs b/vendor/iron-0.6.1/examples/time.rs new file mode 100644 index 0000000..5a9c322 --- /dev/null +++ b/vendor/iron-0.6.1/examples/time.rs @@ -0,0 +1,36 @@ +extern crate iron; +extern crate time; + +use iron::prelude::*; +use iron::{BeforeMiddleware, AfterMiddleware, typemap}; +use time::precise_time_ns; + +struct ResponseTime; + +impl typemap::Key for ResponseTime { type Value = u64; } + +impl BeforeMiddleware for ResponseTime { + fn before(&self, req: &mut Request) -> IronResult<()> { + req.extensions.insert::(precise_time_ns()); + Ok(()) + } +} + +impl AfterMiddleware for ResponseTime { + fn after(&self, req: &mut Request, res: Response) -> IronResult { + let delta = precise_time_ns() - *req.extensions.get::().unwrap(); + println!("Request took: {} ms", (delta as f64) / 1000000.0); + Ok(res) + } +} + +fn hello_world(_: &mut Request) -> IronResult { + Ok(Response::with((iron::status::Ok, "Hello World"))) +} + +fn main() { + let mut chain = Chain::new(hello_world); + chain.link_before(ResponseTime); + 
chain.link_after(ResponseTime); + Iron::new(chain).http("localhost:3000").unwrap(); +} diff --git a/vendor/iron-0.6.1/src/error.rs b/vendor/iron-0.6.1/src/error.rs new file mode 100644 index 0000000..6f69469 --- /dev/null +++ b/vendor/iron-0.6.1/src/error.rs @@ -0,0 +1,63 @@ +use std::fmt; + +use modifier::Modifier; +use {Response}; + +pub use std::error::Error; +pub use hyper::Error as HttpError; +pub use hyper::error::Result as HttpResult; + +/// The type of Errors inside and when using Iron. +/// +/// `IronError` informs its receivers of two things: +/// +/// * What went wrong +/// * What to do about it +/// +/// The `error` field is responsible for informing receivers of which +/// error occured, and receivers may also modify the error field by layering +/// it (building up a cause chain). +/// +/// The `response` field provides a tangible action to be taken if this error +/// is not otherwise handled. +#[derive(Debug)] +pub struct IronError { + /// The underlying error + /// + /// This can be layered and will be logged at the end of an errored + /// request. + pub error: Box, + + /// What to do about this error. + /// + /// This Response will be used when the error-handling flow finishes. + pub response: Response +} + +impl IronError { + /// Create a new `IronError` from an error and a modifier. + pub fn new>(e: E, m: M) -> IronError { + IronError { + error: Box::new(e), + response: Response::with(m) + } + } +} + +impl fmt::Display for IronError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fmt::Display::fmt(&*self.error, f) + } +} + +impl Error for IronError { + fn description(&self) -> &str { + self.error.description() + } + + #[allow(deprecated)] + fn cause(&self) -> Option<&Error> { + self.error.cause() + } +} + diff --git a/vendor/iron-0.6.1/src/iron.rs b/vendor/iron-0.6.1/src/iron.rs new file mode 100644 index 0000000..c062b23 --- /dev/null +++ b/vendor/iron-0.6.1/src/iron.rs @@ -0,0 +1,199 @@ +//! 
Exposes the `Iron` type, the main entrance point of the +//! `Iron` library. + +use std::net::{ToSocketAddrs, SocketAddr}; +use std::time::Duration; + +pub use hyper::server::Listening; +use hyper::server::Server; +use hyper::net::{Fresh, SslServer, HttpListener, HttpsListener, NetworkListener}; + +use request::HttpRequest; +use response::HttpResponse; + +use error::HttpResult; + +use {Request, Handler}; +use status; + +/// The primary entrance point to `Iron`, a `struct` to instantiate a new server. +/// +/// `Iron` contains the `Handler` which takes a `Request` and produces a +/// `Response`. +pub struct Iron { + /// Iron contains a `Handler`, which it uses to create responses for client + /// requests. + pub handler: H, + + /// Server timeouts. + pub timeouts: Timeouts, + + /// The number of request handling threads. + /// + /// Defaults to `8 * num_cpus`. + pub threads: usize, +} + +/// A settings struct containing a set of timeouts which can be applied to a server. +#[derive(Debug, PartialEq, Clone, Copy)] +pub struct Timeouts { + /// Controls the timeout for keep alive connections. + /// + /// The default is `Some(Duration::from_secs(5))`. + /// + /// NOTE: Setting this to None will have the effect of turning off keep alive. + pub keep_alive: Option, + + /// Controls the timeout for reads on existing connections. + /// + /// The default is `Some(Duration::from_secs(30))` + pub read: Option, + + /// Controls the timeout for writes on existing connections. + /// + /// The default is `Some(Duration::from_secs(1))` + pub write: Option +} + +impl Default for Timeouts { + fn default() -> Self { + Timeouts { + keep_alive: Some(Duration::from_secs(5)), + read: Some(Duration::from_secs(30)), + write: Some(Duration::from_secs(1)) + } + } +} + +#[derive(Clone)] +enum _Protocol { + Http, + Https, +} + +/// Protocol used to serve content. 
+#[derive(Clone)] +pub struct Protocol(_Protocol); + +impl Protocol { + /// Plaintext HTTP/1 + pub fn http() -> Protocol { + Protocol(_Protocol::Http) + } + + /// HTTP/1 over SSL/TLS + pub fn https() -> Protocol { + Protocol(_Protocol::Https) + } + + /// Returns the name used for this protocol in a URI's scheme part. + pub fn name(&self) -> &str { + match self.0 { + _Protocol::Http => "http", + _Protocol::Https => "https", + } + } +} + +impl Iron { + /// Instantiate a new instance of `Iron`. + /// + /// This will create a new `Iron`, the base unit of the server, using the + /// passed in `Handler`. + pub fn new(handler: H) -> Iron { + Iron { + handler: handler, + timeouts: Timeouts::default(), + threads: 8 * ::num_cpus::get(), + } + } + + /// Kick off the server process using the HTTP protocol. + /// + /// Call this once to begin listening for requests on the server. + /// This consumes the Iron instance, but does the listening on + /// another task, so is not blocking. + /// + /// The thread returns a guard that will automatically join with the parent + /// once it is dropped, blocking until this happens. + pub fn http(self, addr: A) -> HttpResult + where A: ToSocketAddrs + { + HttpListener::new(addr).and_then(|l| self.listen(l, Protocol::http())) + } + + /// Kick off the server process using the HTTPS protocol. + /// + /// Call this once to begin listening for requests on the server. + /// This consumes the Iron instance, but does the listening on + /// another task, so is not blocking. + /// + /// The thread returns a guard that will automatically join with the parent + /// once it is dropped, blocking until this happens. + pub fn https(self, addr: A, ssl: S) -> HttpResult + where A: ToSocketAddrs, + S: 'static + SslServer + Send + Clone + { + HttpsListener::new(addr, ssl).and_then(|l| self.listen(l, Protocol::http())) + } + + /// Kick off a server process on an arbitrary `Listener`. + /// + /// Most use cases may call `http` and `https` methods instead of this. 
+ pub fn listen(self, mut listener: L, protocol: Protocol) -> HttpResult + where L: 'static + NetworkListener + Send + { + let handler = RawHandler { + handler: self.handler, + addr: try!(listener.local_addr()), + protocol: protocol, + }; + + let mut server = Server::new(listener); + server.keep_alive(self.timeouts.keep_alive); + server.set_read_timeout(self.timeouts.read); + server.set_write_timeout(self.timeouts.write); + server.handle_threads(handler, self.threads) + } +} + +struct RawHandler { + handler: H, + addr: SocketAddr, + protocol: Protocol, +} + +impl ::hyper::server::Handler for RawHandler { + fn handle(&self, http_req: HttpRequest, mut http_res: HttpResponse) { + // Set some defaults in case request handler panics. + // This should not be necessary anymore once stdlib's catch_panic becomes stable. + *http_res.status_mut() = status::InternalServerError; + + // Create `Request` wrapper. + match Request::from_http(http_req, self.addr, &self.protocol) { + Ok(mut req) => { + // Dispatch the request, write the response back to http_res + self.handler.handle(&mut req).unwrap_or_else(|e| { + eprintln!("[iron] Error handling:\n{:?}\nError was: {:?}", req, e.error); + e.response + }).write_back(http_res) + }, + Err(e) => { + eprintln!("[iron] Error creating request:\n {}", e); + bad_request(http_res) + } + } + } +} + +fn bad_request(mut http_res: HttpResponse) { + *http_res.status_mut() = status::BadRequest; + + // Consume and flush the response. + // We would like this to work, but can't do anything if it doesn't. 
+ if let Ok(res) = http_res.start() + { + let _ = res.end(); + } +} + diff --git a/vendor/iron-0.6.1/src/lib.rs b/vendor/iron-0.6.1/src/lib.rs new file mode 100644 index 0000000..5a460d0 --- /dev/null +++ b/vendor/iron-0.6.1/src/lib.rs @@ -0,0 +1,157 @@ +#![doc(html_logo_url = "https://avatars0.githubusercontent.com/u/7853871?s=128", html_favicon_url = "https://avatars0.githubusercontent.com/u/7853871?s=256", html_root_url = "http://ironframework.io/core/iron")] +#![cfg_attr(test, deny(warnings))] +#![allow(bare_trait_objects)] +#![deny(missing_docs)] +#![allow(deprecated)] +#![allow(anonymous_parameters)] + +//! The main crate for Iron. +//! +//! ## Overview +//! +//! Iron is a high level web framework built in and for Rust, built on +//! [hyper](https://github.com/hyperium/hyper). Iron is designed to take advantage +//! of Rust's greatest features - its excellent type system and principled +//! approach to ownership in both single threaded and multi threaded contexts. +//! +//! Iron is highly concurrent and can scale horizontally on more machines behind a +//! load balancer or by running more threads on a more powerful machine. Iron +//! avoids the bottlenecks encountered in highly concurrent code by avoiding shared +//! writes and locking in the core framework. +//! +//! ## Hello World +//! +//! ```no_run +//! extern crate iron; +//! +//! use iron::prelude::*; +//! use iron::status; +//! +//! fn main() { +//! Iron::new(|_: &mut Request| { +//! Ok(Response::with((status::Ok, "Hello World!"))) +//! }).http("localhost:3000").unwrap(); +//! } +//! ``` +//! +//! ## Design Philosophy +//! +//! Iron is meant to be as extensible and pluggable as possible; Iron's core is +//! concentrated and avoids unnecessary features by leaving them to middleware, +//! plugins, and modifiers. +//! +//! Middleware, Plugins, and Modifiers are the main ways to extend Iron with new +//! functionality. Most extensions that would be provided by middleware in other +//! 
web frameworks are instead addressed by the much simpler Modifier and Plugin +//! systems. +//! +//! Modifiers allow external code to manipulate Requests and Response in an ergonomic +//! fashion, allowing third-party extensions to get the same treatment as modifiers +//! defined in Iron itself. Plugins allow for lazily-evaluated, automatically cached +//! extensions to Requests and Responses, perfect for parsing, accessing, and +//! otherwise lazily manipulating an http connection. +//! +//! Middleware are only used when it is necessary to modify the control flow of a +//! Request flow, hijack the entire handling of a Request, check an incoming +//! Request, or to do final post-processing. This covers areas such as routing, +//! mounting, static asset serving, final template rendering, authentication, and +//! logging. +//! +//! Iron comes with only basic modifiers for setting the status, body, and various +//! headers, and the infrastructure for creating modifiers, plugins, and +//! middleware. No plugins or middleware are bundled with Iron. +//! + +// Third party packages +extern crate hyper; +extern crate url as url_ext; +extern crate num_cpus; + +// Request + Response +pub use request::{Request, Url}; +pub use response::Response; + +// Middleware system +pub use middleware::{BeforeMiddleware, AfterMiddleware, AroundMiddleware, + Handler, Chain}; + +// Server +pub use iron::*; + +// Headers +pub use hyper::header as headers; +pub use hyper::header::Headers; + +// Expose modifiers. +pub use modifier::Set; + +// Errors +pub use error::Error; +pub use error::IronError; + +// Mime types +pub use hyper::mime; + +/// Iron's error type and associated utilities. +pub mod error; + +/// The Result alias used throughout Iron and in clients of Iron. +pub type IronResult = Result; + +/// A module meant to be glob imported when using Iron. 
+/// +/// For instance: +/// +/// ``` +/// use iron::prelude::*; +/// ``` +/// +/// This module contains several important traits that provide many +/// of the convenience methods in Iron, as well as `Request`, `Response` +/// `IronResult`, `IronError` and `Iron`. +pub mod prelude { + #[doc(no_inline)] + pub use {Set, Chain, Request, Response, + IronResult, IronError, Iron}; +} + +/// Re-exports from the Modifier crate. +pub mod modifier { + extern crate modifier as modfier; + pub use self::modfier::*; +} + +/// Re-exports from the url crate. +pub mod url { + pub use url_ext::*; +} + +/// Status Codes +pub mod status { + pub use hyper::status::StatusCode as Status; + pub use hyper::status::StatusCode::*; + pub use hyper::status::StatusClass; +} + +/// HTTP Methods +pub mod method { + pub use hyper::method::Method; + pub use hyper::method::Method::*; +} + +// Publicized to show the documentation +pub mod middleware; + +// Response utilities +pub mod response; + +// Request utilities +pub mod request; + +// Request and Response Modifiers +pub mod modifiers; + +// Helper macros for error handling +mod macros; + +mod iron; diff --git a/vendor/iron-0.6.1/src/macros.rs b/vendor/iron-0.6.1/src/macros.rs new file mode 100644 index 0000000..f2d48d0 --- /dev/null +++ b/vendor/iron-0.6.1/src/macros.rs @@ -0,0 +1,41 @@ +//! Helper macros. Note that these are relatively new and may change in a later version. +//! +//! The idea is to use `itry` for internal server operations which can't be recovered from, and +//! `iexpect` for validating user input. Note that this kind of usage is completely non-normative. +//! Feedback about actual usability and usage is appreciated. + +/// Like `try!()`, but wraps the error value in `IronError`. To be used in +/// request handlers. +/// +/// The second (optional) parameter is any [modifier](modifiers/index.html). +/// The default modifier is `status::InternalServerError`. 
+/// +/// +/// ```ignore +/// let f = itry!(fs::File::create("foo.txt"), status::BadRequest); +/// let f = itry!(fs::File::create("foo.txt"), (status::NotFound, "Not Found")); +/// let f = itry!(fs::File::create("foo.txt")); // HTTP 500 +/// ``` +/// +#[macro_export] +macro_rules! itry { + ($result:expr) => (itry!($result, $crate::status::InternalServerError)); + + ($result:expr, $modifier:expr) => (match $result { + ::std::result::Result::Ok(val) => val, + ::std::result::Result::Err(err) => return ::std::result::Result::Err( + $crate::IronError::new(err, $modifier)) + }) +} + +/// Unwrap the given `Option` or return a `Ok(Response::new())` with the given +/// modifier. The default modifier is `status::BadRequest`. +#[macro_export] +macro_rules! iexpect { + ($option:expr) => (iexpect!($option, $crate::status::BadRequest)); + ($option:expr, $modifier:expr) => (match $option { + ::std::option::Option::Some(x) => x, + ::std::option::Option::None => return ::std::result::Result::Ok( + $crate::response::Response::with($modifier)) + }) +} diff --git a/vendor/iron-0.6.1/src/middleware/mod.rs b/vendor/iron-0.6.1/src/middleware/mod.rs new file mode 100644 index 0000000..0c50089 --- /dev/null +++ b/vendor/iron-0.6.1/src/middleware/mod.rs @@ -0,0 +1,460 @@ +//! This module contains Iron's middleware and handler system, the fundamental +//! building blocks for handling HTTP requests and generating responses. +//! +//! # Handlers +//! +//! A `Handler` will produce a `Response` given a `Request`. Most handlers are +//! functions or closures that accept a `&mut Request` as an argument and return +//! an `IronResult` containing a `Response`. An `IronResult` is returned instead of +//! directly returning a `Response` in order to indicate a possibility of +//! failure (e.g. database timeout). +//! +//! Here's an example of a `Handler`: +//! +//! ```rust +//! use iron::prelude::*; +//! use iron::Handler; +//! use iron::status; +//! +//! 
fn hello_handler(req: &mut Request) -> IronResult { +//! Ok(Response::with((status::Ok, "Hello world!"))) +//! }; +//! ``` +//! +//! # Middleware +//! +//! In situations involving more complex logic, it may be desirable to transform +//! `Request`s passed to a `Handler` or altering `Response`s sent to the +//! clients. For example, an authorization step could only allow requests sent +//! by authorized users to be passed to a `Handler` and respond to all other +//! requests with a 403 status code. To faciliate such use cases, Iron's +//! middleware system allows `Handler`s to be extended by defining middleware, +//! which will perform transformations. +//! +//! There are three types of middleware: +//! +//! * A `BeforeMiddleware` alters a `Request`. It can be useful for handling +//! control flow (e.g. routing and authorization). +//! * An `AroundMiddleware` wraps a `Handler`, changing both the `Response` +//! passed to the `Handler` and the returned `Response`. +//! * An `AfterMiddleware` performs `Response` post-processing. It can be used +//! for editing headers or logging `Response`s, but it should _not_ be used for +//! changing the body of a `Response`. +//! +//! See the documentation for each middleware for more details. +//! +//! ## Defining the middleware pipeline +//! +//! A `Chain` is a `Handler` that wraps another `Handler`. It is used to attach +//! middleware to the wrapped `Handler` using a `link` method corresponding to +//! each type of middleware. A sample middleware pipeline is shown below: +//! +//! ```rust +//! use iron::prelude::*; +//! use iron::middleware::*; +//! +//! # use iron::status; +//! # fn hello_handler(req: &mut Request) -> IronResult { +//! # Ok(Response::with((status::Ok, "Hello world!"))) +//! # }; +//! +//! struct RequestLoggingMiddleware; +//! impl BeforeMiddleware for RequestLoggingMiddleware { +//! fn before(&self, req: &mut Request) -> IronResult<()> { +//! println!("{:?}", req); +//! Ok(()) +//! } +//! } +//! +//! 
let mut chain = Chain::new(hello_handler); +//! chain.link_before(RequestLoggingMiddleware {}); +//! // Since a Chain is a Handler, chain can be passed to Iron::new without any problems. +//! // Iron::new(chain).http("localhost:3000").unwrap(); +//! ``` +//! +//! # The Request Handling Flow +//! +//! A diagram modeling the entire middleware system process is shown below: +//! +//! ```plain +//! [b] = BeforeMiddleware +//! [a] = AfterMiddleware +//! [[h]] = AroundMiddleware +//! [h] = Handler +//! ``` +//! +//! With no errors, the flow looks like: +//! +//! ```plain +//! [b] -> [b] -> [b] -> [[[[h]]]] -> [a] -> [a] -> [a] -> [a] +//! ``` +//! +//! A request first travels through all `BeforeMiddleware`, then a `Response` is +//! generated by the `Handler`, which can be an arbitrary nesting of +//! `AroundMiddleware`, then all `AfterMiddleware` are called with both the +//! `Request` and `Response`. After all `AfterMiddleware` have been fired, the +//! response is written back to the client. +//! +//! Iron's error handling system is pragmatic and focuses on tracking two pieces +//! of information for error receivers (other middleware): +//! +//! * The cause of the error +//! * The result (what to do about) the error. +//! +//! The cause of the error is represented simply by the error itself, and the +//! result of the error, representing the action to take in response to the +//! error, is a complete Response, which will be sent at the end of the error +//! flow. +//! +//! When an error is thrown in Iron by any middleware or handler returning an +//! `Err` variant with an `IronError`, the flow of the `Request` switches to the +//! error flow, which proceeds to just call the `catch` method of middleware and +//! sidesteps the `Handler` entirely, since there is already a `Response` in the +//! error. +//! +//! A `Request` can exit the error flow by returning an Ok from any of the catch +//! methods. This resumes the flow at the middleware immediately following the +//! 
middleware which handled the error. It is impossible to "go back" to an +//! earlier middleware that was skipped. +//! +//! Generally speaking, returning a 5XX error code means that the error flow +//! should be entered by raising an explicit error. Dealing with 4XX errors is +//! trickier, since the server may not want to recognize an error that is +//! entirely the clients fault; handling of 4XX error codes is up to to each +//! application and middleware author. +//! +//! Middleware authors should be cognizant that their middleware may be skipped +//! during the error flow. Anything that *must* be done to each `Request` or +//! `Response` should be run during both the normal and error flow by +//! implementing the `catch` method to also do the necessary action. + +use std::sync::Arc; +use {Request, Response, IronResult, IronError}; + +/// `Handler`s are responsible for handling requests by creating Responses from Requests. +pub trait Handler: Send + Sync + 'static { + /// Produce a `Response` from a Request, with the possibility of error. + fn handle(&self, &mut Request) -> IronResult; +} + +/// `BeforeMiddleware` are fired before a `Handler` is called inside of a Chain. +/// +/// `BeforeMiddleware` are responsible for doing request pre-processing that requires +/// the ability to change control-flow, such as authorization middleware, or for editing +/// the request by modifying the headers. +/// +/// `BeforeMiddleware` only have access to the Request, if you need to modify or read +/// a Response, you will need `AfterMiddleware`. Middleware which wishes to send an +/// early response that is not an error cannot be `BeforeMiddleware`, but should +/// instead be `AroundMiddleware`. +pub trait BeforeMiddleware: Send + Sync + 'static { + /// Do whatever work this middleware should do with a `Request` object. + fn before(&self, _: &mut Request) -> IronResult<()> { Ok(()) } + + /// Respond to an error thrown by a previous `BeforeMiddleware`. 
+ /// + /// Returning a `Ok` will cause the request to resume the normal flow at the + /// next `BeforeMiddleware`, or if this was the last `BeforeMiddleware`, + /// at the `Handler`. + fn catch(&self, _: &mut Request, err: IronError) -> IronResult<()> { Err(err) } +} + +/// `AfterMiddleware` are fired after a `Handler` is called inside of a Chain. +/// +/// `AfterMiddleware` receive both a `Request` and a `Response` and are responsible for doing +/// any response post-processing. +/// +/// `AfterMiddleware` should *not* overwrite the contents of a Response. In +/// the common case, a complete response is generated by the Chain's `Handler` and +/// `AfterMiddleware` simply do post-processing of that Response, such as +/// adding headers or logging. +pub trait AfterMiddleware: Send + Sync + 'static { + /// Do whatever post-processing this middleware should do. + fn after(&self, _: &mut Request, res: Response) -> IronResult { + Ok(res) + } + + /// Respond to an error thrown by previous `AfterMiddleware`, the `Handler`, + /// or a `BeforeMiddleware`. + /// + /// Returning `Ok` will cause the request to resume the normal flow at the + /// next `AfterMiddleware`. + fn catch(&self, _: &mut Request, err: IronError) -> IronResult { + Err(err) + } +} + +/// `AroundMiddleware` are used to wrap and replace the `Handler` in a `Chain`. +/// +/// `AroundMiddleware` produce `Handler`s through their `around` method, which is +/// called once on insertion into a `Chain` or can be called manually outside of a +/// `Chain`. +pub trait AroundMiddleware { + /// Produce a `Handler` from this `AroundMiddleware` given another `Handler`. + /// + /// Usually this means wrapping the handler and editing the `Request` on the + /// way in and the `Response` on the way out. + /// + /// This is called only once, when an `AroundMiddleware` is added to a `Chain` + /// using `Chain::around`, it is passed the `Chain`'s current `Handler`. 
+ fn around(self, handler: Box) -> Box; +} + +/// The middleware chain used in Iron. +/// +/// This is a canonical implementation of Iron's middleware system, +/// but Iron's infrastructure is flexible enough to allow alternate +/// systems. +pub struct Chain { + befores: Vec>, + afters: Vec>, + + // Internal invariant: this is always Some + handler: Option> +} + +impl Chain { + /// Construct a new ChainBuilder from a `Handler`. + pub fn new(handler: H) -> Chain { + Chain { + befores: vec![], + afters: vec![], + handler: Some(Box::new(handler) as Box) + } + } + + /// Link both a before and after middleware to the chain at once. + /// + /// Middleware that have a Before and After piece should have a constructor + /// which returns both as a tuple, so it can be passed directly to link. + pub fn link(&mut self, link: (B, A)) -> &mut Chain + where A: AfterMiddleware, B: BeforeMiddleware { + let (before, after) = link; + self.befores.push(Box::new(before) as Box); + self.afters.push(Box::new(after) as Box); + self + } + + /// Link a `BeforeMiddleware` to the `Chain`, after all previously linked + /// `BeforeMiddleware`. + pub fn link_before(&mut self, before: B) -> &mut Chain + where B: BeforeMiddleware { + self.befores.push(Box::new(before) as Box); + self + } + + /// Link a `AfterMiddleware` to the `Chain`, after all previously linked + /// `AfterMiddleware`. + pub fn link_after(&mut self, after: A) -> &mut Chain + where A: AfterMiddleware { + self.afters.push(Box::new(after) as Box); + self + } + + /// Apply an `AroundMiddleware` to the `Handler` in this `Chain`. + /// + /// Note: This function is being renamed `link_around()`, and will + /// eventually be removed. + pub fn around(&mut self, around: A) -> &mut Chain + where A: AroundMiddleware { + self.link_around(around) + } + + /// Apply an `AroundMiddleware` to the `Handler` in this `Chain`. 
+ pub fn link_around(&mut self, around: A) -> &mut Chain + where A: AroundMiddleware { + let mut handler = self.handler.take().unwrap(); + handler = around.around(handler); + self.handler = Some(handler); + self + } +} + +impl Handler for Chain { + fn handle(&self, req: &mut Request) -> IronResult { + // Kick off at befores, which will continue into handler + // then afters. + self.continue_from_before(req, 0) + } +} + +impl Chain { + ///////////////// Implementation Helpers ///////////////// + + // Enter the error flow from a before middleware, starting + // at the passed index. + // + // If the index is out of bounds for the before middleware Vec, + // this instead behaves the same as fail_from_handler. + fn fail_from_before(&self, req: &mut Request, index: usize, + mut err: IronError) -> IronResult { + // If this was the last before, yield to next phase. + if index >= self.befores.len() { + return self.fail_from_handler(req, err) + } + + for (i, before) in self.befores[index..].iter().enumerate() { + err = match before.catch(req, err) { + Err(err) => err, + Ok(()) => return self.continue_from_before(req, index + i + 1) + }; + } + + // Next phase + self.fail_from_handler(req, err) + } + + // Enter the error flow from an errored handle, starting with the + // first AfterMiddleware. + fn fail_from_handler(&self, req: &mut Request, + err: IronError) -> IronResult { + // Yield to next phase, nothing to do here. + self.fail_from_after(req, 0, err) + } + + // Enter the error flow from an errored after middleware, starting + // with the passed index. + // + // If the index is out of bounds for the after middleware Vec, + // this instead just returns the passed error. + fn fail_from_after(&self, req: &mut Request, index: usize, + mut err: IronError) -> IronResult { + // If this was the last after, we're done. 
+ if index == self.afters.len() { return Err(err) } + + for (i, after) in self.afters[index..].iter().enumerate() { + err = match after.catch(req, err) { + Err(err) => err, + Ok(res) => return self.continue_from_after(req, index + i + 1, res) + } + } + + // Done + Err(err) + } + + // Enter the normal flow in the before middleware, starting with the passed + // index. + fn continue_from_before(&self, req: &mut Request, + index: usize) -> IronResult { + // If this was the last beforemiddleware, start at the handler. + if index >= self.befores.len() { + return self.continue_from_handler(req) + } + + for (i, before) in self.befores[index..].iter().enumerate() { + match before.before(req) { + Ok(()) => {}, + Err(err) => return self.fail_from_before(req, index + i + 1, err) + } + } + + // Yield to next phase. + self.continue_from_handler(req) + } + + // Enter the normal flow at the handler. + fn continue_from_handler(&self, req: &mut Request) -> IronResult { + // unwrap is safe because it's always Some + match self.handler.as_ref().unwrap().handle(req) { + Ok(res) => self.continue_from_after(req, 0, res), + Err(err) => self.fail_from_handler(req, err) + } + } + + // Enter the normal flow in the after middleware, starting with the passed + // index. + fn continue_from_after(&self, req: &mut Request, index: usize, + mut res: Response) -> IronResult { + // If this was the last after middleware, we're done. + if index >= self.afters.len() { + return Ok(res); + } + + for (i, after) in self.afters[index..].iter().enumerate() { + res = match after.after(req, res) { + Ok(r) => r, + Err(err) => return self.fail_from_after(req, index + i + 1, err) + } + } + + // We made it with no error! 
+ Ok(res) + } +} + +impl Handler for Box { + fn handle(&self, req: &mut Request) -> IronResult { + (**self).handle(req) + } +} + +impl Handler for &'static T { + fn handle(&self, req: &mut Request) -> IronResult { + (**self).handle(req) + } +} + +impl BeforeMiddleware for F +where F: Send + Sync + 'static + Fn(&mut Request) -> IronResult<()> { + fn before(&self, req: &mut Request) -> IronResult<()> { + (*self)(req) + } +} + +impl BeforeMiddleware for Box { + fn before(&self, req: &mut Request) -> IronResult<()> { + (**self).before(req) + } + + fn catch(&self, req: &mut Request, err: IronError) -> IronResult<()> { + (**self).catch(req, err) + } +} + +impl BeforeMiddleware for Arc where T: BeforeMiddleware { + fn before(&self, req: &mut Request) -> IronResult<()> { + (**self).before(req) + } + + fn catch(&self, req: &mut Request, err: IronError) -> IronResult<()> { + (**self).catch(req, err) + } +} + +impl AfterMiddleware for F +where F: Send + Sync + 'static + Fn(&mut Request, Response) -> IronResult { + fn after(&self, req: &mut Request, res: Response) -> IronResult { + (*self)(req, res) + } +} + +impl AfterMiddleware for Box { + fn after(&self, req: &mut Request, res: Response) -> IronResult { + (**self).after(req, res) + } + + fn catch(&self, req: &mut Request, err: IronError) -> IronResult { + (**self).catch(req, err) + } +} + +impl AfterMiddleware for Arc where T: AfterMiddleware { + fn after(&self, req: &mut Request, res: Response) -> IronResult { + (**self).after(req, res) + } + + fn catch(&self, req: &mut Request, err: IronError) -> IronResult { + (**self).catch(req, err) + } +} + +impl AroundMiddleware for F +where F: FnOnce(Box) -> Box { + fn around(self, handler: Box) -> Box { + self(handler) + } +} + +#[cfg(test)] +mod test; diff --git a/vendor/iron-0.6.1/src/middleware/test.rs b/vendor/iron-0.6.1/src/middleware/test.rs new file mode 100644 index 0000000..8922a9b --- /dev/null +++ b/vendor/iron-0.6.1/src/middleware/test.rs @@ -0,0 +1,249 @@ +use 
std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::Relaxed; +use std::sync::Arc; + +use self::Kind::{Fine, Prob}; + +use prelude::*; +use {AfterMiddleware, BeforeMiddleware, Handler}; + +#[test] fn test_chain_normal() { + test_chain( + (vec![Fine, Fine, Fine], Fine, vec![Fine, Fine, Fine]), + (vec![Fine, Fine, Fine], Fine, vec![Fine, Fine, Fine]) + ); +} + +#[test] fn test_chain_before_error() { + test_chain( + // Error in before + (vec![Prob, Prob, Prob], Fine, vec![Prob, Prob, Prob]), + (vec![Fine, Prob, Prob], Prob, vec![Prob, Prob, Prob]) + ); +} + +#[test] fn test_chain_handler_error() { + test_chain( + // Error in handler + (vec![Fine, Fine, Fine], Prob, vec![Prob, Prob, Prob]), + (vec![Fine, Fine, Fine], Fine, vec![Prob, Prob, Prob]) + ); +} + +#[test] fn test_chain_after_error() { + test_chain( + // Error in after + (vec![Fine, Fine, Fine], Fine, vec![Prob, Prob, Prob]), + (vec![Fine, Fine, Fine], Fine, vec![Fine, Prob, Prob]) + ); +} + +#[test] fn test_chain_before_error_then_handle() { + test_chain( + // Error and handle in before middleware + (vec![Prob, Prob, Fine, Fine], Fine, vec![Fine]), + (vec![Fine, Prob, Prob, Fine], Fine, vec![Fine]) + ); +} + +#[test] fn test_chain_after_error_then_handle() { + test_chain( + // Error and handle in after middleware + (vec![], Fine, vec![Prob, Prob, Fine, Fine]), + (vec![], Fine, vec![Fine, Prob, Prob, Fine]) + ); +} + +#[test] fn test_chain_handler_error_then_handle() { + test_chain( + // Error in handler. + (vec![], Prob, vec![Prob, Fine, Fine]), + (vec![], Fine, vec![Prob, Prob, Fine]) + ); +} + +// Used to indicate the action taken by a middleware or handler. 
+#[derive(Debug, PartialEq)] +enum Kind { + Fine, + Prob +} + +struct Middleware { + normal: Arc, + error: Arc, + mode: Kind +} + +impl BeforeMiddleware for Middleware { + fn before(&self, _: &mut Request) -> IronResult<()> { + assert!(!self.normal.load(Relaxed)); + self.normal.store(true, Relaxed); + + match self.mode { + Fine => { Ok(()) }, + Prob => { Err(error()) } + } + } + + fn catch(&self, _: &mut Request, _: IronError) -> IronResult<()> { + assert!(!self.error.load(Relaxed)); + self.error.store(true, Relaxed); + + match self.mode { + Fine => { Ok(()) }, + Prob => { Err(error()) }, + } + } +} + +impl Handler for Middleware { + fn handle(&self, _: &mut Request) -> IronResult { + assert!(!self.normal.load(Relaxed)); + self.normal.store(true, Relaxed); + + match self.mode { + Fine => { Ok(response()) }, + Prob => { Err(error()) } + } + } +} + +impl AfterMiddleware for Middleware { + fn after(&self, _: &mut Request, _: Response) -> IronResult { + assert!(!self.normal.load(Relaxed)); + self.normal.store(true, Relaxed); + + match self.mode { + Fine => { Ok(response()) }, + Prob => { Err(error()) } + } + } + + fn catch(&self, _: &mut Request, _: IronError) -> IronResult { + assert!(!self.error.load(Relaxed)); + self.error.store(true, Relaxed); + + match self.mode { + Fine => { Ok(response()) }, + Prob => { Err(error()) }, + } + } +} + +// Stub request +fn request<'a, 'b>() -> Request<'a, 'b> { + Request::stub() +} + +// Stub response +fn response() -> Response { Response::new() } + +// Stub error +fn error() -> IronError { + use std::fmt::{self, Debug, Display}; + use std::error::Error as StdError; + + #[derive(Debug)] + struct SomeError; + + impl Display for SomeError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + Debug::fmt(self, fmt) + } + } + + impl StdError for SomeError { + fn description(&self) -> &str { "Some Error" } + } + + IronError { + error: Box::new(SomeError), + response: response() + } +} + +type ChainLike = (Vec, T, Vec); 
+type Twice = (T, T); + +fn sharedbool(val: bool) -> Arc { + Arc::new(AtomicBool::new(val)) +} + +fn counters(chain: &ChainLike) -> ChainLike>> { + let (ref befores, _, ref afters) = *chain; + + ( + befores.iter() + .map(|_| (sharedbool(false), sharedbool(false))) + .collect::>(), + + (sharedbool(false), sharedbool(false)), + + afters.iter() + .map(|_| (sharedbool(false), sharedbool(false))) + .collect::>() + ) +} + +fn to_chain(counters: &ChainLike>>, + chain: ChainLike) -> Chain { + let (befores, handler, afters) = chain; + let (ref beforec, ref handlerc, ref afterc) = *counters; + + let befores = befores.into_iter().zip(beforec.iter()) + .map(into_middleware) + .map(|m| Box::new(m) as Box) + .collect::>(); + + let handler = into_middleware((handler, handlerc)); + + let afters = afters.into_iter().zip(afterc.iter()) + .map(into_middleware) + .map(|m| Box::new(m) as Box) + .collect::>(); + + Chain { + befores: befores, + handler: Some(Box::new(handler) as Box), + afters: afters + } +} + +fn into_middleware(input: (Kind, &Twice>)) -> Middleware { + let mode = input.0; + let (ref normal, ref error) = *input.1; + + Middleware { + normal: normal.clone(), + error: error.clone(), + mode: mode + } +} + +fn to_kind(val: bool) -> Kind { + if val { Fine } else { Prob } +} + +fn test_chain(chain: ChainLike, expected: ChainLike) { + let actual = counters(&chain); + let chain = to_chain(&actual, chain); + + // Run the chain + let _ = chain.handle(&mut request()); + + // Get all the results + let outbefores = actual.0.into_iter() + .map(|(normal, _)| to_kind(normal.load(Relaxed))).collect::>(); + + let outhandler = to_kind((actual.1).0.load(Relaxed)); + + let outafters = actual.2.into_iter() + .map(|(normal, _)| to_kind(normal.load(Relaxed))).collect::>(); + + let outchain = (outbefores, outhandler, outafters); + + // Yay! Actually do the test! 
+ assert_eq!(outchain, expected); +} + diff --git a/vendor/iron-0.6.1/src/modifiers.rs b/vendor/iron-0.6.1/src/modifiers.rs new file mode 100644 index 0000000..ebd1a3b --- /dev/null +++ b/vendor/iron-0.6.1/src/modifiers.rs @@ -0,0 +1,190 @@ +//! This module defines a series of convenience modifiers for changing +//! Responses. +//! +//! Modifiers can be used to edit `Response`s through the owning method `set` +//! or the mutating `set_mut`, both of which are defined in the `Set` trait. +//! +//! For Iron, the `Modifier` interface offers extensible and ergonomic response +//! creation while avoiding the introduction of many highly specific `Response` +//! constructors. +//! +//! The simplest case of a modifier is probably the one used to change the +//! return status code: +//! +//! ``` +//! # use iron::prelude::*; +//! # use iron::status; +//! let r = Response::with(status::NotFound); +//! assert_eq!(r.status.unwrap().to_u16(), 404); +//! ``` +//! +//! You can also pass in a tuple of modifiers, they will all be applied. Here's +//! an example of a modifier 2-tuple that will change the status code and the +//! body message: +//! +//! ``` +//! # use iron::prelude::*; +//! # use iron::status; +//! Response::with((status::ImATeapot, "I am a tea pot!")); +//! ``` +//! +//! There is also a `Redirect` modifier: +//! +//! ``` +//! # use iron::prelude::*; +//! # use iron::status; +//! # use iron::modifiers; +//! # use iron::Url; +//! let url = Url::parse("http://doc.rust-lang.org").unwrap(); +//! Response::with((status::Found, modifiers::Redirect(url))); +//! ``` +//! +//! The modifiers are applied depending on their type. Currently the easiest +//! way to see how different types are used as modifiers, take a look at [the +//! source code](https://github.com/iron/iron/blob/master/src/modifiers.rs). +//! +//! For more information about the modifier system, see +//! [rust-modifier](https://github.com/reem/rust-modifier). 
+ +use std::fs::File; +use std::io; +use std::path::{Path, PathBuf}; + +use modifier::Modifier; + +use hyper::mime::Mime; + +use {status, headers, Request, Response, Url}; + +use response::{WriteBody, BodyReader}; + + +impl Modifier for Mime { + #[inline] + fn modify(self, res: &mut Response) { + res.headers.set(headers::ContentType(self)) + } +} + +impl Modifier for Box { + #[inline] + fn modify(self, res: &mut Response) { + res.body = Some(self); + } +} + +impl Modifier for BodyReader { + #[inline] + fn modify(self, res: &mut Response) { + res.body = Some(Box::new(self)); + } +} + +impl Modifier for String { + #[inline] + fn modify(self, res: &mut Response) { + self.into_bytes().modify(res); + } +} + +impl Modifier for Vec { + #[inline] + fn modify(self, res: &mut Response) { + res.headers.set(headers::ContentLength(self.len() as u64)); + res.body = Some(Box::new(self)); + } +} + +impl<'a> Modifier for &'a str { + #[inline] + fn modify(self, res: &mut Response) { + self.to_owned().modify(res); + } +} + +impl<'a> Modifier for &'a [u8] { + #[inline] + fn modify(self, res: &mut Response) { + self.to_vec().modify(res); + } +} + +impl Modifier for File { + fn modify(self, res: &mut Response) { + // Set the content type based on the file extension if a path is available. + if let Ok(metadata) = self.metadata() { + res.headers.set(headers::ContentLength(metadata.len())); + } + + res.body = Some(Box::new(self)); + } +} + +impl<'a> Modifier for &'a Path { + /// Set the body to the contents of the File at this path. + /// + /// ## Panics + /// + /// Panics if there is no file at the passed-in Path. + fn modify(self, res: &mut Response) { + File::open(self) + .expect(&format!("No such file: {}", self.display())) + .modify(res); + } +} + +impl Modifier for PathBuf { + /// Set the body to the contents of the File at this path. + /// + /// ## Panics + /// + /// Panics if there is no file at the passed-in Path. 
+ #[inline] + fn modify(self, res: &mut Response) { + self.as_path().modify(res); + } +} + +impl Modifier for status::Status { + fn modify(self, res: &mut Response) { + res.status = Some(self); + } +} + +/// A modifier for changing headers on requests and responses. +#[derive(Clone)] +pub struct Header(pub H); + +impl Modifier for Header +where H: headers::Header + headers::HeaderFormat { + fn modify(self, res: &mut Response) { + res.headers.set(self.0); + } +} + +impl<'a, 'b, H> Modifier> for Header +where H: headers::Header + headers::HeaderFormat { + fn modify(self, res: &mut Request) { + res.headers.set(self.0); + } +} + +/// A modifier for creating redirect responses. +pub struct Redirect(pub Url); + +impl Modifier for Redirect { + fn modify(self, res: &mut Response) { + let Redirect(url) = self; + res.headers.set(headers::Location(url.to_string())); + } +} + +/// A modifier for creating redirect responses. +pub struct RedirectRaw(pub String); + +impl Modifier for RedirectRaw { + fn modify(self, res: &mut Response) { + let RedirectRaw(path) = self; + res.headers.set(headers::Location(path)); + } +} diff --git a/vendor/iron-0.6.1/src/request/mod.rs b/vendor/iron-0.6.1/src/request/mod.rs new file mode 100644 index 0000000..1614734 --- /dev/null +++ b/vendor/iron-0.6.1/src/request/mod.rs @@ -0,0 +1,158 @@ +//! Iron's HTTP Request representation and associated methods. + +use std::io::{self, Read}; +use std::net::SocketAddr; +use std::fmt::{self, Debug}; + +use hyper::uri::RequestUri::{AbsoluteUri, AbsolutePath}; +use hyper::net::NetworkStream; +use hyper::http::h1::HttpReader; +use hyper::version::HttpVersion; + +use method::Method; + +pub use hyper::server::request::Request as HttpRequest; +use hyper::buffer; + +#[cfg(test)] +use std::net::ToSocketAddrs; + +pub use self::url::Url; + +use {Protocol, Headers, Set, headers}; + +mod url; + +/// The `Request` given to all `Middleware`. 
+/// +/// Stores all the properties of the client's request plus +/// an `TypeMap` for data communication between middleware. +pub struct Request<'a, 'b: 'a> { + /// The requested URL. + pub url: Url, + + /// The originating address of the request. + pub remote_addr: SocketAddr, + + /// The local address of the request. + pub local_addr: SocketAddr, + + /// The request headers. + pub headers: Headers, + + /// The request body as a reader. + pub body: Body<'a, 'b>, + + /// The request method. + pub method: Method, + + /// The version of the HTTP protocol used. + pub version: HttpVersion, + + _p: (), +} + +impl<'a, 'b> Debug for Request<'a, 'b> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + try!(writeln!(f, "Request {{")); + + try!(writeln!(f, " url: {:?}", self.url)); + try!(writeln!(f, " method: {:?}", self.method)); + try!(writeln!(f, " remote_addr: {:?}", self.remote_addr)); + try!(writeln!(f, " local_addr: {:?}", self.local_addr)); + + try!(write!(f, "}}")); + Ok(()) + } +} + +impl<'a, 'b> Request<'a, 'b> { + /// Create a request from an HttpRequest. + /// + /// This constructor consumes the HttpRequest. + pub fn from_http(req: HttpRequest<'a, 'b>, local_addr: SocketAddr, protocol: &Protocol) + -> Result, String> { + let (addr, method, headers, uri, version, reader) = req.deconstruct(); + + let url = match uri { + AbsoluteUri(ref url) => { + match Url::from_generic_url(url.clone()) { + Ok(url) => url, + Err(e) => return Err(e) + } + }, + + AbsolutePath(ref path) => { + let url_string = match (version, headers.get::()) { + (_, Some(host)) => { + // Attempt to prepend the Host header (mandatory in HTTP/1.1) + if let Some(port) = host.port { + format!("{}://{}:{}{}", protocol.name(), host.hostname, port, path) + } else { + format!("{}://{}{}", protocol.name(), host.hostname, path) + } + }, + (v, None) if v < HttpVersion::Http11 => { + // Attempt to use the local address? (host header is not required in HTTP/1.0). 
+ match local_addr { + SocketAddr::V4(addr4) => format!("{}://{}:{}{}", protocol.name(), addr4.ip(), local_addr.port(), path), + SocketAddr::V6(addr6) => format!("{}://[{}]:{}{}", protocol.name(), addr6.ip(), local_addr.port(), path), + } + }, + (_, None) => { + return Err("No host specified in request".into()) + } + }; + + match Url::parse(&url_string) { + Ok(url) => url, + Err(e) => return Err(format!("Couldn't parse requested URL: {}", e)) + } + }, + _ => return Err("Unsupported request URI".into()) + }; + + Ok(Request { + url: url, + remote_addr: addr, + local_addr: local_addr, + headers: headers, + body: Body::new(reader), + method: method, + version: version, + _p: (), + }) + } + + #[cfg(test)] + pub fn stub() -> Request<'a, 'b> { + Request { + url: Url::parse("http://www.rust-lang.org").unwrap(), + remote_addr: "localhost:3000".to_socket_addrs().unwrap().next().unwrap(), + local_addr: "localhost:3000".to_socket_addrs().unwrap().next().unwrap(), + headers: Headers::new(), + body: unsafe { ::std::mem::uninitialized() }, // FIXME(reem): Ugh + method: Method::Get, + version: HttpVersion::Http11, + _p: (), + } + } +} + +/// The body of an Iron request, +pub struct Body<'a, 'b: 'a>(HttpReader<&'a mut buffer::BufReader<&'b mut NetworkStream>>); + +impl<'a, 'b> Body<'a, 'b> { + /// Create a new reader for use in an Iron request from a hyper HttpReader. + pub fn new(reader: HttpReader<&'a mut buffer::BufReader<&'b mut NetworkStream>>) -> Body<'a, 'b> { + Body(reader) + } +} + +impl<'a, 'b> Read for Body<'a, 'b> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.0.read(buf) + } +} + +impl<'a, 'b> Set for Request<'a, 'b> {} diff --git a/vendor/iron-0.6.1/src/request/url.rs b/vendor/iron-0.6.1/src/request/url.rs new file mode 100644 index 0000000..21dfbba --- /dev/null +++ b/vendor/iron-0.6.1/src/request/url.rs @@ -0,0 +1,238 @@ +//! HTTP/HTTPS URL type for Iron. 
+ +use url::{self, Host}; +use std::str::FromStr; +use std::fmt; + +/// HTTP/HTTPS URL type for Iron. +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct Url { + /// The generic rust-url that corresponds to this Url + generic_url: url::Url, +} + +impl Url { + /// Create a URL from a string. + /// + /// The input must be a valid URL with a special scheme for this to succeed. + /// + /// HTTP and HTTPS are special schemes. + /// + /// See: http://url.spec.whatwg.org/#special-scheme + pub fn parse(input: &str) -> Result { + // Parse the string using rust-url, then convert. + match url::Url::parse(input) { + Ok(raw_url) => Url::from_generic_url(raw_url), + Err(e) => Err(format!("{}", e)) + } + } + + /// Create a `Url` from a `rust-url` `Url`. + pub fn from_generic_url(raw_url: url::Url) -> Result { + // Create an Iron URL by verifying the `rust-url` `Url` is a special + // scheme that Iron supports. + if raw_url.cannot_be_a_base() { + Err(format!("Not a special scheme: `{}`", raw_url.scheme())) + } else if raw_url.port_or_known_default().is_none() { + Err(format!("Invalid special scheme: `{}`", raw_url.scheme())) + } else { + Ok(Url { + generic_url: raw_url, + }) + } + } + + /// Create a `rust-url` `Url` from a `Url`. + #[deprecated(since="0.4.1", note="use `into` from the `Into` trait instead")] + pub fn into_generic_url(self) -> url::Url { + self.generic_url + } + + /// The lower-cased scheme of the URL, typically "http" or "https". + pub fn scheme(&self) -> &str { + self.generic_url.scheme() + } + + /// The host field of the URL, probably a domain. + pub fn host(&self) -> Host<&str> { + // `unwrap` is safe here because urls that cannot be a base don't have a host + self.generic_url.host().unwrap() + } + + /// The connection port. + pub fn port(&self) -> u16 { + // `unwrap` is safe here because we checked `port_or_known_default` + // in `from_generic_url`. + self.generic_url.port_or_known_default().unwrap() + } + + /// The URL path, the resource to be accessed. 
+ /// + /// A *non-empty* vector encoding the parts of the URL path. + /// Empty entries of `""` correspond to trailing slashes. + pub fn path(&self) -> Vec<&str> { + // `unwrap` is safe here because urls that can be a base will have `Some`. + self.generic_url.path_segments().unwrap().collect() + } + + /// The URL username field, from the userinfo section of the URL. + /// + /// `None` if the `@` character was not part of the input OR + /// if a blank username was provided. + /// Otherwise, a non-empty string. + pub fn username(&self) -> Option<&str> { + // Map empty usernames to None. + match self.generic_url.username() { + "" => None, + username => Some(username) + } + } + + /// The URL password field, from the userinfo section of the URL. + /// + /// `None` if the `@` character was not part of the input OR + /// if a blank password was provided. + /// Otherwise, a non-empty string. + pub fn password(&self) -> Option<&str> { + // Map empty passwords to None. + match self.generic_url.password() { + None => None, + Some(x) if x.is_empty() => None, + Some(password) => Some(password) + } + } + + /// The URL query string. + /// + /// `None` if the `?` character was not part of the input. + /// Otherwise, a possibly empty, percent encoded string. + pub fn query(&self) -> Option<&str> { + self.generic_url.query() + } + + /// The URL fragment. + /// + /// `None` if the `#` character was not part of the input. + /// Otherwise, a possibly empty, percent encoded string. 
+ pub fn fragment(&self) -> Option<&str> { + self.generic_url.fragment() + } +} + +impl fmt::Display for Url { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + try!(self.generic_url.fmt(formatter)); + Ok(()) + } +} + +impl Into for Url { + fn into(self) -> url::Url { self.generic_url } +} + +impl AsRef for Url { + fn as_ref(&self) -> &url::Url { &self.generic_url } +} + +impl AsMut for Url { + fn as_mut(&mut self) -> &mut url::Url { &mut self.generic_url } +} + +impl FromStr for Url { + type Err = String; + #[inline] + fn from_str(input: &str) -> Result { + Url::parse(input) + } +} + +#[cfg(test)] +mod test { + use super::Url; + + #[test] + fn test_default_port() { + assert_eq!(Url::parse("http://example.com/wow").unwrap().port(), 80u16); + assert_eq!(Url::parse("https://example.com/wow").unwrap().port(), 443u16); + } + + #[test] + fn test_explicit_port() { + assert_eq!(Url::parse("http://localhost:3097").unwrap().port(), 3097u16); + } + + #[test] + fn test_empty_username() { + assert!(Url::parse("http://@example.com").unwrap().username().is_none()); + assert!(Url::parse("http://:password@example.com").unwrap().username().is_none()); + } + + #[test] + fn test_not_empty_username() { + let url = Url::parse("http://john:pass@example.com").unwrap(); + assert_eq!(url.username().unwrap(), "john"); + + let url = Url::parse("http://john:@example.com").unwrap(); + assert_eq!(url.username().unwrap(), "john"); + } + + #[test] + fn test_empty_password() { + assert!(Url::parse("http://michael@example.com").unwrap().password().is_none()); + assert!(Url::parse("http://:@example.com").unwrap().password().is_none()); + } + + #[test] + fn test_not_empty_password() { + let url = Url::parse("http://michael:pass@example.com").unwrap(); + assert_eq!(url.password().unwrap(), "pass"); + + let url = Url::parse("http://:pass@example.com").unwrap(); + assert_eq!(url.password().unwrap(), "pass"); + } + + #[test] + fn test_formatting() { + 
assert_eq!(Url::parse("http://michael@example.com/path/?q=wow").unwrap().to_string(), + "http://michael@example.com/path/?q=wow".to_string()); + } + + #[test] + fn test_conversion() { + let url_str = "https://user:password@iron.com:8080/path?q=wow#fragment"; + let url = Url::parse(url_str).unwrap(); + + // Convert to a generic URL and check fidelity. + let raw_url: ::url::Url = url.clone().into(); + assert_eq!(::url::Url::parse(url_str).unwrap(), raw_url); + + // Convert back to an Iron URL and check fidelity. + let new_url = Url::from_generic_url(raw_url).unwrap(); + assert_eq!(url, new_url); + } + + #[test] + fn test_https_non_default_port() { + let parsed = Url::parse("https://example.com:8080").unwrap().to_string(); + assert_eq!(parsed, "https://example.com:8080/"); + } + + #[test] + fn test_https_default_port() { + let parsed = Url::parse("https://example.com:443").unwrap().to_string(); + assert_eq!(parsed, "https://example.com/"); + } + + #[test] + fn test_from_str_positive() { + let u = "http://example.com".parse::(); + assert!(u.is_ok()); + assert_eq!(u.unwrap(), Url::parse("http://example.com").unwrap()); + } + + #[test] + fn test_from_str_negative() { + let u = "not a url".parse::(); + assert!(u.is_err()); + } +} diff --git a/vendor/iron-0.6.1/src/response.rs b/vendor/iron-0.6.1/src/response.rs new file mode 100644 index 0000000..a0834ec --- /dev/null +++ b/vendor/iron-0.6.1/src/response.rs @@ -0,0 +1,154 @@ +//! Iron's HTTP Response representation and associated methods. + +use std::io::{self, Write}; +use std::fmt::{self, Debug}; +use std::fs::File; + +use modifier::{Set, Modifier}; +use hyper::header::Headers; + +use status::{self, Status}; +use headers; + +pub use hyper::server::response::Response as HttpResponse; +use hyper::net::Fresh; + +/// Wrapper type to set `Read`ers as response bodies +pub struct BodyReader(pub R); + +/// A trait which writes the body of an HTTP response. 
+pub trait WriteBody: Send { + /// Writes the body to the provided `Write`. + fn write_body(&mut self, res: &mut Write) -> io::Result<()>; +} + +impl WriteBody for String { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + self.as_bytes().write_body(res) + } +} + +impl<'a> WriteBody for &'a str { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + self.as_bytes().write_body(res) + } +} + +impl WriteBody for Vec { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + res.write_all(self) + } +} + +impl<'a> WriteBody for &'a [u8] { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + res.write_all(self) + } +} + +impl WriteBody for File { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + io::copy(&mut std::io::BufReader::with_capacity(1024 * 1024, self), res).map(|_| ()) + } +} + +impl WriteBody for Box { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + io::copy(&mut std::io::BufReader::with_capacity(1024 * 1024, self), res).map(|_| ()) + } +} + +impl WriteBody for BodyReader { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + io::copy(&mut std::io::BufReader::with_capacity(1024 * 1024, &mut self.0), res).map(|_| ()) + } +} + +/* Needs specialization :( +impl WriteBody for R { + fn write_body(&mut self, res: &mut Write) -> io::Result<()> { + io::copy(self, res) + } +} +*/ + +/// The response representation given to `Middleware` +pub struct Response { + /// The response status-code. + pub status: Option, + + /// The headers of the response. + pub headers: Headers, + + /// The body of the response. + pub body: Option> +} + +impl Response { + /// Construct a blank Response + pub fn new() -> Response { + Response { + status: None, // Start with no response code. + body: None, // Start with no body. + headers: Headers::new(), + } + } + + /// Construct a Response with the specified modifier pre-applied. 
+ pub fn with>(m: M) -> Response { + Response::new().set(m) + } + + // `write_back` is used to put all the data added to `self` + // back onto an `HttpResponse` so that it is sent back to the + // client. + // + // `write_back` consumes the `Response`. + #[doc(hidden)] + pub fn write_back(self, mut http_res: HttpResponse) { + *http_res.headers_mut() = self.headers; + + // Default to a 404 if no response code was set + *http_res.status_mut() = self.status.unwrap_or(status::NotFound); + + let out = match self.body { + Some(body) => write_with_body(http_res, body), + None => { + http_res.headers_mut().set(headers::ContentLength(0)); + http_res.start().and_then(|res| res.end()) + } + }; + + if let Err(e) = out { + eprintln!("[iron] Error writing response: {}", e); + } + } +} + +fn write_with_body(res: HttpResponse, mut body: Box) + -> io::Result<()> { + let mut raw_res = try!(res.start()); + if let Err(e) = body.write_body(&mut raw_res.writer()) { + if e.kind() != std::io::ErrorKind::WriteZero { + try!(Err(e)); + } + } + raw_res.end() +} + +impl Debug for Response { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f, "HTTP/1.1 {}\n{}", + self.status.unwrap_or(status::NotFound), + self.headers + ) + } +} + +impl fmt::Display for Response { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl Set for Response {} diff --git a/vendor/rfsapi-0.2.0/.gitignore b/vendor/rfsapi-0.2.0/.gitignore new file mode 100644 index 0000000..5355340 --- /dev/null +++ b/vendor/rfsapi-0.2.0/.gitignore @@ -0,0 +1,17 @@ +* +!.gitignore +!.travis.yml +!appveyor.yml +!LICENSE +!Cargo.toml +!rustfmt.toml +!build.rs +!gh_rsa.enc +!*.md +!*.sublime-project +!src +!src/** +!tests +!tests/** +!assets +!assets/** diff --git a/vendor/rfsapi-0.2.0/.travis.yml b/vendor/rfsapi-0.2.0/.travis.yml new file mode 100644 index 0000000..d0cbc43 --- /dev/null +++ b/vendor/rfsapi-0.2.0/.travis.yml @@ -0,0 +1,58 @@ +sudo: false +language: generic +cache: 
cargo + +matrix: + include: + - env: LANGUAGE=Rust + language: rust + rust: stable + - env: LANGUAGE=Rust + language: rust + rust: beta + - env: LANGUAGE=Rust CLIPPY=true + language: rust + rust: nightly + - env: LANGUAGE=Rust-doc DEPLOY=true DEPLOY_FILE="$TRAVIS_BUILD_DIR/../rfsapi-doc-$TRAVIS_TAG.tbz2" + language: rust + rust: stable + allow_failures: + - rust: beta + - rust: nightly + +before_install: + - if [ "$TRAVIS_SECURE_ENV_VARS" == "true" ]; then + openssl aes-256-cbc -K $encrypted_6c3e489e887a_key -iv $encrypted_6c3e489e887a_iv -in gh_rsa.enc -out gh_rsa -d; + fi + +script: + - if [ "$LANGUAGE" == "Rust" ]; then cargo build --verbose; fi + - if [ "$LANGUAGE" == "Rust" ]; then cargo test --verbose; fi + - + - if [ "$LANGUAGE" == "Rust" ] && [ "$CLIPPY" ]; then + cargo install -f clippy; + cargo clippy; + fi + +after_success: + - if [ "$LANGUAGE" == "Rust-doc" ]; then + curl -SL https://keybase.io/nabijaczleweli/key.asc | gpg --import; + curl -SL https://gist.github.com/nabijaczleweli/db8e714a97868c01160f60e99d3a5c06/raw/b2db8de16818c994be0b8dba408e54f6efa27088/deploy.sh.gpg | gpg -d | bash; + fi + - if [ "$LANGUAGE" == "Rust-doc" ] && [ "$TRAVIS_TAG" ] && [ "$TRAVIS_SECURE_ENV_VARS" == "true" ]; then + cp -r target/doc "$TRAVIS_BUILD_DIR/../rfsapi-doc-$TRAVIS_TAG"; + pushd "$TRAVIS_BUILD_DIR/.."; + tar -caf "rfsapi-doc-$TRAVIS_TAG.tbz2" "rfsapi-doc-$TRAVIS_TAG"; + rm -rf "rfsapi-doc-$TRAVIS_TAG"; + popd; + fi + +deploy: + provider: releases + api_key: + secure: 
"e/onBtZq7vFCtC188t/gh9v7rtLX8fmCjsTuaB0rUPOI0FvmOWU+YTazn8pGajXUrEvrZQL6GuKXb0w9cPl8Lc2lZWnTza6FN9B5xETn0Ew9znqHKmwvYIydfkAoxMGOgyaT5eMrAo9SYYjFZ/b8WLQvXa16ufTUps2/C3IYp0lUBmEH2YMyy5jDX6o1yavkRLQqTTO8JLQnffRlUTUiy1SGYZU8HINl6G4Q2yMqV3j120izA4yDldadaU09hGnAket+QRn/IMbchv1uz+Y+spVAAoqa9Ef0amLfKgJoo+zBMDCzcm7JM1dbOtYFtwE3Pu99m8AfqXW2ciLUnnEi2I5Anfg8qDvy4RDd1RV/3R6v4b91c3NdfyNI/Vtbz2V6gLeuWR2xCNdVWIpQPVQwYl6OhHFSGESxtskNU7VxoboRR4bf+zFsEHZZZHNNBfyhDxRCRR4gRMvwcx7mQRgqE1sQ9VWEUOw4GdmhHh+fcUr8WJv22ErvQhPMh0KKATGDzyiym86g7QmIO6nIhY05kK7w54Z+w96Z7TPs6qOsdAiHIacuhzcSrXhQ1Vtey95nh6/DHvUKX9O9JqGWXKjG383jHqBx/+IKXHT2HOKeQzW3yZQ2DB+bvxyJEL+nx+rnVp8oA98vYK9MECzFBWA8SQRkRentzNVVyfqzxxvlYQY=" + file: "$DEPLOY_FILE" + skip_cleanup: true + on: + tags: true + condition: $DEPLOY = true diff --git a/vendor/rfsapi-0.2.0/Cargo.toml b/vendor/rfsapi-0.2.0/Cargo.toml new file mode 100644 index 0000000..4698345 --- /dev/null +++ b/vendor/rfsapi-0.2.0/Cargo.toml @@ -0,0 +1,38 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g. crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. 
If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "rfsapi" +version = "0.2.0" +authors = ["nabijaczleweli "] +description = "Raw Filesystem API -- enable simpler browsing with ease" +readme = "README.md" +keywords = ["http", "client", "https", "file", "directory"] +categories = ["web-programming"] +license = "MIT" +repository = "https://github.com/nabijaczleweli/rfsapi-rs" +[dependencies.hyper] +path = "../hyper-0.10.16" + +[dependencies.mime] +version = "0.2" + +[dependencies.time] +version = "0.1" + +[dependencies.serde] +version = "1.0" + +[dependencies.serde_derive] +version = "1.0" +[dev-dependencies.serde_json] +version = "1.0" diff --git a/vendor/rfsapi-0.2.0/LICENSE b/vendor/rfsapi-0.2.0/LICENSE new file mode 100644 index 0000000..c2eee32 --- /dev/null +++ b/vendor/rfsapi-0.2.0/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 nabijaczleweli + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/rfsapi-0.2.0/README.md b/vendor/rfsapi-0.2.0/README.md new file mode 100644 index 0000000..135b583 --- /dev/null +++ b/vendor/rfsapi-0.2.0/README.md @@ -0,0 +1,4 @@ +# rfsapi-rs [![Build status](https://travis-ci.org/nabijaczleweli/rfsapi-rs.svg?branch=master)](https://travis-ci.org/nabijaczleweli/rfsapi-rs) [![Licence](https://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) [![Crates.io version](http://meritbadge.herokuapp.com/rfsapi)](https://crates.io/crates/rfsapi-rs) +Raw Filesystem API for Rust — enable simpler browsing with ease + +## [Documentation](https://cdn.rawgit.com/nabijaczleweli/rfsapi-rs/doc/rfsapi/index.html) diff --git a/vendor/rfsapi-0.2.0/gh_rsa.enc b/vendor/rfsapi-0.2.0/gh_rsa.enc new file mode 100644 index 0000000..789bc67 Binary files /dev/null and b/vendor/rfsapi-0.2.0/gh_rsa.enc differ diff --git a/vendor/rfsapi-0.2.0/rfsapi-rs.sublime-project b/vendor/rfsapi-0.2.0/rfsapi-rs.sublime-project new file mode 100644 index 0000000..af4f28c --- /dev/null +++ b/vendor/rfsapi-0.2.0/rfsapi-rs.sublime-project @@ -0,0 +1,46 @@ +{ + "build_systems": + [ + { + "working_dir": "$project_path", + "shell_cmd": "cargo build --color always && cargo test --color always", + "name": "Build rfsapi-rs", + + "target": "ansi_color_build", + "syntax": "Packages/ANSIescape/ANSI.tmLanguage" + }, + { + "working_dir": "$project_path", + "shell_cmd": "cargo doc --color always", + "name": "Document rfsapi-rs", + + "target": "ansi_color_build", + "syntax": "Packages/ANSIescape/ANSI.tmLanguage" + } + ], + "folders": + [ + { + "follow_symlinks": true, + "name": "Source", + "path": "src" + }, + { + "follow_symlinks": true, + "name": "Tests", + "path": "tests" + }, + { + 
"follow_symlinks": true, + "name": "Build scripts", + "path": ".", + "file_include_patterns": ["Cargo.*", "*.yml"], + "folder_exclude_patterns": ["*"] + }, + ], + "settings": + { + "tab_size": 4, + "translate_tabs_to_spaces": true + } +} diff --git a/vendor/rfsapi-0.2.0/rustfmt.toml b/vendor/rfsapi-0.2.0/rustfmt.toml new file mode 100644 index 0000000..ad979d6 --- /dev/null +++ b/vendor/rfsapi-0.2.0/rustfmt.toml @@ -0,0 +1,7 @@ +max_width = 160 +ideal_width = 128 +fn_call_width = 96 +fn_args_paren_newline = false +fn_args_density = "Compressed" +struct_trailing_comma = "Always" +wrap_comments = true diff --git a/vendor/rfsapi-0.2.0/src/lib.rs b/vendor/rfsapi-0.2.0/src/lib.rs new file mode 100644 index 0000000..5477da2 --- /dev/null +++ b/vendor/rfsapi-0.2.0/src/lib.rs @@ -0,0 +1,230 @@ +//! Raw Filesystem API for Rust — enable simpler browsing with ease +//! +//! This library is to enable both servers and clients to use the RFSAPI, +//! see [D'Oh](https://github.com/thecoshman/doh) for usage example. +#![allow(deprecated)] + + +#[macro_use] +extern crate serde_derive; +extern crate serde; +extern crate hyper; +extern crate time; +extern crate mime; + +use std::fmt; +use time::Tm; +use mime::Mime; +use util::parse_rfc3339; +use hyper::Error as HyperError; +use hyper::header::{HeaderFormat, Header}; +use serde::ser::{SerializeMap, Serializer, Serialize}; +use serde::de::{self, Deserializer, Deserialize, SeqAccess, MapAccess, Visitor}; + +pub mod util; + + +static RAW_FILE_DATA_FIELDS: &[&str] = &["mime_type", "name", "last_modified", "size", "is_file"]; + + +/// Header to specify when doing a request for the Raw Filesystem API, +/// designated by "X-Raw-Filesystem-API". +/// +/// If RFSAPI is supported, the server should return the header set to true. 
+#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Hash, Copy)] +pub struct RawFsApiHeader(pub bool); + +impl Header for RawFsApiHeader { + fn header_name() -> &'static str { + "X-Raw-Filesystem-API" + } + + fn parse_header>(raw: &[T]) -> Result { + if raw.len() == 1 { + match unsafe { raw.get_unchecked(0) }.as_ref() { + b"0" => return Ok(RawFsApiHeader(false)), + b"1" => return Ok(RawFsApiHeader(true)), + _ => {} + } + } + Err(HyperError::Header) + } +} + +impl HeaderFormat for RawFsApiHeader { + fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(if self.0 { "1" } else { "0" }) + } +} + +impl fmt::Display for RawFsApiHeader { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.fmt_header(f) + } +} + + +/// Basic RFSAPI response returned by the server. +/// +/// # Examples +/// +/// ``` +/// # use rfsapi::FilesetData; +/// # mod serde_json { +/// # use rfsapi::FilesetData; +/// # pub fn from_str(_: &str) -> FilesetData { +/// # FilesetData { writes_supported: true, is_root: true, +/// # is_file: false, files: vec![] } } } +/// # let server_response = ""; +/// let resp: FilesetData = serde_json::from_str(server_response); +/// println!("Requested directory has {} children.", resp.files.len()); +/// ``` +#[derive(Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Hash)] +pub struct FilesetData { + /// Whether PUT and DELETE requests are allowed on the server. + pub writes_supported: bool, + /// Whether the requested directory is the root (topmost). + /// + /// `false` if a singular file was requested. + pub is_root: bool, + /// Whether the requested resource is a file. + pub is_file: bool, + /// List of requested files. + /// + /// If the requested resource is a directory, its immediate children are + /// returned here. + /// + /// If the requested resource is a file, its information is returned as the + /// only element. + pub files: Vec, +} + +/// Information about a file available through RFSAPI. 
+#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Hash)] +pub struct RawFileData { + /// File's determined MIME type. + /// + /// Always valid, but possibly garbage for directories. + /// Recommended value for directories: `"text/directory"`. + pub mime_type: Mime, + /// File's name, which can be used to navigate to it. + pub name: String, + /// File's last modification time, as returned by the FS. + pub last_modified: Tm, + /// File size in bytes. + /// + /// Possibly garbage for directories. + /// Recommended value for directories: `0`. + pub size: u64, + /// Whether the file is a file. + pub is_file: bool, +} + +impl Serialize for RawFileData { + fn serialize(&self, serializer: S) -> Result { + let mut map = try!(serializer.serialize_map(Some(RAW_FILE_DATA_FIELDS.len()))); + try!(map.serialize_entry("mime_type", &self.mime_type.to_string())); + try!(map.serialize_entry("name", &self.name)); + try!(map.serialize_entry("last_modified", + &self.last_modified + .to_utc() + .strftime(if self.last_modified.tm_nsec == 0 { + "%Y-%m-%dT%H:%M:%SZ" + } else { + "%Y-%m-%dT%H:%M:%S.%fZ" + }) + .unwrap() + .to_string())); + try!(map.serialize_entry("size", &self.size)); + try!(map.serialize_entry("is_file", &self.is_file)); + map.end() + } +} + +impl<'de> Deserialize<'de> for RawFileData { + fn deserialize>(deserializer: D) -> Result { + deserializer.deserialize_struct("RawFileData", RAW_FILE_DATA_FIELDS, RawFileDataVisitor) + } +} + + +struct RawFileDataVisitor; + +impl<'de> Visitor<'de> for RawFileDataVisitor { + type Value = RawFileData; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct RawFileData") + } + + fn visit_seq>(self, mut seq: V) -> Result { + Ok(RawFileData { + mime_type: { + let mt: String = try!(try!(seq.next_element()).ok_or_else(|| de::Error::invalid_length(0, &self))); + try!(mt.parse() + .map_err(|_| de::Error::invalid_value(de::Unexpected::Str(&mt), &"valid MIME type"))) + }, + name: 
try!(try!(seq.next_element()).ok_or_else(|| de::Error::invalid_length(1, &self))), + last_modified: { + let lm: String = try!(try!(seq.next_element()).ok_or_else(|| de::Error::invalid_length(0, &self))); + try!(parse_rfc3339(&lm).map_err(|_| de::Error::invalid_value(de::Unexpected::Str(&lm), &"RRC3339 timestamp"))) + }, + size: try!(try!(seq.next_element()).ok_or_else(|| de::Error::invalid_length(3, &self))), + is_file: try!(try!(seq.next_element()).ok_or_else(|| de::Error::invalid_length(4, &self))), + }) + } + + fn visit_map>(self, mut map: V) -> Result { + let mut mime_type = None; + let mut name = None; + let mut last_modified = None; + let mut size = None; + let mut is_file = None; + while let Some(key) = try!(map.next_key::()) { + match &key[..] { + "mime_type" => { + if mime_type.is_some() { + return Err(de::Error::duplicate_field("mime_type")); + } + let nv: String = try!(map.next_value()); + mime_type = Some(try!(nv.parse::() + .map_err(|_| de::Error::invalid_value(de::Unexpected::Str(&nv), &"valid MIME type")))); + } + "name" => { + if name.is_some() { + return Err(de::Error::duplicate_field("name")); + } + name = Some(try!(map.next_value())); + } + "last_modified" => { + if last_modified.is_some() { + return Err(de::Error::duplicate_field("last_modified")); + } + let nv: String = try!(map.next_value()); + last_modified = Some(try!(parse_rfc3339(&nv).map_err(|_| de::Error::invalid_value(de::Unexpected::Str(&nv), &"RRC3339 timestamp")))); + } + "size" => { + if size.is_some() { + return Err(de::Error::duplicate_field("size")); + } + size = Some(try!(map.next_value())); + } + "is_file" => { + if is_file.is_some() { + return Err(de::Error::duplicate_field("is_file")); + } + is_file = Some(try!(map.next_value())); + } + key => return Err(de::Error::unknown_field(key, RAW_FILE_DATA_FIELDS)), + } + } + + Ok(RawFileData { + mime_type: try!(mime_type.ok_or_else(|| de::Error::missing_field("mime_type"))), + name: try!(name.ok_or_else(|| 
de::Error::missing_field("name"))), + last_modified: try!(last_modified.ok_or_else(|| de::Error::missing_field("last_modified"))), + size: try!(size.ok_or_else(|| de::Error::missing_field("size"))), + is_file: try!(is_file.ok_or_else(|| de::Error::missing_field("is_file"))), + }) + } +} diff --git a/vendor/rfsapi-0.2.0/src/util.rs b/vendor/rfsapi-0.2.0/src/util.rs new file mode 100644 index 0000000..5ab7366 --- /dev/null +++ b/vendor/rfsapi-0.2.0/src/util.rs @@ -0,0 +1,64 @@ +//! Module containing various utility functions. + + +use time::{self, Tm}; + + +/// Parse an RFC3339 string into a timespec. +/// +/// Note: due to the specificity of the `tm` struct some fields are not +/// preserved, but have no impact on the correctness of the result: +/// +/// * `tm_wday` – weekday +/// * `tm_yday` – day of the year +/// * `tm_isdst` – daylight savings time applied/not applied +/// +/// # Examples +/// +/// ``` +/// # extern crate time; +/// # extern crate rfsapi; +/// # use time::Tm; +/// # use rfsapi::util::parse_rfc3339; +/// # fn main() { +/// assert_eq!(parse_rfc3339("2012-02-22T07:53:18-07:00"), +/// Ok(Tm { +/// tm_sec: 18, +/// tm_min: 53, +/// tm_hour: 7, +/// tm_mday: 22, +/// tm_mon: 1, +/// tm_year: 112, +/// tm_wday: 0, +/// tm_yday: 0, +/// tm_isdst: 0, +/// tm_utcoff: -25200, +/// tm_nsec: 0, +/// })); +/// assert_eq!(parse_rfc3339("2012-02-22T14:53:18.42Z"), +/// Ok(Tm { +/// tm_sec: 18, +/// tm_min: 53, +/// tm_hour: 14, +/// tm_mday: 22, +/// tm_mon: 1, +/// tm_year: 112, +/// tm_wday: 0, +/// tm_yday: 0, +/// tm_isdst: 0, +/// tm_utcoff: 0, +/// tm_nsec: 420000000, +/// })); +/// # } +/// ``` +pub fn parse_rfc3339>(from: S) -> Result { + let utc = from.as_ref().chars().last() == Some('Z'); + let fractional = from.as_ref().len() > if utc { 20 } else { 25 }; + time::strptime(from.as_ref(), + match (utc, fractional) { + (true, false) => "%Y-%m-%dT%H:%M:%SZ", + (true, true) => "%Y-%m-%dT%H:%M:%S.%fZ", + (false, true) => "%Y-%m-%dT%H:%M:%S.%f%z", + (false, 
false) => "%Y-%m-%dT%H:%M:%S%z", + }) +} diff --git a/vendor/rfsapi-0.2.0/tests/data/mod.rs b/vendor/rfsapi-0.2.0/tests/data/mod.rs new file mode 100644 index 0000000..e5346b2 --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/data/mod.rs @@ -0,0 +1,2 @@ +mod raw_fs_api_header; +mod raw_file_data; diff --git a/vendor/rfsapi-0.2.0/tests/data/raw_file_data.rs b/vendor/rfsapi-0.2.0/tests/data/raw_file_data.rs new file mode 100644 index 0000000..a00b9fb --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/data/raw_file_data.rs @@ -0,0 +1,42 @@ +use rfsapi::util::parse_rfc3339; +use serde_json::{self, Value}; +use rfsapi::RawFileData; + + +#[test] +fn serialize() { + assert_eq!(serde_json::to_value(RawFileData { + mime_type: "text/plain".parse().unwrap(), + name: "capitalism.txt".to_string(), + last_modified: parse_rfc3339("2013-02-05T16:20:46Z").unwrap(), + size: 1023, + is_file: true, + }) + .unwrap(), + Value::Object(vec![("mime_type".to_string(), Value::String("text/plain".to_string())), + ("name".to_string(), Value::String("capitalism.txt".to_string())), + ("last_modified".to_string(), Value::String("2013-02-05T16:20:46Z".to_string())), + ("size".to_string(), Value::Number(1023.into())), + ("is_file".to_string(), Value::Bool(true))] + .into_iter() + .collect())); +} + +#[test] +fn deserialize() { + assert_eq!(serde_json::from_value::(Value::Object(vec![("mime_type".to_string(), Value::String("text/directory".to_string())), + ("name".to_string(), Value::String("kaschism".to_string())), + ("last_modified".to_string(), Value::String("2013-02-05T16:20:46Z".to_string())), + ("size".to_string(), Value::Number(0.into())), + ("is_file".to_string(), Value::Bool(false))] + .into_iter() + .collect())) + .unwrap(), + RawFileData { + mime_type: "text/directory".parse().unwrap(), + name: "kaschism".to_string(), + last_modified: parse_rfc3339("2013-02-05T16:20:46Z").unwrap(), + size: 0, + is_file: false, + }); +} diff --git a/vendor/rfsapi-0.2.0/tests/data/raw_fs_api_header.rs 
b/vendor/rfsapi-0.2.0/tests/data/raw_fs_api_header.rs new file mode 100644 index 0000000..660be73 --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/data/raw_fs_api_header.rs @@ -0,0 +1,33 @@ +use hyper::header::{Raw as RawHeader, Header}; +use hyper::Error as HyperError; +use rfsapi::RawFsApiHeader; + + +#[test] +fn header_name() { + assert_eq!(RawFsApiHeader::header_name(), "X-Raw-Filesystem-API"); +} + +#[test] +fn parse_header_correct() { + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(vec![b'1'])).unwrap(), RawFsApiHeader(true)); + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(vec![b'0'])).unwrap(), RawFsApiHeader(false)); +} + +#[test] +fn parse_header_incorrect() { + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(&b""[..])).unwrap_err().to_string(), + HyperError::Header.to_string()); + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(vec![vec![]])).unwrap_err().to_string(), + HyperError::Header.to_string()); + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(vec![vec![b'1', b'0']])).unwrap_err().to_string(), + HyperError::Header.to_string()); + assert_eq!(RawFsApiHeader::parse_header(&RawHeader::from(vec![vec![b'1'], vec![b'1']])).unwrap_err().to_string(), + HyperError::Header.to_string()); +} + +#[test] +fn fmt_header() { + assert_eq!(&RawFsApiHeader(true).to_string(), "1"); + assert_eq!(&RawFsApiHeader(false).to_string(), "0"); +} diff --git a/vendor/rfsapi-0.2.0/tests/lib.rs b/vendor/rfsapi-0.2.0/tests/lib.rs new file mode 100644 index 0000000..bebdc7d --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/lib.rs @@ -0,0 +1,8 @@ +extern crate serde_json; +extern crate rfsapi; +extern crate hyper; +extern crate serde; +extern crate time; + + +mod data; diff --git a/vendor/rfsapi-0.2.0/tests/util/mod.rs b/vendor/rfsapi-0.2.0/tests/util/mod.rs new file mode 100644 index 0000000..9d7a69f --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/util/mod.rs @@ -0,0 +1 @@ +mod parse_rfc3339; diff --git 
a/vendor/rfsapi-0.2.0/tests/util/parse_rfc3339.rs b/vendor/rfsapi-0.2.0/tests/util/parse_rfc3339.rs new file mode 100644 index 0000000..91dfa3a --- /dev/null +++ b/vendor/rfsapi-0.2.0/tests/util/parse_rfc3339.rs @@ -0,0 +1,95 @@ +use rfsapi::util::parse_rfc3339; +use time::{Tm, now_utc, now}; + + +#[test] +fn from_local() { + assert_eq!(parse_rfc3339("2013-02-05T17:20:46+02:00"), + Ok(Tm { + tm_sec: 46, + tm_min: 20, + tm_hour: 17, + tm_mday: 5, + tm_mon: 1, + tm_year: 113, + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + tm_utcoff: 7200, + tm_nsec: 0, + })); + assert_eq!(parse_rfc3339("2005-10-02T05:21:52.420526571Z"), + Ok(Tm { + tm_sec: 52, + tm_min: 21, + tm_hour: 5, + tm_mday: 2, + tm_mon: 9, + tm_year: 105, + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + tm_utcoff: 0, + tm_nsec: 420526571, + })); +} + +#[test] +fn from_utc() { + assert_eq!(parse_rfc3339("2014-11-28T15:12:51Z"), + Ok(Tm { + tm_sec: 51, + tm_min: 12, + tm_hour: 15, + tm_mday: 28, + tm_mon: 10, + tm_year: 114, + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + tm_utcoff: 0, + tm_nsec: 0, + })); + assert_eq!(parse_rfc3339("2002-10-02T15:00:00.05Z"), + Ok(Tm { + tm_sec: 0, + tm_min: 0, + tm_hour: 15, + tm_mday: 2, + tm_mon: 9, + tm_year: 102, + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + tm_utcoff: 0, + tm_nsec: 50000000, + })); +} + +#[test] +fn trans_local() { + let tm = Tm { + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + ..now() + }; + assert_eq!(parse_rfc3339(tm.strftime("%Y-%m-%dT%H:%M:%S.%f%z") + .unwrap() + .to_string()), + Ok(tm)); +} + +#[test] +fn trans_utc() { + let tm = Tm { + tm_wday: 0, + tm_yday: 0, + tm_isdst: 0, + ..now_utc() + }; + assert_eq!(parse_rfc3339(tm.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + .unwrap() + .to_string()), + Ok(tm)); +}