diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..48b8bf90 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +vendor/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..dba13ed2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. 
Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..9525c564 --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +all: + gometalinter ./... --deadline 1m | grep -v '.*\.pb\..*' diff --git a/README.md b/README.md new file mode 100644 index 00000000..e0bd577a --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +# Brig: Ship your data around the world + +![a brig](http://www.rodlangton.com/nnimages/brig1.jpg) + +``brig`` is a distributed & secure file synchronization tool (and more!) +It is based on ``ipfs``. Here's a list of what it can do: + +- Transfer data. +- Synchronize data. +- ... TODO ... + + +This is a very early work in progress. +More information will follow once a rough first prototype is ready. diff --git a/cmd/cli.go b/cmd/cli.go new file mode 100644 index 00000000..c9d0b0f7 --- /dev/null +++ b/cmd/cli.go @@ -0,0 +1,627 @@ +package cmdline + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig" + "github.com/disorganizer/brig/daemon" + "github.com/disorganizer/brig/fuse" + "github.com/disorganizer/brig/repo" + "github.com/disorganizer/brig/repo/config" + "github.com/disorganizer/brig/util/colors" + colorlog "github.com/disorganizer/brig/util/log" + yamlConfig "github.com/olebedev/config" + "github.com/tsuibin/goxmpp2/xmpp" + "github.com/tucnak/climax" +) + +func init() { + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.DebugLevel) + + // Log pretty text + log.SetFormatter(&colorlog.ColorfulLogFormatter{}) +} + +/////////////////////// +// Utility functions // +/////////////////////// + +func formatGroup(category string) string { + return strings.ToUpper(category) + " COMMANDS:" +} + +// guessRepoFolder tries to find the repository path +// by using a number of sources. +func guessRepoFolder() string { + folder := repo.GuessFolder() + if folder == "" { + log.Fatalf("This does not like a brig repository (missing .brig)") + } + + return folder +} + +func readPassword() (string, error) { + repoFolder := guessRepoFolder() + pwd, err := repo.PromptPasswordMaxTries(4, func(pwd string) bool { + err := repo.CheckPassword(repoFolder, pwd) + return err == nil + }) + + return pwd, err +} + +/////////////////////// +// Handler functions // +/////////////////////// + +func handleVersion(ctx climax.Context) int { + fmt.Println(brig.VersionString()) + return 0 +} + +func handleOpen(ctx climax.Context) int { + repoFolder := guessRepoFolder() + pwd, err := readPassword() + + if err != nil { + log.Errorf("Open failed: %v", err) + return 1 + } + + if _, err := daemon.Reach(pwd, repoFolder, 6666); err != nil { + log.Errorf("Unable to start daemon: %v", err) + return 3 + } + return 0 +} + +func handleClose(ctx climax.Context) int { + // This is currently the same as `brig daemon -q` + return handleDaemonQuit() +} + +func handleDaemonPing() int { + client, err := daemon.Dial(6666) + if err != nil { + log.Warning("Unable to dial to daemon: ", err) + return 1 + } + defer client.Close() + + for i := 0; i < 100; i++ { + before := time.Now() + symbol := colors.Colorize("✔", colors.Green) + if !client.Ping() { + symbol = colors.Colorize("✘", colors.Red) + } + + delay := time.Since(before) + + fmt.Printf("#%02d %s ➔ %s: %s (%v)\n", + i+1, + client.LocalAddr().String(), + client.RemoteAddr().String(), + symbol, delay) + time.Sleep(1 * time.Second) + } + + return 0 +} + +func 
handleDaemonQuit() int { + client, err := daemon.Dial(6666) + if err != nil { + log.Warning("Unable to dial to daemon: ", err) + return 1 + } + defer client.Close() + + client.Exorcise() + return 0 +} + +func handleDaemon(ctx climax.Context) int { + if ctx.Is("ping") { + return handleDaemonPing() + } else if ctx.Is("quit") { + return handleDaemonQuit() + } + + pwd, ok := ctx.Get("password") + if !ok { + var err error + pwd, err = readPassword() + if err != nil { + log.Errorf("Could not read password: %v", pwd) + return 1 + } + } + + repoFolder := guessRepoFolder() + err := repo.CheckPassword(repoFolder, pwd) + if err != nil { + log.Error("Wrong password.") + return 2 + } + + baal, err := daemon.Summon(pwd, repoFolder, 6666) + if err != nil { + log.Warning("Unable to start daemon: ", err) + return 3 + } + + baal.Serve() + return 0 +} + +func handleMount(ctx climax.Context) int { + if len(ctx.Args) == 0 { + fmt.Println("Usage: brig mount [mntpath]") + return 1 + } + + mntpath := ctx.Args[0] + if err := fuse.Mount(mntpath); err != nil { + log.Errorf("Unable to mount: %v", err) + return 2 + } + + return 0 +} + +func handleConfig(ctx climax.Context) int { + folder := guessRepoFolder() + cfgPath := filepath.Join(folder, ".brig", "config") + + cfg, err := config.LoadConfig(cfgPath) + if err != nil { + log.Errorf("Could not load config: %v", err) + return 2 + } + + switch len(ctx.Args) { + case 0: + yaml, err := yamlConfig.RenderYaml(cfg) + if err != nil { + log.Errorf("Unable to render config: %v", err) + return 3 + } + fmt.Println(yaml) + case 1: + key := ctx.Args[0] + value, err := cfg.String(key) + if err != nil { + log.Errorf("Could not retrieve %s: %v", key, err) + return 4 + } + fmt.Println(value) + case 2: + key := ctx.Args[0] + value := ctx.Args[1] + if err := cfg.Set(key, value); err != nil { + log.Errorf("Could not set %s: %v", key, err) + return 5 + } + + if _, err := config.SaveConfig(cfgPath, cfg); err != nil { + log.Errorf("Could not save config: %v", err) + 
return 6 + } + } + + return 0 +} + +func handleInit(ctx climax.Context) int { + if len(ctx.Args) < 1 { + log.Error("Need your Jabber ID.") + return 1 + } + + jid := xmpp.JID(ctx.Args[0]) + if jid.Domain() == "" { + log.Error("Your JabberID needs a domain.") + return 2 + } + + // Extract the folder from the resource name by default: + folder := jid.Resource() + if folder == "" { + log.Error("Need a resource in your JID.") + return 3 + } + + if envFolder := os.Getenv("BRIG_PATH"); envFolder != "" { + folder = envFolder + } + + if ctx.Is("folder") { + folder, _ = ctx.Get("folder") + } + + pwd, ok := ctx.Get("password") + if !ok { + var err error + pwdBytes, err := repo.PromptNewPassword(40.0) + if err != nil { + log.Error(err) + return 4 + } + + pwd = string(pwdBytes) + } + + repo, err := repo.NewRepository(string(jid), pwd, folder) + if err != nil { + log.Error(err) + return 5 + } + + if err := repo.Close(); err != nil { + log.Errorf("close: %v", err) + return 6 + } + + if !ctx.Is("nodaemon") { + if _, err := daemon.Reach(string(pwd), folder, 6666); err != nil { + log.Errorf("Unable to start daemon: %v", err) + return 7 + } + } + + return 0 +} + +func handleAdd(ctx climax.Context) int { + if len(ctx.Args) < 1 { + log.Errorf("add: Need at least one file.") + return 1 + } + + // TODO: Start daemon if necessary. 
+ client, err := daemon.Dial(6666) + if err != nil { + log.Warning("Unable to dial to daemon: ", err) + return 1 + } + defer client.Close() + + for _, path := range ctx.Args { + absPath, err := filepath.Abs(path) + if err != nil { + log.Errorf("Unable to make abs path: %v: %v", path, err) + continue + } + + hash, err := client.Add(absPath) + if err != nil { + log.Errorf("Could not add file: %v: %v", absPath, err) + return 3 + } + + fmt.Println("%s\n", hash.B58String()) + } + + return 0 +} + +func handleCat(ctx climax.Context) int { + if len(ctx.Args) < 2 { + log.Errorf("cat: Need at least src and dest file.") + return 1 + } + + // TODO: Start daemon if necessary. + client, err := daemon.Dial(6666) + if err != nil { + log.Warning("Unable to dial to daemon: ", err) + return 2 + } + defer client.Close() + + dstPath, err := filepath.Abs(ctx.Args[0]) + absPath, err := filepath.Abs(ctx.Args[1]) + if err != nil { + log.Errorf("Unable to make abs path: %v: %v", absPath, err) + return 3 + } + + newPath, err := client.Cat(dstPath, absPath) + if err != nil { + log.Errorf("Could not cat file: %v: %v", absPath, err) + return 4 + } + + fmt.Printf("%s\n", newPath) + return 0 +} + +//////////////////////////// +// Commandline definition // +//////////////////////////// + +// RunCmdline starts a brig commandline tool. +func RunCmdline() int { + demo := climax.New("brig") + demo.Brief = "brig is a decentralized file syncer based on IPFS and XMPP." 
+ demo.Version = "unstable" + + repoGroup := demo.AddGroup(formatGroup("repository")) + xmppGroup := demo.AddGroup(formatGroup("xmpp helper")) + wdirGroup := demo.AddGroup(formatGroup("working")) + advnGroup := demo.AddGroup(formatGroup("advanced")) + miscGroup := demo.AddGroup(formatGroup("misc")) + + commands := []climax.Command{ + climax.Command{ + Name: "init", + Brief: "Initialize an empty repository and open it", + Group: repoGroup, + Usage: ` []`, + Help: `Create an empty repository, open it and associate it with the JID`, + Flags: []climax.Flag{ + { + Name: "depth", + Short: "o", + Usage: `--depth="N"`, + Help: `Only clone up to this depth of pinned files`, + Variable: true, + }, { + Name: "nodaemon", + Short: "n", + Help: `Do not start the daemon.`, + }, { + Name: "password", + Short: "x", + Usage: `--password PWD`, + Help: `Supply password.`, + Variable: true, + }, + }, + Examples: []climax.Example{ + { + Usecase: `alice@jabber.de/laptop`, + Description: `Create a folder laptop/ with hidden directories`, + }, + }, + Handle: func(ctx climax.Context) int { + return handleInit(ctx) + }, + }, + climax.Command{ + Name: "clone", + Brief: "Clone an repository from somebody else", + Group: repoGroup, + Usage: ` []`, + Help: `...`, + Flags: []climax.Flag{ + { + Name: "--depth", + Short: "d", + Usage: `--depth="N"`, + Help: `Only clone up to this depth of pinned files`, + Variable: true, + }, + }, + Examples: []climax.Example{ + { + Usecase: `alice@jabber.de/laptop bob@jabber.de/desktop`, + Description: `Clone Alice' contents`, + }, + }, + Handle: func(ctx climax.Context) int { + // TODO: Utils to convert string to int. + // TODO: Utils to get default value. + depth, ok := ctx.Get("--depth") + if !ok { + depth = "-1" + } + + fmt.Println(depth) + return 0 + }, + }, + climax.Command{ + Name: "open", + Group: repoGroup, + Brief: "Open an encrypted port. 
Asks for passphrase.", + Handle: handleOpen, + }, + climax.Command{ + Name: "close", + Group: repoGroup, + Brief: "Encrypt all metadata in the port and go offline.", + Handle: handleClose, + }, + climax.Command{ + Name: "sync", + Group: repoGroup, + Brief: "Sync with all or selected trusted peers.", + }, + climax.Command{ + Name: "push", + Group: repoGroup, + Brief: "Push your content to all or selected trusted peers.", + }, + climax.Command{ + Name: "pull", + Group: repoGroup, + Brief: "Pull content from all or selected trusted peers.", + }, + climax.Command{ + Name: "watch", + Group: repoGroup, + Brief: "Enable or disable watch mode.", + }, + climax.Command{ + Name: "discover", + Group: xmppGroup, + Brief: "Try to find other brig users near you.", + }, + climax.Command{ + Name: "friends", + Group: xmppGroup, + Brief: "List your trusted peers.", + }, + climax.Command{ + Name: "beg", + Group: xmppGroup, + Brief: "Request authorisation from a buddy.", + }, + climax.Command{ + Name: "ban", + Group: xmppGroup, + Brief: "Discontinue friendship with a peer.", + }, + climax.Command{ + Name: "prio", + Group: xmppGroup, + Brief: "Change priority of a peer.", + }, + climax.Command{ + Name: "status", + Group: wdirGroup, + Brief: "Give an overview of brig's current state.", + }, + climax.Command{ + Name: "add", + Group: wdirGroup, + Brief: "Make file to be managed by brig.", + Usage: `FILE_OR_FOLDER [FILE_OR_FOLDER ...]`, + Help: `TODO`, + Handle: handleAdd, + }, + climax.Command{ + Name: "get", + Group: wdirGroup, + Brief: "Write ", + Usage: `FILE_OR_FOLDER DEST_PATH`, + Help: `TODO`, + Handle: handleCat, + }, + climax.Command{ + Name: "find", + Group: wdirGroup, + Brief: "Find filenames in the fleet.", + }, + climax.Command{ + Name: "rm", + Group: wdirGroup, + Brief: "Remove file from brig's control.", + }, + climax.Command{ + Name: "log", + Group: wdirGroup, + Brief: "Visualize changelog tree.", + }, + climax.Command{ + Name: "checkout", + Group: wdirGroup, + Brief: 
"Attempt to checkout previous version of a file.", + }, + climax.Command{ + Name: "fsck", + Group: advnGroup, + Brief: "Verify, and possibly fix, broken files.", + }, + climax.Command{ + Name: "daemon", + Group: advnGroup, + Brief: "Manually run the daemon process.", + Flags: []climax.Flag{ + { + Name: "ping", + Short: "p", + Usage: `--ping`, + Help: `Ping the dameon to check if it's running.`, + }, + { + Name: "quit", + Short: "q", + Usage: `--quit`, + Help: `Kill a running daemon.`, + }, + { + Name: "password", + Short: "x", + Usage: `--password PWD`, + Help: `Supply password.`, + Variable: true, + }, + }, + Handle: handleDaemon, + }, + climax.Command{ + Name: "passwd", + Group: advnGroup, + Brief: "Set your XMPP and access password.", + }, + climax.Command{ + Name: "yubi", + Group: advnGroup, + Brief: "Manage YubiKeys.", + }, + climax.Command{ + Name: "config", + Group: miscGroup, + Brief: "Access, list and modify configuration values.", + Handle: handleConfig, + }, + climax.Command{ + Name: "mount", + Group: miscGroup, + Brief: "Handle FUSE mountpoints.", + Handle: handleMount, + }, + climax.Command{ + Name: "update", + Group: miscGroup, + Brief: "Try to securely update brig.", + }, + climax.Command{ + Name: "help", + Group: miscGroup, + Brief: "Print some help", + Usage: "Did you really need help on help?", + }, + climax.Command{ + Name: "version", + Group: miscGroup, + Brief: "Print current version.", + Usage: "Print current version.", + Handle: handleVersion, + }, + } + + for _, command := range commands { + demo.AddCommand(command) + } + + // Help topics: + demo.AddTopic(climax.Topic{ + Name: "quickstart", + Brief: "A very short introduction to brig", + Text: "TODO: write.", + }) + demo.AddTopic(climax.Topic{ + Name: "tutorial", + Brief: "A slightly longer introduction.", + Text: "TODO: write.", + }) + demo.AddTopic(climax.Topic{ + Name: "terms", + Brief: "Cheat sheet for often used terms.", + Text: "TODO: write.", + }) + + return demo.Run() +} diff --git 
a/cmd/main/.gitignore b/cmd/main/.gitignore new file mode 100644 index 00000000..096a3169 --- /dev/null +++ b/cmd/main/.gitignore @@ -0,0 +1,2 @@ +main +brig diff --git a/cmd/main/brig.go b/cmd/main/brig.go new file mode 100644 index 00000000..624d7433 --- /dev/null +++ b/cmd/main/brig.go @@ -0,0 +1,10 @@ +package main + +import ( + "github.com/disorganizer/brig/cmd" + "os" +) + +func main() { + os.Exit(cmdline.RunCmdline()) +} diff --git a/cmd/main/test_add.sh b/cmd/main/test_add.sh new file mode 100755 index 00000000..695c79c2 --- /dev/null +++ b/cmd/main/test_add.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +export BRIG_PATH=/tmp/alice +pkill -f brig +rm -rf $BRIG_PATH +echo "=== INIT ===" +./brig init alice@jabber.de/home -x hello_password --nodaemon +echo "=== DAEMON ===" +./brig daemon -x hello_password +echo "=== FINISH ===" diff --git a/daemon/Makefile b/daemon/Makefile new file mode 100644 index 00000000..7e5867aa --- /dev/null +++ b/daemon/Makefile @@ -0,0 +1,2 @@ +all: + protoc --proto_path=. --gofast_out proto daemon.proto diff --git a/daemon/client.go b/daemon/client.go new file mode 100644 index 00000000..0a1a3224 --- /dev/null +++ b/daemon/client.go @@ -0,0 +1,207 @@ +package daemon + +import ( + "fmt" + "io" + "net" + "os" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/VividCortex/godaemon" + "github.com/disorganizer/brig/daemon/proto" + "github.com/disorganizer/brig/util/tunnel" + protobuf "github.com/gogo/protobuf/proto" + "github.com/jbenet/go-multihash" +) + +// Client is the client API to brigd. +type Client struct { + // Use this channel to send commands to the daemon + Send chan *proto.Command + + // Responses are sent to this channel + Recv chan *proto.Response + + // Underlying tcp connection: + conn net.Conn + + // Be able to tell handleMessages to stop + quit chan bool +} + +// Dial connects to a running daemon instance. 
+func Dial(port int) (*Client, error) { + client := &Client{ + Send: make(chan *proto.Command), + Recv: make(chan *proto.Response), + quit: make(chan bool, 1), + } + + addr := fmt.Sprintf("127.0.0.1:%d", port) + conn, err := net.Dial("tcp", addr) + if err != nil { + return nil, err + } + + client.conn = conn + tnl, err := tunnel.NewEllipticTunnel(conn) + if err != nil { + log.Error("Tunneling failed: ", err) + return nil, err + } + + go client.handleMessages(tnl) + + client.Ping() + return client, nil +} + +// handleMessages takes all messages from the Send channel +// and actually sends them over the network. It then waits +// for the response and puts it in the Recv channel. +func (c *Client) handleMessages(tnl io.ReadWriter) { + for { + select { + case <-c.quit: + return + case msg := <-c.Send: + if err := send(tnl, msg); err != nil { + log.Warning("client-send: ", err) + c.Recv <- nil + continue + } + + resp := &proto.Response{} + if err := recv(tnl, resp); err != nil { + log.Warning("client-recv: ", err) + c.Recv <- nil + continue + } + + c.Recv <- resp + } + } +} + +// Reach tries to Dial() the daemon, if not there it Launch()'es one. 
+func Reach(pwd, repoPath string, port int) (*Client, error) { + // Try to Dial directly first: + if client, err := Dial(port); err == nil { + return client, nil + } + + // Probably not running, find out our own binary: + exePath, err := godaemon.GetExecutablePath() + if err != nil { + return nil, err + } + + // Start a new daemon process: + log.Info("Starting daemon: ", exePath) + proc, err := os.StartProcess( + exePath, + []string{"brig", "daemon", "-x", pwd}, + &os.ProcAttr{}, + ) + + if err != nil { + return nil, err + } + + // Make sure it it's still referenced: + go func() { + log.Info("Daemon has PID: ", proc.Pid) + if _, err := proc.Wait(); err != nil { + log.Warning("Bad exit state: ", err) + } + }() + + // Wait at max 15 seconds for the daemon to start up: + // (this means, wait till it's network interface is started) + for i := 0; i < 15; i++ { + client, err := Dial(port) + fmt.Println("Try dial", client) + if err != nil { + time.Sleep(1 * time.Second) + continue + } + + return client, nil + } + + return nil, fmt.Errorf("Daemon could not be started or took to long.") +} + +// Ping returns true if the daemon is running and responds correctly. +func (c *Client) Ping() bool { + cmd := &proto.Command{} + cmd.CommandType = proto.MessageType_PING.Enum() + + c.Send <- cmd + resp := <-c.Recv + if resp != nil { + return "PONG" == resp.GetResponse() + } + + return false +} + +// Exorcise sends a QUIT message to the daemon. 
+func (c *Client) Exorcise() { + cmd := &proto.Command{} + cmd.CommandType = proto.MessageType_QUIT.Enum() + c.Send <- cmd + <-c.Recv +} + +// Close shuts down the daemon client +func (c *Client) Close() { + if c != nil { + c.quit <- true + c.conn.Close() + } +} + +// LocalAddr returns a net.Addr with the client end of the Connection +func (c *Client) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns a net.Addr with the server end of the Connection +func (c *Client) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *Client) Add(absPath string) (multihash.Multihash, error) { + c.Send <- &proto.Command{ + CommandType: proto.MessageType_ADD.Enum(), + AddCommand: &proto.Command_AddCmd{ + FilePath: protobuf.String(absPath), + }, + } + + resp := <-c.Recv + if resp != nil && !resp.GetSuccess() { + return nil, fmt.Errorf("client: add: %v", resp.GetError()) + } + + return multihash.FromB58String(resp.GetResponse()) +} + +func (c *Client) Cat(name, destPath string) (string, error) { + c.Send <- &proto.Command{ + CommandType: proto.MessageType_CAT.Enum(), + CatCommand: &proto.Command_CatCmd{ + DestPath: protobuf.String(destPath), + FilePath: protobuf.String(name), + }, + } + + resp := <-c.Recv + if resp != nil && !resp.GetSuccess() { + return "", fmt.Errorf("client: cat: %v", resp.GetError()) + } + + return resp.GetResponse(), nil +} diff --git a/daemon/common.go b/daemon/common.go new file mode 100644 index 00000000..421510b1 --- /dev/null +++ b/daemon/common.go @@ -0,0 +1,67 @@ +package daemon + +import ( + "encoding/binary" + "fmt" + "io" + + protobuf "github.com/gogo/protobuf/proto" +) + +// send transports a msg over conn with a size header. 
+func send(conn io.Writer, msg protobuf.Message) error { + data, err := protobuf.Marshal(msg) + if err != nil { + return err + } + + sizeBuf := make([]byte, binary.MaxVarintLen64) + binary.PutUvarint(sizeBuf, uint64(len(data))) + + n, err := conn.Write(sizeBuf) + if err != nil { + return err + } + + if n < len(sizeBuf) { + return io.ErrShortWrite + } + + n, err = conn.Write(data) + if err != nil { + return err + } + + if n < len(data) { + return io.ErrShortWrite + } + + return nil +} + +// recv reads a size-prefixed protobuf buffer +func recv(conn io.Reader, msg protobuf.Message) error { + sizeBuf := make([]byte, binary.MaxVarintLen64) + n, err := conn.Read(sizeBuf) + if err != nil { + return err + } + + size, _ := binary.Uvarint(sizeBuf[:n]) + if size > 1*1024*1024 { + return fmt.Errorf("Message too large: %d", size) + } + + buf := make([]byte, size) + _, err = conn.Read(buf) + if err != nil { + return err + } + + err = protobuf.Unmarshal(buf, msg) + if err != nil { + return err + } + + return nil +} diff --git a/daemon/daemon.proto b/daemon/daemon.proto new file mode 100644 index 00000000..0bb034fd --- /dev/null +++ b/daemon/daemon.proto @@ -0,0 +1,40 @@ +package daemon.protocol; +option go_package = "proto"; + +enum MessageType { + ADD = 0; + CAT = 1; + PING = 2; + QUIT = 3; +} + +message Command { + message AddCmd { + required string file_path = 1; + } + + message CatCmd { + required string file_path = 1; + required string dest_path = 2; + } + + message PingCmd { + } + + message QuitCmd { + } + + required MessageType command_type = 1; + + optional AddCmd add_command = 2; + optional CatCmd cat_command = 3; + optional PingCmd ping_command = 4; + optional QuitCmd quit_command = 5; +} + +message Response { + required MessageType response_type = 1; + required bool success = 3; + optional string response = 2; + optional string error = 4; +} diff --git a/daemon/proto/daemon.pb.go b/daemon/proto/daemon.pb.go new file mode 100644 index 00000000..87e16d8c --- /dev/null 
+++ b/daemon/proto/daemon.pb.go @@ -0,0 +1,1369 @@ +// Code generated by protoc-gen-gogo. +// source: daemon.proto +// DO NOT EDIT! + +/* + Package proto is a generated protocol buffer package. + + It is generated from these files: + daemon.proto + + It has these top-level messages: + Command + Response +*/ +package proto + +import proto1 "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_golang_protobuf_proto "github.com/golang/protobuf/proto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto1.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type MessageType int32 + +const ( + MessageType_ADD MessageType = 0 + MessageType_CAT MessageType = 1 + MessageType_PING MessageType = 2 + MessageType_QUIT MessageType = 3 +) + +var MessageType_name = map[int32]string{ + 0: "ADD", + 1: "CAT", + 2: "PING", + 3: "QUIT", +} +var MessageType_value = map[string]int32{ + "ADD": 0, + "CAT": 1, + "PING": 2, + "QUIT": 3, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto1.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto1.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} + +type Command struct { + CommandType *MessageType `protobuf:"varint,1,req,name=command_type,enum=daemon.protocol.MessageType" json:"command_type,omitempty"` + AddCommand *Command_AddCmd `protobuf:"bytes,2,opt,name=add_command" json:"add_command,omitempty"` + CatCommand *Command_CatCmd `protobuf:"bytes,3,opt,name=cat_command" json:"cat_command,omitempty"` + PingCommand *Command_PingCmd `protobuf:"bytes,4,opt,name=ping_command" json:"ping_command,omitempty"` + QuitCommand *Command_QuitCmd `protobuf:"bytes,5,opt,name=quit_command" json:"quit_command,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *Command) Reset() { *m = Command{} } +func (m *Command) String() string { return proto1.CompactTextString(m) } +func (*Command) ProtoMessage() {} + +func (m *Command) GetCommandType() MessageType { + if m != nil && m.CommandType != nil { + return *m.CommandType + } + return MessageType_ADD +} + +func (m *Command) GetAddCommand() *Command_AddCmd { + if m != nil { + return m.AddCommand + } + return nil +} + +func (m *Command) GetCatCommand() *Command_CatCmd { + if m != nil { + return m.CatCommand + } + return nil +} + +func (m *Command) GetPingCommand() *Command_PingCmd { + if m != nil { + return m.PingCommand + } + return nil +} + +func (m *Command) GetQuitCommand() *Command_QuitCmd { + if m != nil { + return m.QuitCommand + } + return nil +} + +type Command_AddCmd struct { + FilePath *string `protobuf:"bytes,1,req,name=file_path" json:"file_path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Command_AddCmd) Reset() { *m = Command_AddCmd{} } +func (m *Command_AddCmd) String() string { return proto1.CompactTextString(m) } +func (*Command_AddCmd) ProtoMessage() {} + +func (m *Command_AddCmd) GetFilePath() string { + if m != nil && m.FilePath != nil { + return *m.FilePath + } + return "" +} + +type Command_CatCmd struct { + FilePath *string `protobuf:"bytes,1,req,name=file_path" json:"file_path,omitempty"` + DestPath *string `protobuf:"bytes,2,req,name=dest_path" json:"dest_path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Command_CatCmd) Reset() { *m = Command_CatCmd{} } +func (m *Command_CatCmd) String() string { return proto1.CompactTextString(m) } +func (*Command_CatCmd) ProtoMessage() {} + +func (m *Command_CatCmd) GetFilePath() string { + if m != nil && m.FilePath != nil { + return *m.FilePath + } + return "" +} + +func (m *Command_CatCmd) GetDestPath() string { + if m != nil && m.DestPath != nil { + return *m.DestPath + } + return "" +} + +type Command_PingCmd struct { + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *Command_PingCmd) Reset() { *m = Command_PingCmd{} } +func (m *Command_PingCmd) String() string { return proto1.CompactTextString(m) } +func (*Command_PingCmd) ProtoMessage() {} + +type Command_QuitCmd struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Command_QuitCmd) Reset() { *m = Command_QuitCmd{} } +func (m *Command_QuitCmd) String() string { return proto1.CompactTextString(m) } +func (*Command_QuitCmd) ProtoMessage() {} + +type Response struct { + ResponseType *MessageType `protobuf:"varint,1,req,name=response_type,enum=daemon.protocol.MessageType" json:"response_type,omitempty"` + Success *bool `protobuf:"varint,3,req,name=success" json:"success,omitempty"` + Response *string `protobuf:"bytes,2,opt,name=response" json:"response,omitempty"` + Error *string `protobuf:"bytes,4,opt,name=error" json:"error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto1.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponseType() MessageType { + if m != nil && m.ResponseType != nil { + return *m.ResponseType + } + return MessageType_ADD +} + +func (m *Response) GetSuccess() bool { + if m != nil && m.Success != nil { + return *m.Success + } + return false +} + +func (m *Response) GetResponse() string { + if m != nil && m.Response != nil { + return *m.Response + } + return "" +} + +func (m *Response) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func init() { + proto1.RegisterType((*Command)(nil), "daemon.protocol.Command") + proto1.RegisterType((*Command_AddCmd)(nil), "daemon.protocol.Command.AddCmd") + proto1.RegisterType((*Command_CatCmd)(nil), "daemon.protocol.Command.CatCmd") + proto1.RegisterType((*Command_PingCmd)(nil), "daemon.protocol.Command.PingCmd") + proto1.RegisterType((*Command_QuitCmd)(nil), 
"daemon.protocol.Command.QuitCmd") + proto1.RegisterType((*Response)(nil), "daemon.protocol.Response") + proto1.RegisterEnum("daemon.protocol.MessageType", MessageType_name, MessageType_value) +} +func (m *Command) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Command) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CommandType == nil { + return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) + } else { + data[i] = 0x8 + i++ + i = encodeVarintDaemon(data, i, uint64(*m.CommandType)) + } + if m.AddCommand != nil { + data[i] = 0x12 + i++ + i = encodeVarintDaemon(data, i, uint64(m.AddCommand.Size())) + n1, err := m.AddCommand.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.CatCommand != nil { + data[i] = 0x1a + i++ + i = encodeVarintDaemon(data, i, uint64(m.CatCommand.Size())) + n2, err := m.CatCommand.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PingCommand != nil { + data[i] = 0x22 + i++ + i = encodeVarintDaemon(data, i, uint64(m.PingCommand.Size())) + n3, err := m.PingCommand.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.QuitCommand != nil { + data[i] = 0x2a + i++ + i = encodeVarintDaemon(data, i, uint64(m.QuitCommand.Size())) + n4, err := m.QuitCommand.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Command_AddCmd) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Command_AddCmd) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FilePath == nil { + return 0, 
new(github_com_golang_protobuf_proto.RequiredNotSetError) + } else { + data[i] = 0xa + i++ + i = encodeVarintDaemon(data, i, uint64(len(*m.FilePath))) + i += copy(data[i:], *m.FilePath) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Command_CatCmd) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Command_CatCmd) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FilePath == nil { + return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) + } else { + data[i] = 0xa + i++ + i = encodeVarintDaemon(data, i, uint64(len(*m.FilePath))) + i += copy(data[i:], *m.FilePath) + } + if m.DestPath == nil { + return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) + } else { + data[i] = 0x12 + i++ + i = encodeVarintDaemon(data, i, uint64(len(*m.DestPath))) + i += copy(data[i:], *m.DestPath) + } + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Command_PingCmd) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Command_PingCmd) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Command_QuitCmd) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Command_QuitCmd) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i += copy(data[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m 
// encodeFixed64Daemon stores v at data[offset:offset+8] in little-endian
// byte order and returns the index just past the written bytes.
func encodeFixed64Daemon(data []byte, offset int, v uint64) int {
	for i := uint(0); i < 8; i++ {
		data[offset+int(i)] = uint8(v >> (8 * i))
	}
	return offset + 8
}

// encodeFixed32Daemon stores v at data[offset:offset+4] in little-endian
// byte order and returns the index just past the written bytes.
func encodeFixed32Daemon(data []byte, offset int, v uint32) int {
	for i := uint(0); i < 4; i++ {
		data[offset+int(i)] = uint8(v >> (8 * i))
	}
	return offset + 4
}

// encodeVarintDaemon writes v at data[offset:] as a protobuf base-128
// varint (7 data bits per byte, continuation bit set on all but the last
// byte) and returns the index just past the written bytes.
func encodeVarintDaemon(data []byte, offset int, v uint64) int {
	for {
		if v < 0x80 {
			data[offset] = uint8(v)
			return offset + 1
		}
		data[offset] = uint8(v&0x7f) | 0x80
		v >>= 7
		offset++
	}
}
// sovDaemon reports how many bytes x occupies when encoded as a protobuf
// base-128 varint: one byte per started group of 7 significant bits, with
// a minimum of one byte (so sovDaemon(0) == 1).
func sovDaemon(x uint64) (n int) {
	n = 1
	for x >>= 7; x != 0; x >>= 7 {
		n++
	}
	return n
}

// sozDaemon reports the varint-encoded size of x after zig-zag encoding,
// which maps values with the sign bit set (small negatives, reinterpreted)
// onto small unsigned values.
func sozDaemon(x uint64) (n int) {
	return sovDaemon((x << 1) ^ uint64(int64(x)>>63))
}
len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Command: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Command: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommandType", wireType) + } + var v MessageType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CommandType = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AddCommand == nil { + m.AddCommand = &Command_AddCmd{} + } + if err := m.AddCommand.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CatCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CatCommand == nil { + m.CatCommand = &Command_CatCmd{} + } + if err := m.CatCommand.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingCommand == nil { + m.PingCommand = &Command_PingCmd{} + } + if err := m.PingCommand.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuitCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QuitCommand == nil { + m.QuitCommand = &Command_QuitCmd{} + } + if err := m.QuitCommand.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Command_AddCmd) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCmd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCmd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.FilePath = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Command_CatCmd) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CatCmd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CatCmd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.FilePath = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.DestPath = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + if hasFields[0]&uint64(0x00000002) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Command_PingCmd) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingCmd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingCmd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Command_QuitCmd) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuitCmd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuitCmd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseType", wireType) + } + var v MessageType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseType = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Response = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Success = &b + hasFields[0] |= uint64(0x00000002) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDaemon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDaemon + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDaemon(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDaemon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + if hasFields[0]&uint64(0x00000002) == 0 { + return new(github_com_golang_protobuf_proto.RequiredNotSetError) + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDaemon(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDaemon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDaemon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDaemon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDaemon + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDaemon + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDaemon(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 
0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDaemon = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDaemon = fmt.Errorf("proto: integer overflow") +) diff --git a/daemon/server.go b/daemon/server.go new file mode 100644 index 00000000..12b4bec1 --- /dev/null +++ b/daemon/server.go @@ -0,0 +1,276 @@ +package daemon + +import ( + "fmt" + "io" + "net" + "os" + "os/exec" + "os/signal" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/daemon/proto" + "github.com/disorganizer/brig/im" + "github.com/disorganizer/brig/repo" + "github.com/disorganizer/brig/util/tunnel" + protobuf "github.com/gogo/protobuf/proto" + "golang.org/x/net/context" +) + +const ( + // MaxConnections is the upper limit of clients that may connect to a daemon + // at the same time. Other client will wait in Accept(). + MaxConnections = 20 +) + +// This is just here to make the maxConnections prettier. +type allowOneConn struct{} + +// Server is a TCP server that executed all commands +// on a single repository. Once the daemon is started, it +// attempts to open the repository, for which a password is needed. +type Server struct { + // The repo we're working on + Repo *repo.Repository + + // XMPP is the control client to the outside world. + XMPP *im.Client + + // Handle to `ipfs daemon` + ipfsDaemon *exec.Cmd + + // signals (external and self triggered) arrive on this channel. + signals chan os.Signal + + // Root context for this daemon + ctx context.Context + + // TCP Listener for incoming connections: + listener net.Listener + + // buffered channel with N places, + // every active connection holds one. 
+ maxConnections chan allowOneConn +} + +// Summon creates a new up and running Server instance +func Summon(pwd, repoFolder string, port int) (*Server, error) { + // Load the on-disk repository: + log.Infof("Opening repo: %s", repoFolder) + repository, err := repo.Open(pwd, repoFolder) + if err != nil { + log.Error("Could not load repository: ", err) + return nil, err + } + + // TODO: Uncomment later. + // proc, err := ipfsutil.StartDaemon(repository.Store.IpfsCtx) + // if err != nil { + // log.Error("Unable to start ipfs daemon: ", err) + // return nil, err + // } + + // Listen for incoming connections. + addr := fmt.Sprintf("localhost:%d", port) + listener, err := net.Listen("tcp", addr) + if err != nil { + log.Error("Error listening:", err.Error()) + return nil, err + } + + // TODO: Uncomment later + // xmppClient, err := im.NewClient( + // &im.Config{ + // Jid: xmpp.JID(repository.Jid), + // Password: pwd, + // TLSConfig: tls.Config{ + // ServerName: xmpp.JID(repository.Jid).Domain(), + // }, + // KeyPath: filepath.Join(repository.InternalFolder, "otr.key"), + // FingerprintStorePath: filepath.Join(repository.InternalFolder, "otr.buddies"), + // }, + // ) + // if err != nil { + // return nil, err + // } + + // Close the listener when the application closes. + log.Info("Listening on ", addr) + + ctx, cancel := context.WithCancel(context.Background()) + daemon := &Server{ + Repo: repository, + // XMPP: xmppClient, + signals: make(chan os.Signal, 1), + listener: listener, + // ipfsDaemon: proc, + maxConnections: make(chan allowOneConn, MaxConnections), + ctx: ctx, + } + + go daemon.loop(cancel) + return daemon, nil +} + +// Serve waits until the Server received a quit reason. +func (d *Server) Serve() { + fmt.Println("Serving... ") + <-d.ctx.Done() + fmt.Println("Serving done... 
") + d.listener.Close() + + if d.ipfsDaemon != nil { + if err := d.ipfsDaemon.Process.Kill(); err != nil { + log.Errorf("Unable to kill off ipfs daemon: %v", err) + } + } + + if err := d.Repo.Close(); err != nil { + log.Errorf("Unable to close repository: %v", err) + } +} + +// Handle incoming connections: +func (d *Server) loop(cancel context.CancelFunc) { + // Forward signals to the signals channel: + signal.Notify(d.signals, os.Interrupt, os.Kill) + + // Reserve at least cap(d.maxConnections) + for i := 0; i < cap(d.maxConnections); i++ { + d.maxConnections <- allowOneConn{} + } + + for { + select { + case <-d.signals: + // Break the Serve() loop + cancel() + return + case <-d.maxConnections: + // Listen for an incoming connection. + deadline := time.Now().Add(500 * time.Millisecond) + err := d.listener.(*net.TCPListener).SetDeadline(deadline) + if err != nil { + log.Errorf("BUG: SetDeadline failed: %v", err) + return + } + + conn, err := d.listener.Accept() + if err != nil && err.(*net.OpError).Timeout() { + d.maxConnections <- allowOneConn{} + continue + } + + if err != nil { + log.Errorf("Error in Accept(): %v", err) + return + } + + // Handle connections in a new goroutine. 
+ go d.handleRequest(d.ctx, conn) + default: + log.Infof("Max number of connections hit: %d", cap(d.maxConnections)) + time.Sleep(500 * time.Millisecond) + } + } +} + +// Handles incoming requests: +func (d *Server) handleRequest(ctx context.Context, conn net.Conn) { + defer conn.Close() + + // Make sure this connection count gets released + defer func() { + d.maxConnections <- allowOneConn{} + }() + + tnl, err := tunnel.NewEllipticTunnel(conn) + if err != nil { + log.Error("Tunnel failed", err) + return + } + + // Loop until client disconnect or dies otherwise: + for { + msg := &proto.Command{} + if err := recv(tnl, msg); err != nil { + if err != io.EOF { + log.Warning("daemon-recv: ", err) + } + return + } + + log.Infof("recv: %s: %v", conn.RemoteAddr().String(), msg) + d.handleCommand(ctx, msg, tnl) + } +} + +// Handles the actual incoming commands: +func (d *Server) handleCommand(ctx context.Context, cmd *proto.Command, conn io.ReadWriter) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Prepare a response template + resp := &proto.Response{ + ResponseType: cmd.CommandType, + Success: protobuf.Bool(false), + } + + switch *(cmd.CommandType) { + case proto.MessageType_ADD: + d.handleAddCommand(ctx, cmd, resp) + case proto.MessageType_CAT: + d.handleCatCommand(ctx, cmd, resp) + case proto.MessageType_QUIT: + resp.Response = protobuf.String("BYE") + resp.Success = protobuf.Bool(true) + d.signals <- os.Interrupt + case proto.MessageType_PING: + resp.Response = protobuf.String("PONG") + resp.Success = protobuf.Bool(true) + default: + fmt.Println("Unknown message type.") + return + } + + if err := send(conn, resp); err != nil { + log.Warning("Unable to send message back to client: ", err) + } +} + +func (d *Server) handleAddCommand(ctx context.Context, cmd *proto.Command, resp *proto.Response) { + path := cmd.GetAddCommand().GetFilePath() + fd, err := os.Open(path) + if err != nil { + resp.Error = protobuf.String(err.Error()) + return + } + + hash, 
err := d.Repo.Store.Add(path, fd) + if err != nil { + resp.Error = protobuf.String(err.Error()) + return + } + + resp.Success = protobuf.Bool(true) + resp.Response = protobuf.String(hash.B58String()) +} + +func (d *Server) handleCatCommand(ctx context.Context, cmd *proto.Command, resp *proto.Response) { + destPath := cmd.GetCatCommand().GetDestPath() + fd, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + resp.Error = protobuf.String(err.Error()) + return + } + + srcPath := cmd.GetCatCommand().GetFilePath() + if err := d.Repo.Store.Cat(srcPath, fd); err != nil { + resp.Error = protobuf.String(err.Error()) + return + } + + resp.Success = protobuf.Bool(true) + resp.Response = protobuf.String(srcPath) +} diff --git a/doc.go b/doc.go new file mode 100644 index 00000000..248e3ad7 --- /dev/null +++ b/doc.go @@ -0,0 +1,3 @@ +// Package brig implements a distributed, encrypted & compressed filesystem, +// based on ipfs (for storage and distribution) and xmpp (for authentication). +package brig diff --git a/doc/blog/README.md b/doc/blog/README.md new file mode 100644 index 00000000..e5f6f98a --- /dev/null +++ b/doc/blog/README.md @@ -0,0 +1 @@ +This is just a small devlog. diff --git a/doc/blog/public/404.html b/doc/blog/public/404.html new file mode 100644 index 00000000..187b4e38 --- /dev/null +++ b/doc/blog/public/404.html @@ -0,0 +1,51 @@ + + + + + + + + + + + + +404 Page not found - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ + +
+ +

+ [404] + # _ +

+ +

+ not found +

+ +
+ + +
+ + + + + + diff --git a/doc/blog/public/css/style-cssnext.css b/doc/blog/public/css/style-cssnext.css new file mode 100644 index 00000000..96d12d08 --- /dev/null +++ b/doc/blog/public/css/style-cssnext.css @@ -0,0 +1,494 @@ +/* Base16 Eighties Colorscheme by Chris Kempson (http://chriskempson.com) */ + +:root { + --base00: #2d2d2d; + --base01: #393939; + --base02: #515151; + --base03: #747369; + --base04: #a09f93; + --base05: #d3d0c8; + --base06: #e8e6df; + --base07: #f2f0ec; + --base08: #f2777a; + --base09: #f99157; + --base0a: #ffcc66; + --base0b: #99cc99; + --base0c: #66cccc; + --base0d: #6699cc; + --base0e: #cc99cc; + --base0f: #d27b53; +} + +.base00 { color: var(--base00); } +.base01 { color: var(--base01); } +.base02 { color: var(--base02); } +.base03 { color: var(--base03); } +.base04 { color: var(--base04); } +.base05 { color: var(--base05); } +.base06 { color: var(--base06); } +.base07 { color: var(--base07); } +.base08 { color: var(--base08); } +.base09 { color: var(--base09); } +.base0a { color: var(--base0a); } +.base0b { color: var(--base0b); } +.base0c { color: var(--base0c); } +.base0d { color: var(--base0d); } +.base0e { color: var(--base0e); } +.base0f { color: var(--base0f); } + +@import url(https://fonts.googleapis.com/css?family=Roboto+Mono:400,700); +@custom-media --breakpoint-md (max-width: 52em); + +/* General Page Layout */ + +body { + margin: 0; + background-color: var(--base00); + color: var(--base07); + line-height: 1.5; + font-size: 100%; + font-family: 'Source Code Pro', monospace; +} + +.container { + max-width: 52em; + margin-left: auto; + margin-right: auto; +} + +@media (--breakpoint-md) { + .container { + width: 100%; + } +} + +article.single section, +.article-list article { + background-color: var(--base07); + color: var(--base00); + padding-left: 8rem; + padding-right: 8rem; + padding-top: 1rem; + padding-bottom: 1rem; +} + +@media (--breakpoint-md) { + article.single section, + .article-list article { + padding-left: 2rem; + 
padding-right: 2rem; + } +} + +header, footer { + background-color: var(--base01); + padding-top: 1rem; + padding-bottom: 1rem; +} + +header { + margin-top: 1rem; + margin-bottom: 2rem; +} + +@media (--breakpoint-md) { + header { + margin-top: 0; + padding-left: 2rem; + padding-right: 2rem; + } +} + +footer { + margin-top: 2rem; + margin-bottom: 1rem; + text-align: center; + font-size: 0.9em; + color: var(--base03); +} + +@media (--breakpoint-md) { + footer { + margin-top: 0; + margin-bottom: 0; + } +} + +/* Typography */ + +h1, h2, h3, h4, h5, h6 { + font-weight: bold; + line-height: 1.25; + margin-top: 1em; + margin-bottom: .5em; +} + +p { + margin-top: 0; + margin-bottom: 1rem; +} + +h1 { font-size: 2rem } +h2 { font-size: 1.5rem } +h3 { font-size: 1.25rem } +h4 { font-size: 1rem } +h5 { font-size: .875rem } +h6 { font-size: .75rem } + +pre, code { + font-family: inherit; + font-size: inherit; +} + +/* Header Layout */ + +header a.path { + color: var(--base0d); +} + +header span.caret { + color: var(--base07); +} + +/* Footer Layout */ + +footer a { + color: var(--base03); + text-decoration: none; +} + +/* 404 Page Layout */ + +.page-not-found h1 { + text-align: center; + font-size: 5em; +} + +.page-not-found h2 { + text-align: center; + font-size: 3em; + color: var(--base04); + margin-bottom: 4rem; +} + +@media (--breakpoint-md) { + .page-not-found h1 { + font-size: 3em; + } + + .page-not-found h2 { + font-size: 2em; + } +} + +/* Homepage Layout */ + +@media (--breakpoint-md) { + .homepage { + margin-bottom: 2rem; + } +} + +.homepage h1.site-title { + text-align: center; + font-size: 5em; + color: var(--base0c); +} + +@media (--breakpoint-md) { + .homepage h1.site-title { + font-size: 3em; + } +} + +.homepage h1.headline { + font-size: 3em; + color: var(--base0a); +} + +@media (--breakpoint-md) { + .homepage h1.headline { + padding-left: 2rem; + padding-right: 2rem; + } +} + +.homepage .hero-logo img { + width: 100%; +} + +.homepage section.categories, 
+.homepage section.tags { + padding-left: 2rem; + padding-right: 2rem; +} + +.homepage .category, +.homepage .category a, +.homepage .tag, +.homepage .tag a { + color: var(--base0e); +} + +.homepage .tag { + margin-right: 2em; +} + +/* Post List Layout */ + +.article-list h1.list-title { + font-size: 3em; + color: var(--base0a); +} + +.article-list article { + padding-top: 4rem; + padding-bottom: 4rem; + margin-bottom: 4rem; +} + +.article-list article h2.headline, +.article-list article h2.headline a { + margin-top: 0; + color: var(--base0d); +} + +.article-list article .meta { + margin-bottom: 1rem; +} + +.article-list article .meta .key { + color: var(--base03); +} + +.article-list article .meta .val, +.article-list article .meta .val a { + color: var(--base0e); +} + +.article-list article section.summary a { color: var(--base0f); } + + +/* Single Post Layout */ + +article.single .meta { + font-size: 0.9em; + text-align: right; +} + +article.single .meta .key { + color: var(--base03); +} + +article.single .meta .val, article.single .meta .val a { + color: var(--base0e); +} + +@media (--breakpoint-md) { + article.single .meta { + padding-left: 2rem; + padding-right: 2rem; + } +} + +article.single h1.headline { + margin-top: 0; + font-size: 3em; + color: var(--base0a); +} + +@media (--breakpoint-md) { + article.single h1.headline { + padding-left: 2rem; + padding-right: 2rem; + } +} + +article.single section.body { + padding-top: 4rem; + padding-bottom: 3rem; +} + +@media (--breakpoint-md) { + article.single section.body { + padding-top: 2rem; + padding-bottom: 1rem; + } +} + +/* Highlight Colors */ + +article.single section.body h1 { color: var(--base0d); } +article.single section.body h2 { color: var(--base0b); } +article.single section.body h3 { color: var(--base09); } +article.single section.body h4 { color: var(--base08); } +article.single section.body h5 { color: var(--base02); } +article.single section.body h6 { color: var(--base03); } + +article.single 
section.body a { color: var(--base0f); } + +/* Article Elements */ + +article.single pre { + margin-top: 0; + margin-bottom: 1rem; + overflow-x: scroll; + border-radius: 3px; + padding: 2rem; +} + +article.single p code { + padding: 0.2em 0.5em; + border-radius: 3px; + background: var(--base03); + color: var(--base07); +} + +article.single figure { + box-sizing: border-box; + max-width: 52rem; + width: 52rem; + margin-left: -8rem; + margin-right: -8rem; + margin-bottom: 1rem; + padding: 1em; + background-color: var(--base01); +} + +@media (--breakpoint-md) { + article.single figure { + width: 100%; + margin-left: 0; + margin-right: 0; + border-radius: 3px; + } +} + +article.single figure img { + max-width: 100%; + width: 100%; + border-radius: 3px; +} + +article.single figure figcaption { + margin-top: 1rem; +} + +article.single figure figcaption h4 { + margin-top: 0; + text-align: center; + font-style: italic; + font-weight: normal; + color: var(--base07); +} + +article.single table { + border-collapse: separate; + border-spacing: 0; + max-width: 100%; + width: 100%; +} + +article.single th, +article.single td { + padding: .25rem 1rem; + line-height: inherit; + border-bottom-width: 1px; + border-bottom-style: solid; + border-bottom-color: var(--base04); +} + +article.single tr:last-child td { + border-bottom: 0; +} + +article.single th { + text-align: left; + font-weight: bold; + vertical-align: bottom; +} + +article.single td { vertical-align: top } + +article.single blockquote { + margin-left: 2rem; + margin-right: 3rem; + padding-left: 1rem; + border-left: 5px solid var(--base0c); +} + +article.single hr { + border: 0; + border-bottom-style: solid; + border-bottom-width: 1px; + border-bottom-color: var(--base04); +} + +/* Pygments template by Jan T. 
Sott (https://github.com/idleberg) */ + +pre { background: var(--base00); color: var(--base07) } + +.highlight .hll { background-color: var(--base02) } +.highlight .c { color: var(--base03) } /* Comment */ +.highlight .err { color: var(--base08) } /* Error */ +.highlight .k { color: var(--base0e) } /* Keyword */ +.highlight .l { color: var(--base09) } /* Literal */ +.highlight .n { color: var(--base07) } /* Name */ +.highlight .o { color: var(--base0c) } /* Operator */ +.highlight .p { color: var(--base07) } /* Punctuation */ +.highlight .cm { color: var(--base03) } /* Comment.Multiline */ +.highlight .cp { color: var(--base03) } /* Comment.Preproc */ +.highlight .c1 { color: var(--base03) } /* Comment.Single */ +.highlight .cs { color: var(--base03) } /* Comment.Special */ +.highlight .gd { color: var(--base08) } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gh { color: var(--base07); font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: var(--base0b) } /* Generic.Inserted */ +.highlight .gp { color: var(--base03); font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: var(--base0c); font-weight: bold } /* Generic.Subheading */ +.highlight .kc { color: var(--base0e) } /* Keyword.Constant */ +.highlight .kd { color: var(--base0e) } /* Keyword.Declaration */ +.highlight .kn { color: var(--base0c) } /* Keyword.Namespace */ +.highlight .kp { color: var(--base0e) } /* Keyword.Pseudo */ +.highlight .kr { color: var(--base0e) } /* Keyword.Reserved */ +.highlight .kt { color: var(--base0a) } /* Keyword.Type */ +.highlight .ld { color: var(--base0b) } /* Literal.Date */ +.highlight .m { color: var(--base09) } /* Literal.Number */ +.highlight .s { color: var(--base0b) } /* Literal.String */ +.highlight .na { color: var(--base0d) } /* Name.Attribute */ +.highlight .nb { color: var(--base07) } /* Name.Builtin */ +.highlight .nc { color: 
var(--base0a) } /* Name.Class */ +.highlight .no { color: var(--base08) } /* Name.Constant */ +.highlight .nd { color: var(--base0c) } /* Name.Decorator */ +.highlight .ni { color: var(--base07) } /* Name.Entity */ +.highlight .ne { color: var(--base08) } /* Name.Exception */ +.highlight .nf { color: var(--base0d) } /* Name.Function */ +.highlight .nl { color: var(--base07) } /* Name.Label */ +.highlight .nn { color: var(--base0a) } /* Name.Namespace */ +.highlight .nx { color: var(--base0d) } /* Name.Other */ +.highlight .py { color: var(--base07) } /* Name.Property */ +.highlight .nt { color: var(--base0c) } /* Name.Tag */ +.highlight .nv { color: var(--base08) } /* Name.Variable */ +.highlight .ow { color: var(--base0c) } /* Operator.Word */ +.highlight .w { color: var(--base07) } /* Text.Whitespace */ +.highlight .mf { color: var(--base09) } /* Literal.Number.Float */ +.highlight .mh { color: var(--base09) } /* Literal.Number.Hex */ +.highlight .mi { color: var(--base09) } /* Literal.Number.Integer */ +.highlight .mo { color: var(--base09) } /* Literal.Number.Oct */ +.highlight .sb { color: var(--base0b) } /* Literal.String.Backtick */ +.highlight .sc { color: var(--base07) } /* Literal.String.Char */ +.highlight .sd { color: var(--base03) } /* Literal.String.Doc */ +.highlight .s2 { color: var(--base0b) } /* Literal.String.Double */ +.highlight .se { color: var(--base09) } /* Literal.String.Escape */ +.highlight .sh { color: var(--base0b) } /* Literal.String.Heredoc */ +.highlight .si { color: var(--base09) } /* Literal.String.Interpol */ +.highlight .sx { color: var(--base0b) } /* Literal.String.Other */ +.highlight .sr { color: var(--base0b) } /* Literal.String.Regex */ +.highlight .s1 { color: var(--base0b) } /* Literal.String.Single */ +.highlight .ss { color: var(--base0b) } /* Literal.String.Symbol */ +.highlight .bp { color: var(--base07) } /* Name.Builtin.Pseudo */ +.highlight .vc { color: var(--base08) } /* Name.Variable.Class */ +.highlight .vg { 
color: var(--base08) } /* Name.Variable.Global */ +.highlight .vi { color: var(--base08) } /* Name.Variable.Instance */ +.highlight .il { color: var(--base09) } /* Literal.Number.Integer.Long */ diff --git a/doc/blog/public/css/style.css b/doc/blog/public/css/style.css new file mode 100644 index 00000000..0717cd29 --- /dev/null +++ b/doc/blog/public/css/style.css @@ -0,0 +1,508 @@ + + +@import url(https://fonts.googleapis.com/css?family=Roboto+Mono:400,700); + +/* Base16 Eighties Colorscheme by Chris Kempson (http://chriskempson.com) */ + +.base00 { color: #2d2d2d; } +.base01 { color: #393939; } +.base02 { color: #515151; } +.base03 { color: #747369; } +.base04 { color: #a09f93; } +.base05 { color: #d3d0c8; } +.base06 { color: #e8e6df; } +.base07 { color: #f2f0ec; } +.base08 { color: #f2777a; } +.base09 { color: #f99157; } +.base0a { color: #ffcc66; } +.base0b { color: #99cc99; } +.base0c { color: #66cccc; } +.base0d { color: #6699cc; } +.base0e { color: #cc99cc; } +.base0f { color: #d27b53; } + +/* General Page Layout */ + +body { + margin: 0; + background-color: #2d2d2d; + color: #f2f0ec; + line-height: 1.5; + font-size: 100%; + font-family: 'Source Code Pro', monospace; +} + +.container { + max-width: 52em; + margin-left: auto; + margin-right: auto; +} + +@media (max-width: 52em) { + .container { + width: 100%; + } +} + +article.single section, +.article-list article { + background-color: #f2f0ec; + color: #2d2d2d; + padding-left: 128px; + padding-left: 8rem; + padding-right: 128px; + padding-right: 8rem; + padding-top: 16px; + padding-top: 1rem; + padding-bottom: 16px; + padding-bottom: 1rem; +} + +@media (max-width: 52em) { + article.single section, + .article-list article { + padding-left: 2rem; + padding-right: 2rem; + } +} + +header, footer { + background-color: #393939; + padding-top: 16px; + padding-top: 1rem; + padding-bottom: 16px; + padding-bottom: 1rem; +} + +header { + margin-top: 16px; + margin-top: 1rem; + margin-bottom: 32px; + margin-bottom: 
2rem; +} + +@media (max-width: 52em) { + header { + margin-top: 0; + padding-left: 2rem; + padding-right: 2rem; + } +} + +footer { + margin-top: 32px; + margin-top: 2rem; + margin-bottom: 16px; + margin-bottom: 1rem; + text-align: center; + font-size: 0.9em; + color: #747369; +} + +@media (max-width: 52em) { + footer { + margin-top: 0; + margin-bottom: 0; + } +} + +/* Typography */ + +h1, h2, h3, h4, h5, h6 { + font-weight: bold; + line-height: 1.25; + margin-top: 1em; + margin-bottom: .5em; +} + +p { + margin-top: 0; + margin-bottom: 16px; + margin-bottom: 1rem; +} + +h1 { font-size: 32px; font-size: 2rem } +h2 { font-size: 24px; font-size: 1.5rem } +h3 { font-size: 20px; font-size: 1.25rem } +h4 { font-size: 16px; font-size: 1rem } +h5 { font-size: 14px; font-size: .875rem } +h6 { font-size: 12px; font-size: .75rem } + +pre, code { + font-family: inherit; + font-size: inherit; +} + +/* Header Layout */ + +header a.path { + color: #6699cc; +} + +header span.caret { + color: #f2f0ec; +} + +/* Footer Layout */ + +footer a { + color: #747369; + text-decoration: none; +} + +/* 404 Page Layout */ + +.page-not-found h1 { + text-align: center; + font-size: 5em; +} + +.page-not-found h2 { + text-align: center; + font-size: 3em; + color: #a09f93; + margin-bottom: 64px; + margin-bottom: 4rem; +} + +@media (max-width: 52em) { + .page-not-found h1 { + font-size: 3em; + } + + .page-not-found h2 { + font-size: 2em; + } +} + +/* Homepage Layout */ + +@media (max-width: 52em) { + .homepage { + margin-bottom: 2rem; + } +} + +.homepage h1.site-title { + text-align: center; + font-size: 5em; + color: #66cccc; +} + +@media (max-width: 52em) { + .homepage h1.site-title { + font-size: 3em; + } +} + +.homepage h1.headline { + font-size: 3em; + color: #ffcc66; +} + +@media (max-width: 52em) { + .homepage h1.headline { + padding-left: 2rem; + padding-right: 2rem; + } +} + +.homepage .hero-logo img { + width: 100%; +} + +.homepage section.categories, +.homepage section.tags { + 
padding-left: 32px; + padding-left: 2rem; + padding-right: 32px; + padding-right: 2rem; +} + +.homepage .category, +.homepage .category a, +.homepage .tag, +.homepage .tag a { + color: #cc99cc; +} + +.homepage .tag { + margin-right: 2em; +} + +/* Post List Layout */ + +.article-list h1.list-title { + font-size: 3em; + color: #ffcc66; +} + +.article-list article { + padding-top: 64px; + padding-top: 4rem; + padding-bottom: 64px; + padding-bottom: 4rem; + margin-bottom: 64px; + margin-bottom: 4rem; +} + +.article-list article h2.headline, +.article-list article h2.headline a { + margin-top: 0; + color: #6699cc; +} + +.article-list article .meta { + margin-bottom: 16px; + margin-bottom: 1rem; +} + +.article-list article .meta .key { + color: #747369; +} + +.article-list article .meta .val, +.article-list article .meta .val a { + color: #cc99cc; +} + +.article-list article section.summary a { color: #d27b53; } + + +/* Single Post Layout */ + +article.single .meta { + font-size: 0.9em; + text-align: right; +} + +article.single .meta .key { + color: #747369; +} + +article.single .meta .val, article.single .meta .val a { + color: #cc99cc; +} + +@media (max-width: 52em) { + article.single .meta { + padding-left: 2rem; + padding-right: 2rem; + } +} + +article.single h1.headline { + margin-top: 0; + font-size: 3em; + color: #ffcc66; +} + +@media (max-width: 52em) { + article.single h1.headline { + padding-left: 2rem; + padding-right: 2rem; + } +} + +article.single section.body { + padding-top: 64px; + padding-top: 4rem; + padding-bottom: 48px; + padding-bottom: 3rem; +} + +@media (max-width: 52em) { + article.single section.body { + padding-top: 2rem; + padding-bottom: 1rem; + } +} + +/* Highlight Colors */ + +article.single section.body h1 { color: #6699cc; } +article.single section.body h2 { color: #99cc99; } +article.single section.body h3 { color: #f99157; } +article.single section.body h4 { color: #f2777a; } +article.single section.body h5 { color: #515151; } 
+article.single section.body h6 { color: #747369; } + +article.single section.body a { color: #d27b53; } + +/* Article Elements */ + +article.single pre { + margin-top: 0; + margin-bottom: 16px; + margin-bottom: 1rem; + overflow-x: scroll; + border-radius: 3px; + padding: 32px; + padding: 2rem; +} + +article.single p code { + padding: 0.2em 0.5em; + border-radius: 3px; + background: #747369; + color: #f2f0ec; +} + +article.single figure { + box-sizing: border-box; + max-width: 832px; + max-width: 52rem; + width: 832px; + width: 52rem; + margin-left: -128px; + margin-left: -8rem; + margin-right: -128px; + margin-right: -8rem; + margin-bottom: 16px; + margin-bottom: 1rem; + padding: 1em; + background-color: #393939; +} + +@media (max-width: 52em) { + article.single figure { + width: 100%; + margin-left: 0; + margin-right: 0; + border-radius: 3px; + } +} + +article.single figure img { + max-width: 100%; + width: 100%; + border-radius: 3px; +} + +article.single figure figcaption { + margin-top: 16px; + margin-top: 1rem; +} + +article.single figure figcaption h4 { + margin-top: 0; + text-align: center; + font-style: italic; + font-weight: normal; + color: #f2f0ec; +} + +article.single table { + border-collapse: separate; + border-spacing: 0; + max-width: 100%; + width: 100%; +} + +article.single th, +article.single td { + padding: 4px 16px; + padding: .25rem 1rem; + line-height: inherit; + border-bottom-width: 1px; + border-bottom-style: solid; + border-bottom-color: #a09f93; +} + +article.single tr:last-child td { + border-bottom: 0; +} + +article.single th { + text-align: left; + font-weight: bold; + vertical-align: bottom; +} + +article.single td { vertical-align: top } + +article.single blockquote { + margin-left: 32px; + margin-left: 2rem; + margin-right: 48px; + margin-right: 3rem; + padding-left: 16px; + padding-left: 1rem; + border-left: 5px solid #66cccc; +} + +article.single hr { + border: 0; + border-bottom-style: solid; + border-bottom-width: 1px; + 
border-bottom-color: #a09f93; +} + +/* Pygments template by Jan T. Sott (https://github.com/idleberg) */ + +pre { background: #2d2d2d; color: #f2f0ec } + +.highlight .hll { background-color: #515151 } +.highlight .c { color: #747369 } /* Comment */ +.highlight .err { color: #f2777a } /* Error */ +.highlight .k { color: #cc99cc } /* Keyword */ +.highlight .l { color: #f99157 } /* Literal */ +.highlight .n { color: #f2f0ec } /* Name */ +.highlight .o { color: #66cccc } /* Operator */ +.highlight .p { color: #f2f0ec } /* Punctuation */ +.highlight .cm { color: #747369 } /* Comment.Multiline */ +.highlight .cp { color: #747369 } /* Comment.Preproc */ +.highlight .c1 { color: #747369 } /* Comment.Single */ +.highlight .cs { color: #747369 } /* Comment.Special */ +.highlight .gd { color: #f2777a } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gh { color: #f2f0ec; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #99cc99 } /* Generic.Inserted */ +.highlight .gp { color: #747369; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #66cccc; font-weight: bold } /* Generic.Subheading */ +.highlight .kc { color: #cc99cc } /* Keyword.Constant */ +.highlight .kd { color: #cc99cc } /* Keyword.Declaration */ +.highlight .kn { color: #66cccc } /* Keyword.Namespace */ +.highlight .kp { color: #cc99cc } /* Keyword.Pseudo */ +.highlight .kr { color: #cc99cc } /* Keyword.Reserved */ +.highlight .kt { color: #ffcc66 } /* Keyword.Type */ +.highlight .ld { color: #99cc99 } /* Literal.Date */ +.highlight .m { color: #f99157 } /* Literal.Number */ +.highlight .s { color: #99cc99 } /* Literal.String */ +.highlight .na { color: #6699cc } /* Name.Attribute */ +.highlight .nb { color: #f2f0ec } /* Name.Builtin */ +.highlight .nc { color: #ffcc66 } /* Name.Class */ +.highlight .no { color: #f2777a } /* Name.Constant */ +.highlight .nd { color: #66cccc } /* 
Name.Decorator */ +.highlight .ni { color: #f2f0ec } /* Name.Entity */ +.highlight .ne { color: #f2777a } /* Name.Exception */ +.highlight .nf { color: #6699cc } /* Name.Function */ +.highlight .nl { color: #f2f0ec } /* Name.Label */ +.highlight .nn { color: #ffcc66 } /* Name.Namespace */ +.highlight .nx { color: #6699cc } /* Name.Other */ +.highlight .py { color: #f2f0ec } /* Name.Property */ +.highlight .nt { color: #66cccc } /* Name.Tag */ +.highlight .nv { color: #f2777a } /* Name.Variable */ +.highlight .ow { color: #66cccc } /* Operator.Word */ +.highlight .w { color: #f2f0ec } /* Text.Whitespace */ +.highlight .mf { color: #f99157 } /* Literal.Number.Float */ +.highlight .mh { color: #f99157 } /* Literal.Number.Hex */ +.highlight .mi { color: #f99157 } /* Literal.Number.Integer */ +.highlight .mo { color: #f99157 } /* Literal.Number.Oct */ +.highlight .sb { color: #99cc99 } /* Literal.String.Backtick */ +.highlight .sc { color: #f2f0ec } /* Literal.String.Char */ +.highlight .sd { color: #747369 } /* Literal.String.Doc */ +.highlight .s2 { color: #99cc99 } /* Literal.String.Double */ +.highlight .se { color: #f99157 } /* Literal.String.Escape */ +.highlight .sh { color: #99cc99 } /* Literal.String.Heredoc */ +.highlight .si { color: #f99157 } /* Literal.String.Interpol */ +.highlight .sx { color: #99cc99 } /* Literal.String.Other */ +.highlight .sr { color: #99cc99 } /* Literal.String.Regex */ +.highlight .s1 { color: #99cc99 } /* Literal.String.Single */ +.highlight .ss { color: #99cc99 } /* Literal.String.Symbol */ +.highlight .bp { color: #f2f0ec } /* Name.Builtin.Pseudo */ +.highlight .vc { color: #f2777a } /* Name.Variable.Class */ +.highlight .vg { color: #f2777a } /* Name.Variable.Global */ +.highlight .vi { color: #f2777a } /* Name.Variable.Instance */ +.highlight .il { color: #f99157 } /* Literal.Number.Integer.Long */ diff --git a/doc/blog/public/css/style.min.css b/doc/blog/public/css/style.min.css new file mode 100644 index 00000000..ed56850a --- 
/dev/null +++ b/doc/blog/public/css/style.min.css @@ -0,0 +1 @@ +@import url(https://fonts.googleapis.com/css?family=Roboto+Mono:400,700);.base00{color:#2d2d2d}.base01{color:#393939}.base02{color:#515151}.base03{color:#747369}.base04{color:#a09f93}.base05{color:#d3d0c8}.base06{color:#e8e6df}.base07{color:#f2f0ec}.base08{color:#f2777a}.base09{color:#f99157}.base0a{color:#fc6}.base0b{color:#9c9}.base0c{color:#6cc}.base0d{color:#69c}.base0e{color:#c9c}.base0f{color:#d27b53}body{margin:0;background-color:#2d2d2d;color:#f2f0ec;line-height:1.5;font-size:100%;font-family:Source Code Pro,monospace}.container{max-width:52em;margin-left:auto;margin-right:auto}@media (max-width:52em){.container{width:100%}}.article-list article,article.single section{background-color:#f2f0ec;color:#2d2d2d;padding-left:8pc;padding:1rem 8rem;padding-right:8pc;padding-top:1pc;padding-bottom:1pc}@media (max-width:52em){.article-list article,article.single section{padding-left:2rem;padding-right:2rem}}footer,header{background-color:#393939;padding-top:1pc;padding-top:1rem;padding-bottom:1pc;padding-bottom:1rem}header{margin-top:1pc;margin-top:1rem;margin-bottom:2pc;margin-bottom:2rem}@media (max-width:52em){header{margin-top:0;padding-left:2rem;padding-right:2rem}}footer{margin-top:2pc;margin-top:2rem;margin-bottom:1pc;margin-bottom:1rem;text-align:center;font-size:.9em;color:#747369}@media (max-width:52em){footer{margin-top:0;margin-bottom:0}}h1,h2,h3,h4,h5,h6{font-weight:700;line-height:1.25;margin-top:1em;margin-bottom:.5em}p{margin-top:0;margin-bottom:1pc;margin-bottom:1rem}h1{font-size:2pc;font-size:2rem}h2{font-size:24px;font-size:1.5rem}h3{font-size:20px;font-size:1.25rem}h4{font-size:1pc;font-size:1rem}h5{font-size:14px;font-size:.875rem}h6{font-size:9pt;font-size:.75rem}code,pre{font-family:inherit;font-size:inherit}header a.path{color:#69c}header span.caret{color:#f2f0ec}footer a{color:#747369;text-decoration:none}.page-not-found h1{text-align:center;font-size:5em}.page-not-found 
h2{text-align:center;font-size:3em;color:#a09f93;margin-bottom:4pc;margin-bottom:4rem}@media (max-width:52em){.page-not-found h1{font-size:3em}.page-not-found h2{font-size:2em}}@media (max-width:52em){.homepage{margin-bottom:2rem}}.homepage h1.site-title{text-align:center;font-size:5em;color:#6cc}@media (max-width:52em){.homepage h1.site-title{font-size:3em}}.homepage h1.headline{font-size:3em;color:#fc6}@media (max-width:52em){.homepage h1.headline{padding-left:2rem;padding-right:2rem}}.homepage .hero-logo img{width:100%}.homepage section.categories,.homepage section.tags{padding-left:2pc;padding-left:2rem;padding-right:2pc;padding-right:2rem}.homepage .category,.homepage .category a,.homepage .tag,.homepage .tag a{color:#c9c}.homepage .tag{margin-right:2em}.article-list h1.list-title{font-size:3em;color:#fc6}.article-list article{padding-top:4pc;padding-top:4rem;padding-bottom:4pc;padding-bottom:4rem;margin-bottom:4pc;margin-bottom:4rem}.article-list article h2.headline,.article-list article h2.headline a{margin-top:0;color:#69c}.article-list article .meta{margin-bottom:1pc;margin-bottom:1rem}.article-list article .meta .key{color:#747369}.article-list article .meta .val,.article-list article .meta .val a{color:#c9c}.article-list article section.summary a{color:#d27b53}article.single .meta{font-size:.9em;text-align:right}article.single .meta .key{color:#747369}article.single .meta .val,article.single .meta .val a{color:#c9c}@media (max-width:52em){article.single .meta{padding-left:2rem;padding-right:2rem}}article.single h1.headline{margin-top:0;font-size:3em;color:#fc6}@media (max-width:52em){article.single h1.headline{padding-left:2rem;padding-right:2rem}}article.single section.body{padding-top:4pc;padding-top:4rem;padding-bottom:3pc;padding-bottom:3rem}@media (max-width:52em){article.single section.body{padding-top:2rem;padding-bottom:1rem}}article.single section.body h1{color:#69c}article.single section.body h2{color:#9c9}article.single section.body 
h3{color:#f99157}article.single section.body h4{color:#f2777a}article.single section.body h5{color:#515151}article.single section.body h6{color:#747369}article.single section.body a{color:#d27b53}article.single pre{margin-top:0;margin-bottom:1pc;margin-bottom:1rem;overflow-x:scroll;border-radius:3px;padding:2pc;padding:2rem}article.single p code{padding:.2em .5em;border-radius:3px;background:#747369;color:#f2f0ec}article.single figure{box-sizing:border-box;max-width:52pc;max-width:52rem;width:52pc;width:52rem;margin-left:-8pc;margin-left:-8rem;margin-right:-8pc;margin-right:-8rem;margin-bottom:1pc;margin-bottom:1rem;padding:1em;background-color:#393939}@media (max-width:52em){article.single figure{width:100%;margin-left:0;margin-right:0;border-radius:3px}}article.single figure img{max-width:100%;width:100%;border-radius:3px}article.single figure figcaption{margin-top:1pc;margin-top:1rem}article.single figure figcaption h4{margin-top:0;text-align:center;font-style:italic;font-weight:400;color:#f2f0ec}article.single table{border-collapse:separate;border-spacing:0;max-width:100%;width:100%}article.single td,article.single th{padding:4px 1pc;padding:.25rem 1rem;line-height:inherit;border-bottom-width:1px;border-bottom-style:solid;border-bottom-color:#a09f93}article.single tr:last-child td{border-bottom:0}article.single th{text-align:left;font-weight:700;vertical-align:bottom}article.single td{vertical-align:top}article.single blockquote{margin-left:2pc;margin-left:2rem;margin-right:3pc;margin-right:3rem;padding-left:1pc;padding-left:1rem;border-left:5px solid #6cc}article.single hr{border:0;border-bottom-style:solid;border-bottom-width:1px;border-bottom-color:#a09f93}pre{background:#2d2d2d;color:#f2f0ec}.highlight .hll{background-color:#515151}.highlight .c{color:#747369}.highlight .err{color:#f2777a}.highlight .k{color:#c9c}.highlight .l{color:#f99157}.highlight .n{color:#f2f0ec}.highlight .o{color:#6cc}.highlight .p{color:#f2f0ec}.highlight .c1,.highlight 
.cm,.highlight .cp,.highlight .cs{color:#747369}.highlight .gd{color:#f2777a}.highlight .ge{font-style:italic}.highlight .gh{color:#f2f0ec;font-weight:700}.highlight .gi{color:#9c9}.highlight .gp{color:#747369}.highlight .gp,.highlight .gs,.highlight .gu{font-weight:700}.highlight .gu{color:#6cc}.highlight .kc,.highlight .kd{color:#c9c}.highlight .kn{color:#6cc}.highlight .kp,.highlight .kr{color:#c9c}.highlight .kt{color:#fc6}.highlight .ld{color:#9c9}.highlight .m{color:#f99157}.highlight .s{color:#9c9}.highlight .na{color:#69c}.highlight .nb{color:#f2f0ec}.highlight .nc{color:#fc6}.highlight .no{color:#f2777a}.highlight .nd{color:#6cc}.highlight .ni{color:#f2f0ec}.highlight .ne{color:#f2777a}.highlight .nf{color:#69c}.highlight .nl{color:#f2f0ec}.highlight .nn{color:#fc6}.highlight .nx{color:#69c}.highlight .py{color:#f2f0ec}.highlight .nt{color:#6cc}.highlight .nv{color:#f2777a}.highlight .ow{color:#6cc}.highlight .w{color:#f2f0ec}.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:#f99157}.highlight .sb{color:#9c9}.highlight .sc{color:#f2f0ec}.highlight .sd{color:#747369}.highlight .s2{color:#9c9}.highlight .se{color:#f99157}.highlight .sh{color:#9c9}.highlight .si{color:#f99157}.highlight .s1,.highlight .sr,.highlight .ss,.highlight .sx{color:#9c9}.highlight .bp{color:#f2f0ec}.highlight .vc,.highlight .vg,.highlight .vi{color:#f2777a}.highlight .il{color:#f99157} \ No newline at end of file diff --git a/doc/blog/public/img/base16-eighties.svg b/doc/blog/public/img/base16-eighties.svg new file mode 100644 index 00000000..ff951bd7 --- /dev/null +++ b/doc/blog/public/img/base16-eighties.svg @@ -0,0 +1,21 @@ + + + Base16 Eighties + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/doc/blog/public/index.html b/doc/blog/public/index.html new file mode 100644 index 00000000..8ac6112f --- /dev/null +++ b/doc/blog/public/index.html @@ -0,0 +1,121 @@ + + + + + + + + + + + + +brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ + + +
+ +

+ [base16] + # _ +

+ + + +
+ Add your own "layouts/partials/hero.html" to replace this text. +
+ + +

Recent Posts

+ +
+ + + + + + +
+

A small step for mankind, a big step for brig

+
+ + published on + + +
+
+ A small historic moment was achieved today: The very first file was added to brig. There was no way to get it out again, but hey - Progress comes in steps. Luckily, just two hours later there was a brig get command that could retrieve the file again from ipfs. This is also my very first devlog entry, so… Hi. I mainly write this to remember what I did (and when) on the course of the project. Read More... +
+
+ + +
+ + + + + +

Tags

+ +
+ + + brig + (1) + + + + development + (1) + + + + go + (1) + + +
+ + + +
+ + + +
+ + + + + + diff --git a/doc/blog/public/index.xml b/doc/blog/public/index.xml new file mode 100644 index 00000000..f693a042 --- /dev/null +++ b/doc/blog/public/index.xml @@ -0,0 +1,87 @@ + + + + brig devlog + http://replace-this-with-your-hugo-site.com/ + Recent content on brig devlog + Hugo -- gohugo.io + en-us + Sat, 16 Jan 2016 01:25:51 +0100 + + + + bolt_and_fingerprint + http://replace-this-with-your-hugo-site.com/post/bolt_and_fingerprint/ + Sat, 16 Jan 2016 01:25:51 +0100 + + http://replace-this-with-your-hugo-site.com/post/bolt_and_fingerprint/ + + + + + A small step for mankind, a big step for brig + http://replace-this-with-your-hugo-site.com/post/devlog/ + Fri, 15 Jan 2016 01:13:07 -0700 + + http://replace-this-with-your-hugo-site.com/post/devlog/ + <p>A small historic moment was achieved today: The very first file was added to +<code>brig</code>. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a <code>brig get</code> command that +could retrieve the file again from <code>ipfs</code>.</p> + +<p>This is also my very first devlog entry, so&hellip; Hi. I mainly write this to +remember what I did (and when) on the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That&rsquo;s +the same effect, I guess. If it&rsquo;s fun to read for others&hellip; that&rsquo;s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.</p> + +<p>So, back to the technical side of life. <code>brig add</code> currently works a bit +confusing. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to <code>ipfs</code>. The encryption and compression layer uses +<code>io.Writer</code> though, so we can&rsquo;t just stack <code>io.Reader</code> on top of each +other. 
Instead we need to use a nice little feature from the stdlib: +<code>io.Pipe()</code>. This function returns a <code>io.Writer</code> and a <code>io.Reader</code>. Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs a <code>io.Reader</code>, +but you just have a <code>io.Writer</code>, then <code>io.Pipe()</code> should pop into your mind now.</p> + +<p>Here&rsquo;s how it looks in practice:</p> + +<pre><code class="language-go">func NewFileReader(key []byte, r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + + // Setup the writer part: + wEnc, err := encrypt.NewWriter(pw, key) + if err != nil { + return nil, err + } + + wZip := compress.NewWriter(wEnc) + + // Suck the reader empty and move it to `wZip`. + // Every write to wZip will be available as read in `pr`. + go func() { + defer func() { + wEnc.Close() + pw.Close() + }() + + if _, err := io.Copy(wZip, r); err != nil { + // TODO: Warn or pass to outside? + log.Warningf(&quot;add: copy: %v&quot;, err) + } + }() + + return pr, nil +} +</code></pre> + +<p>That&rsquo;s all for today! For tomorrow a cleanup session is planned and the piece +of code that derives the AES-Key from an unencrypted file.</p> + + + + + \ No newline at end of file diff --git a/doc/blog/public/post/bolt_and_fingerprint/index.html b/doc/blog/public/post/bolt_and_fingerprint/index.html new file mode 100644 index 00000000..c5041627 --- /dev/null +++ b/doc/blog/public/post/bolt_and_fingerprint/index.html @@ -0,0 +1,57 @@ + + + + + + + + + + + + +bolt_and_fingerprint - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ + +
+ +
+
+ + published on + + + + +
+

bolt_and_fingerprint

+
+ +
+
+ +
+ + +
+ + + + + + diff --git a/doc/blog/public/post/devlog/index.html b/doc/blog/public/post/devlog/index.html new file mode 100644 index 00000000..3b6a8fd5 --- /dev/null +++ b/doc/blog/public/post/devlog/index.html @@ -0,0 +1,125 @@ + + + + + + + + + + + + +A small step for mankind, a big step for brig - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ + +
+ +
+
+ + published on + + + + +
+ tags: + + + Development + + Go + + brig + + + +
+

A small step for mankind, a big step for brig

+
+

A small historic moment was achieved today: The very first file was added to +brig. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a brig get command that +could retrieve the file again from ipfs.

+ +

This is also my very first devlog entry, so… Hi. I mainly write this to +remember what I did (and when) over the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That’s +the same effect, I guess. If it’s fun to read for others… that’s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.

+ +

So, back to the technical side of life. brig add currently works a bit +confusingly. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to ipfs. The encryption and compression layer uses +io.Writer though, so we can’t just stack io.Reader on top of each +other. Instead we need to use a nice little feature from the stdlib: +io.Pipe(). This function returns an io.Writer and an io.Reader. Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs an io.Reader, +but you just have an io.Writer, then io.Pipe() should pop into your mind now.

+ +

Here’s how it looks in practice:

+ +
func NewFileReader(key []byte, r io.Reader) (io.Reader, error) {
+	pr, pw := io.Pipe()
+
+	// Setup the writer part:
+	wEnc, err := encrypt.NewWriter(pw, key)
+	if err != nil {
+		return nil, err
+	}
+
+	wZip := compress.NewWriter(wEnc)
+
+	// Suck the reader empty and move it to `wZip`.
+	// Every write to wZip will be available as read in `pr`.
+	go func() {
+		defer func() {
+			wEnc.Close()
+			pw.Close()
+		}()
+
+		if _, err := io.Copy(wZip, r); err != nil {
+			// TODO: Warn or pass to outside?
+			log.Warningf("add: copy: %v", err)
+		}
+	}()
+
+	return pr, nil
+}
+
+ +

That’s all for today! For tomorrow a cleanup session is planned, as well as the piece +of code that derives the AES-Key from an unencrypted file.

+ +
+
+ +
+ + +
+ + + + + + diff --git a/doc/blog/public/post/index.html b/doc/blog/public/post/index.html new file mode 100644 index 00000000..c7e6c400 --- /dev/null +++ b/doc/blog/public/post/index.html @@ -0,0 +1,71 @@ + + + + + + + + + + + + +Posts - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ +
+

Posts

+ + + + + + +
+

A small step for mankind, a big step for brig

+
+ + published on + + +
+
+ A small historic moment was achieved today: The very first file was added to brig. There was no way to get it out again, but hey - Progress comes in steps. Luckily, just two hours later there was a brig get command that could retrieve the file again from ipfs. This is also my very first devlog entry, so… Hi. I mainly write this to remember what I did (and when) on the course of the project. Read More... +
+
+ + +
+ +
+ + + + + + diff --git a/doc/blog/public/post/index.xml b/doc/blog/public/post/index.xml new file mode 100644 index 00000000..3111475b --- /dev/null +++ b/doc/blog/public/post/index.xml @@ -0,0 +1,87 @@ + + + + Posts on brig devlog + http://replace-this-with-your-hugo-site.com/post/ + Recent content in Posts on brig devlog + Hugo -- gohugo.io + en-us + Sat, 16 Jan 2016 01:25:51 +0100 + + + + bolt_and_fingerprint + http://replace-this-with-your-hugo-site.com/post/bolt_and_fingerprint/ + Sat, 16 Jan 2016 01:25:51 +0100 + + http://replace-this-with-your-hugo-site.com/post/bolt_and_fingerprint/ + + + + + A small step for mankind, a big step for brig + http://replace-this-with-your-hugo-site.com/post/devlog/ + Fri, 15 Jan 2016 01:13:07 -0700 + + http://replace-this-with-your-hugo-site.com/post/devlog/ + <p>A small historic moment was achieved today: The very first file was added to +<code>brig</code>. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a <code>brig get</code> command that +could retrieve the file again from <code>ipfs</code>.</p> + +<p>This is also my very first devlog entry, so&hellip; Hi. I mainly write this to +remember what I did (and when) on the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That&rsquo;s +the same effect, I guess. If it&rsquo;s fun to read for others&hellip; that&rsquo;s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.</p> + +<p>So, back to the technical side of life. <code>brig add</code> currently works a bit +confusing. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to <code>ipfs</code>. 
The encryption and compression layer uses +<code>io.Writer</code> though, so we can&rsquo;t just stack <code>io.Reader</code> on top of each +other. Instead we need to use a nice little feature from the stdlib: +<code>io.Pipe()</code>. This function returns a <code>io.Writer</code> and a <code>io.Reader</code>. Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs a <code>io.Reader</code>, +but you just have a <code>io.Writer</code>, then <code>io.Pipe()</code> should pop into your mind now.</p> + +<p>Here&rsquo;s how it looks in practice:</p> + +<pre><code class="language-go">func NewFileReader(key []byte, r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + + // Setup the writer part: + wEnc, err := encrypt.NewWriter(pw, key) + if err != nil { + return nil, err + } + + wZip := compress.NewWriter(wEnc) + + // Suck the reader empty and move it to `wZip`. + // Every write to wZip will be available as read in `pr`. + go func() { + defer func() { + wEnc.Close() + pw.Close() + }() + + if _, err := io.Copy(wZip, r); err != nil { + // TODO: Warn or pass to outside? + log.Warningf(&quot;add: copy: %v&quot;, err) + } + }() + + return pr, nil +} +</code></pre> + +<p>That&rsquo;s all for today! 
For tomorrow a cleanup session is planned and the piece +of code that derives the AES-Key from an unencrypted file.</p> + + + + + \ No newline at end of file diff --git a/doc/blog/public/robots.txt b/doc/blog/public/robots.txt new file mode 100644 index 00000000..4f9540ba --- /dev/null +++ b/doc/blog/public/robots.txt @@ -0,0 +1 @@ +User-agent: * \ No newline at end of file diff --git a/doc/blog/public/sitemap.xml b/doc/blog/public/sitemap.xml new file mode 100644 index 00000000..eca2bce0 --- /dev/null +++ b/doc/blog/public/sitemap.xml @@ -0,0 +1,20 @@ + + + + + http://replace-this-with-your-hugo-site.com/ + 2016-01-16T01:25:51+01:00 + 0 + + + + http://replace-this-with-your-hugo-site.com/post/bolt_and_fingerprint/ + 2016-01-16T01:25:51+01:00 + + + + http://replace-this-with-your-hugo-site.com/post/devlog/ + 2016-01-15T01:13:07-07:00 + + + \ No newline at end of file diff --git a/doc/blog/public/tags/brig/index.html b/doc/blog/public/tags/brig/index.html new file mode 100644 index 00000000..d67d4509 --- /dev/null +++ b/doc/blog/public/tags/brig/index.html @@ -0,0 +1,56 @@ + + + + + + + + + + + + +Brig - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ +
+

Brig

+ + +
+

A small step for mankind, a big step for brig

+
+ + published on + + +
+
+ A small historic moment was achieved today: The very first file was added to brig. There was no way to get it out again, but hey - Progress comes in steps. Luckily, just two hours later there was a brig get command that could retrieve the file again from ipfs. This is also my very first devlog entry, so… Hi. I mainly write this to remember what I did (and when) on the course of the project. Read More... +
+
+ + +
+ +
+ + + + + + diff --git a/doc/blog/public/tags/brig/index.xml b/doc/blog/public/tags/brig/index.xml new file mode 100644 index 00000000..a88fa9db --- /dev/null +++ b/doc/blog/public/tags/brig/index.xml @@ -0,0 +1,78 @@ + + + + Brig on brig devlog + http://replace-this-with-your-hugo-site.com/tags/brig/ + Recent content in Brig on brig devlog + Hugo -- gohugo.io + en-us + Fri, 15 Jan 2016 01:13:07 -0700 + + + + A small step for mankind, a big step for brig + http://replace-this-with-your-hugo-site.com/post/devlog/ + Fri, 15 Jan 2016 01:13:07 -0700 + + http://replace-this-with-your-hugo-site.com/post/devlog/ + <p>A small historic moment was achieved today: The very first file was added to +<code>brig</code>. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a <code>brig get</code> command that +could retrieve the file again from <code>ipfs</code>.</p> + +<p>This is also my very first devlog entry, so&hellip; Hi. I mainly write this to +remember what I did (and when) on the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That&rsquo;s +the same effect, I guess. If it&rsquo;s fun to read for others&hellip; that&rsquo;s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.</p> + +<p>So, back to the technical side of life. <code>brig add</code> currently works a bit +confusing. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to <code>ipfs</code>. The encryption and compression layer uses +<code>io.Writer</code> though, so we can&rsquo;t just stack <code>io.Reader</code> on top of each +other. Instead we need to use a nice little feature from the stdlib: +<code>io.Pipe()</code>. 
This function returns a <code>io.Writer</code> and a <code>io.Reader</code>. Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs a <code>io.Reader</code>, +but you just have a <code>io.Writer</code>, then <code>io.Pipe()</code> should pop into your mind now.</p> + +<p>Here&rsquo;s how it looks in practice:</p> + +<pre><code class="language-go">func NewFileReader(key []byte, r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + + // Setup the writer part: + wEnc, err := encrypt.NewWriter(pw, key) + if err != nil { + return nil, err + } + + wZip := compress.NewWriter(wEnc) + + // Suck the reader empty and move it to `wZip`. + // Every write to wZip will be available as read in `pr`. + go func() { + defer func() { + wEnc.Close() + pw.Close() + }() + + if _, err := io.Copy(wZip, r); err != nil { + // TODO: Warn or pass to outside? + log.Warningf(&quot;add: copy: %v&quot;, err) + } + }() + + return pr, nil +} +</code></pre> + +<p>That&rsquo;s all for today! For tomorrow a cleanup session is planned and the piece +of code that derives the AES-Key from an unencrypted file.</p> + + + + + \ No newline at end of file diff --git a/doc/blog/public/tags/development/index.html b/doc/blog/public/tags/development/index.html new file mode 100644 index 00000000..b7cd332b --- /dev/null +++ b/doc/blog/public/tags/development/index.html @@ -0,0 +1,56 @@ + + + + + + + + + + + + +Development - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ +
+

Development

+ + +
+

A small step for mankind, a big step for brig

+
+ + published on + + +
+
+ A small historic moment was achieved today: The very first file was added to brig. There was no way to get it out again, but hey - Progress comes in steps. Luckily, just two hours later there was a brig get command that could retrieve the file again from ipfs. This is also my very first devlog entry, so… Hi. I mainly write this to remember what I did (and when) on the course of the project. Read More... +
+
+ + +
+ +
+ + + + + + diff --git a/doc/blog/public/tags/development/index.xml b/doc/blog/public/tags/development/index.xml new file mode 100644 index 00000000..7db838de --- /dev/null +++ b/doc/blog/public/tags/development/index.xml @@ -0,0 +1,78 @@ + + + + Development on brig devlog + http://replace-this-with-your-hugo-site.com/tags/development/ + Recent content in Development on brig devlog + Hugo -- gohugo.io + en-us + Fri, 15 Jan 2016 01:13:07 -0700 + + + + A small step for mankind, a big step for brig + http://replace-this-with-your-hugo-site.com/post/devlog/ + Fri, 15 Jan 2016 01:13:07 -0700 + + http://replace-this-with-your-hugo-site.com/post/devlog/ + <p>A small historic moment was achieved today: The very first file was added to +<code>brig</code>. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a <code>brig get</code> command that +could retrieve the file again from <code>ipfs</code>.</p> + +<p>This is also my very first devlog entry, so&hellip; Hi. I mainly write this to +remember what I did (and when) on the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That&rsquo;s +the same effect, I guess. If it&rsquo;s fun to read for others&hellip; that&rsquo;s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.</p> + +<p>So, back to the technical side of life. <code>brig add</code> currently works a bit +confusing. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to <code>ipfs</code>. The encryption and compression layer uses +<code>io.Writer</code> though, so we can&rsquo;t just stack <code>io.Reader</code> on top of each +other. Instead we need to use a nice little feature from the stdlib: +<code>io.Pipe()</code>. 
This function returns a <code>io.Writer</code> and a <code>io.Reader</code>. Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs a <code>io.Reader</code>, +but you just have a <code>io.Writer</code>, then <code>io.Pipe()</code> should pop into your mind now.</p> + +<p>Here&rsquo;s how it looks in practice:</p> + +<pre><code class="language-go">func NewFileReader(key []byte, r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + + // Setup the writer part: + wEnc, err := encrypt.NewWriter(pw, key) + if err != nil { + return nil, err + } + + wZip := compress.NewWriter(wEnc) + + // Suck the reader empty and move it to `wZip`. + // Every write to wZip will be available as read in `pr`. + go func() { + defer func() { + wEnc.Close() + pw.Close() + }() + + if _, err := io.Copy(wZip, r); err != nil { + // TODO: Warn or pass to outside? + log.Warningf(&quot;add: copy: %v&quot;, err) + } + }() + + return pr, nil +} +</code></pre> + +<p>That&rsquo;s all for today! For tomorrow a cleanup session is planned and the piece +of code that derives the AES-Key from an unencrypted file.</p> + + + + + \ No newline at end of file diff --git a/doc/blog/public/tags/go/index.html b/doc/blog/public/tags/go/index.html new file mode 100644 index 00000000..c08a6f0d --- /dev/null +++ b/doc/blog/public/tags/go/index.html @@ -0,0 +1,56 @@ + + + + + + + + + + + + +Go - brig devlog + + + +
+
+ [brig devlog] + # _ +
+
+ +
+ +
+

Go

+ + +
+

A small step for mankind, a big step for brig

+
+ + published on + + +
+
+ A small historic moment was achieved today: The very first file was added to brig. There was no way to get it out again, but hey - Progress comes in steps. Luckily, just two hours later there was a brig get command that could retrieve the file again from ipfs. This is also my very first devlog entry, so… Hi. I mainly write this to remember what I did (and when) on the course of the project. Read More... +
+
+ + +
+ +
+ + + + + + diff --git a/doc/blog/public/tags/go/index.xml b/doc/blog/public/tags/go/index.xml new file mode 100644 index 00000000..11f1d045 --- /dev/null +++ b/doc/blog/public/tags/go/index.xml @@ -0,0 +1,78 @@ + + + + Go on brig devlog + http://replace-this-with-your-hugo-site.com/tags/go/ + Recent content in Go on brig devlog + Hugo -- gohugo.io + en-us + Fri, 15 Jan 2016 01:13:07 -0700 + + + + A small step for mankind, a big step for brig + http://replace-this-with-your-hugo-site.com/post/devlog/ + Fri, 15 Jan 2016 01:13:07 -0700 + + http://replace-this-with-your-hugo-site.com/post/devlog/ + <p>A small historic moment was achieved today: The very first file was added to +<code>brig</code>. There was no way to get it out again, but hey - Progress comes in +steps. Luckily, just two hours later there was a <code>brig get</code> command that +could retrieve the file again from <code>ipfs</code>.</p> + +<p>This is also my very first devlog entry, so&hellip; Hi. I mainly write this to +remember what I did (and when) on the course of the project. Also it sometimes +is really useful to reflect on what kind of boolshit I wrote today. Ever +noticed that you get the best ideas doing arbitrary things like peeing? That&rsquo;s +the same effect, I guess. If it&rsquo;s fun to read for others&hellip; that&rsquo;s okay too. +I try to keep it updated after every more or less productive session. +That might mean daily, that might also mean once a week.</p> + +<p>So, back to the technical side of life. <code>brig add</code> currently works a bit +confusing. It is supposed to read a regular file on the disk, compress and +encrypt it and add it to <code>ipfs</code>. The encryption and compression layer uses +<code>io.Writer</code> though, so we can&rsquo;t just stack <code>io.Reader</code> on top of each +other. Instead we need to use a nice little feature from the stdlib: +<code>io.Pipe()</code>. This function returns a <code>io.Writer</code> and a <code>io.Reader</code>. 
Every +write on the writer produces a corresponding read on the reader - without internal +copying of the data. Yay. If you have a piece of API that needs a <code>io.Reader</code>, +but you just have a <code>io.Writer</code>, then <code>io.Pipe()</code> should pop into your mind now.</p> + +<p>Here&rsquo;s how it looks in practice:</p> + +<pre><code class="language-go">func NewFileReader(key []byte, r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + + // Setup the writer part: + wEnc, err := encrypt.NewWriter(pw, key) + if err != nil { + return nil, err + } + + wZip := compress.NewWriter(wEnc) + + // Suck the reader empty and move it to `wZip`. + // Every write to wZip will be available as read in `pr`. + go func() { + defer func() { + wEnc.Close() + pw.Close() + }() + + if _, err := io.Copy(wZip, r); err != nil { + // TODO: Warn or pass to outside? + log.Warningf(&quot;add: copy: %v&quot;, err) + } + }() + + return pr, nil +} +</code></pre> + +<p>That&rsquo;s all for today! For tomorrow a cleanup session is planned and the piece +of code that derives the AES-Key from an unencrypted file.</p> + + + + + \ No newline at end of file diff --git a/doc/blog/themes/base16 b/doc/blog/themes/base16 new file mode 160000 index 00000000..694fe64b --- /dev/null +++ b/doc/blog/themes/base16 @@ -0,0 +1 @@ +Subproject commit 694fe64bbb1ff426a4554fc796c044d4727d14db diff --git a/doc/brainstorming.rst b/doc/brainstorming.rst new file mode 100644 index 00000000..95139fff --- /dev/null +++ b/doc/brainstorming.rst @@ -0,0 +1,123 @@ +============= +BRAINSTORMING +============= + +Technology +========== + +IPFS +---- + +IPFS is short for the InterPlanetary FileSystem. It is a fast and efficient p2p +network. We use IPFS to form little sub-nets that can share files each other. +With IPFS we do not need to re-invent a lot of basic boilerplate features. +Also, by using IPFS we're not restricted to local network, but can also use the +whole IPFS network if necessary. 
+ +XMPP +---- + +XMPP (also called Jabber) is used as existing infrastructure to easily pair +devices with IDs. In the case of XMPP the ID is a JabberID like this: + + sahib@nullcat.de/laptop + sahib@nullcat.de/desktop + +The part before the slash is called a bare id, which is meant to be unique +for your devices. Together with the resource behind the slash, the bare id forms +the full id which refers to a single device. + +Zeroconf +-------- + +Easy decovery of other peers in the local network. The software needs to act as +client and server. This needs an Zeroconf server (e.g. Avahi) to run on all +sides to work. + +inotify +------- + +Watches a directory for modifications. Enables brig to +automatically commit + +This is not portable to windows? (or some other unices) + + +btrfs, zfs, rsync algorithm? +---------------------------- + +For archive nodes, old snapshots up to a certain depth would be valuable. +There are several ways to achieve this, for example using btrfs snapshots. +A more portable and self-contained alternative would be using the rsync +using the rsync algorithm to create incremental layers. + +Terms +===== + +Repository (Port) +----------------- + +A repository is a folder with files and some special semantic in it. It can be +shared over several peers, either with full or partial content. A repository +does not necessarily sync all of it's contents. Like with ``git`` the +synchronization might be triggered automatically, or the directory might be +watched with ``inotify`` for autosync. + +Open questions: + +- Repo structure? Hidden directory? .brignore files? +- More than one repository possible with ipfs? Probably, how? + +A file index is associated to each repository, which is shared fully with each +peer. + +Peers (ships) +------------- + +Other peers in the network you are authenticated too. These are either close +peers (same bare id as you or explicitly trusted) or friend peers. +Every peer will be added as ipfs bootstrap peer. 
+To be added as peers, other devices need to be authenticated. + +Open questions: + +- How does the auth work? OTR? Using the ipfs PGP key? + +Security +======== + +Authentication +-------------- + +- Question/Answer? +- Verify public key? + +File transfer +------------- + +- Share one-time-keys over xmpp and encrypt files before sending with ipfs? + +Libraries +========= + +XMPP +---- + +go-xmpp2 + + + + + + + + + + +Ablauf +====== + + + + +Random n diff --git a/doc/expose/.gitignore b/doc/expose/.gitignore new file mode 100644 index 00000000..a1027a3a --- /dev/null +++ b/doc/expose/.gitignore @@ -0,0 +1,10 @@ +*.aux +*.fdb_latexmk +*.fls +*.log +*.out +*.pdf +*.tex +*.toc +*.lof +*.lot diff --git a/doc/expose/Makefile b/doc/expose/Makefile new file mode 100644 index 00000000..d90fbc99 --- /dev/null +++ b/doc/expose/Makefile @@ -0,0 +1,4 @@ +all: + pandoc --smart --bibliography expose.bib --csl ieee.csl expose.md -B title.tex -H header.tex --filter pandoc-fignos --filter pandoc-tablenos -N -o expose.tex -V lang=de-DE --chapters + latexmk -pdf expose.tex + diff --git a/doc/expose/expose.bib b/doc/expose/expose.bib new file mode 100644 index 00000000..b3b44b2e --- /dev/null +++ b/doc/expose/expose.bib @@ -0,0 +1,57 @@ +@book{go_programming_language, + title={The Go Programming Language}, + author={Alan A. A. Donovan, Brian W. Kernighan}, + isbn={0134190440}, + url={http://www.gopl.io/}, + year={2015}, + publisher={Addison-Wesley} +} + +@book{git, + title={Git - Verteile Versionsverwaltung für Code und Dokumente}, + author={Valentin Haenel, Julius Plenz}, + isbn={9783941841420}, + url={http://www.opensourcepress.de/de/produkte/Git/33237/978-3-95539-120-1}, + year={2011}, + publisher={open source Press} +} + +@book{peer2peer, + title={Peer--to--Peer--Netzwerke}, + author={Peter Mahlmann, Christian Schindelhauer}, + isbn={9783540339915}, + year={2007}, + publisher={eXamen.press} +} + +@book{everyday_crypto, + title={Everyday Cryptography}, + author={Keith M. 
Martin}, + isbn={9780199695591}, + year={2012}, + publisher={Oxford University Press} +} + +@book{peer2peer_arch, + title={Software--Architekturen: für Verteilte Systeme}, + author={Schahram Dustdar, Harald Gall, Manfred Hauswirth}, + isbn={9783642627699}, + year={2003}, + publisher={Springer} +} + +@book{xmpp, + title={XMPP: THe Definitive Guide}, + author={Peter Saint--Andre, Kevin Smith, Remko Tronçon}, + isbn={9780596521264}, + year={2009}, + publisher={O'Reilly} +} + +@electronic{wiki_filesync, + author = "Wikipedia", + title = "Comparison of file synchronization software", + year = "2015", + url = "https://en.wikipedia.org/wiki/Comparison_of_file_synchronization_software", + note = "[Online; zugegriffen am 06-Januar-2016]" +} diff --git a/doc/expose/expose.md b/doc/expose/expose.md new file mode 100644 index 00000000..c64e28e2 --- /dev/null +++ b/doc/expose/expose.md @@ -0,0 +1,623 @@ +--- +documentclass: scrreprt +classoption: toc=listof,index=totoc +include-headers: + - \usepackage{url} + - \usepackage[ngerman]{babel} + - \usepackage{csquotes} + - \usepackage[babel, german=quotes]{csquotes} +fontsize: 11pt +sections: yes +toc: yes +lof: no +lot: no +date: \today +--- + +\newpage +\pagenumbering{arabic} +\setcounter{page}{1} + +# Zusammenfassung der Projektziele + +Ziel des Projektes ist die Entwicklung einer sicheren und dezentralen +Alternative zu Cloud--Storage Lösungen wie Dropbox, die sowohl für Unternehmen, +als auch für Heimanwender nutzbar ist. Trotz der Prämisse, einfache Nutzbarkeit +zu gewährleisten, wird auf Sicherheit sehr großen Wert gelegt. Aus Gründen der +Transparenz wird die Software mit dem Namen »``brig``« dabei quelloffen unter der +``AGPL`` Lizenz entwickelt. + +Nutzbar soll das resultierende Produkt, neben dem Standardanwendungsfall der +Dateisynchronisation, auch als Backup- bzw. Archivierungs--Lösung sein. 
+Des Weiteren kann es auch als verschlüsselter Daten--Safe oder als Plattform für +andere, verteilte Anwendungen (wie beispielsweise aus dem Industrie 4.0 Umfeld) +dienen. + +Von anderen Softwarelösungen soll es sich stichpunkthaft durch folgende Merkmale +abgrenzen: + +- Verschlüsselte Übertragung *und* Speicherung. +- Unkomplizierte Installation und einfache Nutzung durch simplen Ordner im + Dateimanager. +- Transparenz, Anpassbarkeit und Sicherheit durch *Free Open Source Software (FOSS)*. +- Kein *Single Point of Failure* (*SPoF*), wie bei zentralen Diensten. +- Dezentrales Peer--to--Peer--Netzwerk auf Basis von ``ipfs``. +- Benutzerverwaltung auf Basis der ``XMPP``--Infrastruktur. +- Versionsverwaltung großer Dateien mit definierbarer Tiefe. + +# Steckbrief + +## Einleitung + +Viele Unternehmen haben sehr hohe Ansprüche an die Sicherheit, welche zentrale +Alternativen wie beispielsweise Dropbox[^Dropbox] nicht bieten können. Zwar wird +die Übertragung von Daten zu den zentralen Dropbox-Servern verschlüsselt, was +allerdings danach mit den Daten »in der Cloud« passiert liegt nicht mehr unter der +Kontrolle der Nutzer. Dort sind die Daten schon manchmal für andere Nutzer wegen Bugs +einsehbar[^BUGS] oder müssen gar von Dropbox an amerikanische Geheimdienste[^NSA] +weitergegeben werden. + +[^NSA]: Siehe auch \url{http://www.spiegel.de/netzwelt/web/dropbox-edward-snowden-warnt-vor-cloud-speicher-a-981740.html} +[^BUGS]: Siehe dazu auch \url{http://www.cnet.com/news/dropbox-confirms-security-glitch-no-password-required/} +[^Dropbox]: Mehr Informationen unter \url{https://www.dropbox.com/} + +Sprichwörtlich gesagt, kann man nicht kontrollieren wo die Daten aus der Cloud +abregnen. Tools wie Boxcryptor[^Boxcryptor] lindern diese Problematik zwar +etwas indem sie die Dateien verschlüsseln, heilen aber nur die Symptome und nicht +das zugrunde liegende Problem. 
+ +[^Boxcryptor]: Krypto-Layer für Cloud-Dienste, siehe \url{https://www.boxcryptor.com/de} + +Dropbox ist leider kein Einzelfall --- beinahe alle Cloud--Dienste haben, oder +hatten, architekturbedingt ähnliche Sicherheitslecks. Für ein Unternehmen wäre +es vorzuziehen ihre Daten auf Servern zu speichern, die sie selbst +kontrollieren. Dazu gibt es bereits einige Werkzeuge wie *ownCloud*[^OWNCLOUD] +oder Netzwerkdienste wie *Samba*, doch technisch bilden diese nur die zentrale +Architektur von Cloud--Diensten innerhalb eines Unternehmens ab. + +[^OWNCLOUD]: *ownCloud*--Homepage: \url{https://owncloud.org/} + +## Ziele + +Ziel ist daher die Entwicklung einer sicheren, dezentralen und unternehmenstauglichen +Dateisynchronisationssoftware namens ``brig``. Die »Tauglichkeit« für ein +Unternehmen ist natürlich sehr individuell. Wir meinen damit im Folgenden diese Punkte: + +- *Einfache Benutzbarkeit:* Sichtbar soll nach der + Einrichtung nur ein Ordner im Dateimanager sein. +- *Effiziente Übertragung von Dateien:* Intelligentes Routing vom Speicherort zum Nutzer. +- *Speicherquoten:* Nur relevante Dateien müssen synchronisiert werden. +- *Automatische Backups:* Versionsverwaltung auf Knoten mit großem Speicherplatz. +- *Schnelle Auffindbarkeit:* Kategorisierung durch optionale Verschlagwortung. + +Um eine solche Software zu entwickeln, wollen wir auf bestehende Komponenten wie +dem *InterPlanetaryFileSystem* (kurz ``ipfs``, ein flexibles P2P +Netzwerk[@peer2peer]) und *XMPP* (ein Messanging Protokoll und Infrastruktur, +siehe [@xmpp]) aufsetzen. Dies macht die Entwicklung eines Prototypen mit +vertretbaren Aufwand möglich. + +Von einem Prototypen zu einer marktreifen Software ist es allerdings stets ein +weiter Weg. Daher wollen wir einen großen Teil der Zeit nach dem Prototyp damit +verbringen, die Software bezüglich Sicherheit, Performance und +Benutzerfreundlichkeit zu optimieren. 
Da es dafür nun mal keinen +standardisierten Weg gibt, ist dafür ein gewisses Maß an Forschung nötig. + +## Einsatzmöglichkeiten + +``brig`` soll deutlich flexibler nutzbar sein als zentrale Dienste. Nutzbar soll +es sein als… + +- *Synchronisationslösung*: Spiegelung von zwei oder mehr Ordnern. +- *Transferlösung*: »Veröffentlichen« von Dateien nach Außen mittels Hyperlinks. +- *Versionsverwaltung*: + Bis zu einer bestimmten Tiefe können alte Dateien wiederhergestellt werden. +- *Backup- und Archivierungslösung*: Verschiedene »Knoten--Typen« möglich. +- *Verschlüsselter Safe*: ein »Repository«[^REPO] kann »verschlossen« und + wieder »geöffnet« werden. +- *Semantisch durchsuchbares* Tag-basiertes Dateisystem[^TAG]. +- *Plattform* für verteilte Anwendungen. +- einer beliebigen Kombination der oberen Punkte. + +[^TAG]: Mit einem ähnlichen Ansatz wie \url{https://en.wikipedia.org/wiki/Tagsistant} +[^REPO]: *Repository:* Hier ein »magischer« Ordner in denen alle Dateien im Netzwerk angezeigt werden. + +## Zielgruppen + +Die primäre Zielgruppe von ``brig`` sind Unternehmenskunden und Heimanwender. +Wie man unten sehen kann, sind noch weitere sekundäre Zielgruppen denkbar. + +### Unternehmen + +Unternehmen können ``brig`` nutzen, um ihre Daten und Dokumente intern zu +verwalten. Besonders sicherheitskritische Dateien entgehen so der Lagerung in +Cloud--Services oder der Gefahr von Kopien auf unsicheren +Mitarbeiter--Endgeräten. Größere Unternehmen verwalten dabei meist ein +Rechenzentrum in dem firmeninterne Dokumente gespeichert werden. Von den +Nutzern werden diese dann meist mittels Diensten wie *ownCloud* oder *Samba* +»händisch« heruntergeladen. + +In diesem Fall könnte man ``brig`` im Rechenzentrum und auf allen Endgeräten +installieren. Das Rechenzentrum würde die Datei mit tiefer Versionierung +vorhalten. Endanwender würden alle Daten sehen, aber auf ihren Gerät nur die +Daten tatsächlich speichern, die sie auch benutzen. 
Hat beispielsweise ein +Kollege im selben Büro die Datei bereits vorliegen, kann brig diese dann direkt +blockweise vom Endgerät des Kollegen holen. + + +Kleinere Unternehmen, wie Ingenieurbüros, können ``brig`` dazu nutzen Dokumente nach +Außen freizugeben, ohne dass sie dazu vorher irgendwo »hochgeladen« werden +müssen. Dies wird dadurch möglich gemacht, dass Dateien mittels eines +*Hyperlinks* nach außen publik gemacht werden können. So muss die Gegenseite +``brig`` nicht installiert haben. + +### Privatpersonen / Heimanwender + +Heimanwender können ``brig`` für ihren Datenbestand aus Fotos, Filmen, Musik und +sonstigen Dokumenten nutzen. Ein typischer Anwendungsfall wäre dabei auf einem +NAS Server, der alle Dateien mit niedriger Versionierung speichert. Die +Endgeräte, wie Laptops und Smartphones, würden dann ebenfalls ``brig`` nutzen, +aber mit deutlich geringeren Speicherquotas (maximales Speicherlimit), so dass +nur die aktuell benötigten Dateien physikalisch auf dem Gerät vorhanden sind. +Die anderen Dateien lagern »im Netz« und können transparent von ``brig`` von +anderen verfügbaren Knoten geholt werden. Sollte der Nutzer, beispielsweise auf +einer längeren Zugfahrt, offline sein, so kann er benötigte Dateien vorher +»pinnen«, um sie lokal zwischenzuspeichern. + +### Plattform für industrielle Anwendungen + +Da ``brig`` auch komplett automatisiert und ohne Interaktion nutzbar sein soll, kann +es auch als Plattform für jede andere Anwendungen genutzt werden, die Dateien +austauschen und synchronisieren müssen. Eine Anwendung in der Industrie 4.0 wäre +beispielweise die Synchronisierung von Konfigurationsdateien im gesamten +Netzwerk. + +### Einsatz im öffentlichen Bereich + +Aufgrund seiner Transparenz und einfachen Benutzbarkeit wäre ebenfalls eine +Nutzung an Schulen, Universitäten oder auch in Behörden zum Dokumentenaustausch +denkbar. 
Vorteilhaft wäre für die jeweiligen Institutionen hierbei vor allem,
dass man sich aufgrund des Open--Source Modells an keinen Hersteller bindet
(Stichwort: *Vendor Lock*) und keine behördlichen Daten in der »Cloud« landen.
Eine praktische Anwendung im universitären Bereich wäre die Verteilung von
Studienunterlagen an die Studenten.

# Stand der Technik

Die Innovation bei unserem Projekt besteht daher hauptsächlich darin, bekannte
Technologien »neu zusammen zu stecken«, woraus sich viele neue Möglichkeiten
ergeben. Wie im nächsten Kapitel beleuchtet wird, ist ``brig`` der Versuch
viele gute, bestehende und praxisgeprüfte Ideen in einem konsistenten Programm
zu vereinen.

## Stand der Wissenschaft

Zwar ist das Projekt stark anwendungsorientiert, doch basiert es auf gut
erforschten Technologien wie Peer--to--Peer-Netzwerken (kurz *P2P*, siehe auch
[@peer2peer_arch]), von der NIST[^NIST] zertifizierten kryptografischen
Standard-Algorithmen[@everyday_crypto] und verteilten Systemen im Allgemeinen
(wie der freie XMPP Standard). P2P--Netzwerke wurden in den letzten
Jahren gut erforscht und haben sich auch in der Praxis bewährt: Skype ist
vermutlich das bekannteste, kommerzielle P2P Netzwerk (siehe auch @peer2peer, S.2).

Allerdings ist uns keine für breite Massen nutzbare Software bekannt, die es
Nutzern ermöglicht selbst ein P2P Netzwerk aufzuspannen und darin Dateien
auszutauschen. Am nächsten kommen dem die beiden Softwareprojekte
»``Syncthing``« (OpenSource, [^SYNCTHING]) und »``BitTorrent Sync``«
(proprietär, [^BITSYNC]). Beide nutzen zwar P2P--Technologie zum Austausch der
Dateien, modellieren aber kein »echtes« P2P--Netzwerk, bei dem nicht jeder
Teilnehmer eine volle Kopie sämtlicher Daten haben muss. 
+

[^SYNCTHING]: Siehe auch dazu: \url{https://syncthing.net/}
[^BITSYNC]: Siehe Hersteller--Webpräsenz: \url{https://www.getsync.com/}

Der wissenschaftliche Beitrag unserer Arbeit wäre daher die Entwicklung einer
freien Alternative, die von allen eingesehen, auditiert und studiert werden
kann. Diese freie Herangehensweise ist insbesondere für sicherheitskritische
Software relevant, da keine (offensichtlichen) »Backdoors« in die Software
eingebaut werden können.

[^NIST]: NIST: *National Institute of Standards and Technology*

## Markt und Wettbewerber

Bereits ein Blick auf Wikipedia[@wiki_filesync] zeigt, dass der momentane Markt
an Dateisynchronisationssoftware (im weitesten Sinne) sehr unübersichtlich ist.
Ein näherer Blick zeigt, dass die Software dort oft nur
in Teilaspekten gut funktioniert oder mit anderen unlösbaren Problemen
behaftet ist.

### Verschiedene Alternativen

Im Folgenden geben wir eine unvollständige Übersicht über bekannte
Dateisynchronisations--Programme. Davon stehen nicht alle in Konkurrenz zu
``brig``, sind aber aus Anwendersicht ähnlich.

#### Dropbox + Boxcryptor

Der vermutlich bekannteste und am weitesten verbreitete zentrale Dienst zur
Dateisynchronisation. Verschlüsselung kann man mit Tools wie ``encfs``
(Open--Source, siehe auch [^ENCFS]) oder dem ähnlich umfangreichen, proprietären
*Boxcryptor* nachrüsten. Was das Backend genau tut ist leider das
Geheimnis von Dropbox --- es ist nicht Open--Source.

[^ENCFS]: Mehr Informationen unter \url{https://de.wikipedia.org/wiki/EncFS}

Die Server von Dropbox stehen in den Vereinigten Staaten, was spätestens
seit den Snowden--Enthüllungen für ein mulmiges Gefühl sorgen sollte. Wie oben
erwähnt, kann diese Problematik durch die Verschlüsselungssoftware *Boxcryptor*
abgemildert werden. Diese kostet aber zusätzlich und benötigt noch einen
zusätzlichen zentralen Keyserver[^KEYSERVER]. 
+

[^KEYSERVER]: Mehr Informationen zum Keyserver unter \url{https://www.boxcryptor.com/de/technischer-\%C3\%BCberblick\#anc09}

Technisch nachteilhaft ist vor allem, dass jede Datei »über den Pazifik« hinweg
synchronisiert werden muss, nur um eventuell auf dem Arbeitsrechner nebenan
anzukommen.

#### ownCloud

Aus dieser Problemstellung heraus entstand die Open--Source Lösung *ownCloud*.
Nutzer hosten auf ihren Servern selbst eine ownCloud--Instanz und stellen
ausreichend Speicherplatz bereit. Vorteilhaft ist also, dass die Daten auf den
eigenen Servern liegen. Nachteilig hingegen, dass das zentrale Modell von Dropbox
lediglich auf eigene Server übertragen wird. Die Daten müssen zudem von einer
Weboberfläche geholt werden und liegen nicht in einem »magischen«,
selbst--synchronisierenden Ordner.

#### Syncthing

Das 2013 veröffentlichte quelloffene *Syncthing* versucht diese zentrale
Instanz zu vermeiden, indem die Daten jeweils von Peer zu Peer übertragen
werden. Es ist allerdings kein vollständiges Peer--to--peer--Netzwerk: Geteilte
Dateien liegen immer als Kopie bei allen Teilnehmern, die die Datei haben.
Alternativ ist aber auch selektives Synchronisieren von Dateien möglich.

Besser als bei ownCloud ist hingegen gelöst, dass ein »magischer« Ordner
existiert, in den man einfach Dateien legen kann, um sie zu teilen. Zudem wird die
Datei vom nächstgelegenen Knoten übertragen. Praktisch ist auch, dass
*Syncthing* Instanzen mittels eines zentralen Discovery--Servers entdeckt werden
können. Nachteilig hingegen ist die fehlende Benutzerverwaltung: Man kann nicht
festlegen von welchen Nutzern man Änderungen empfangen will und von welchen
nicht.

#### BitTorrent Sync

In bestimmten Kreisen scheint auch das kommerzielle und proprietäre
*BitTorrent Sync* beliebt zu sein. Hier wird das bekannte und freie BitTorrent
Protokoll zur Übertragung genutzt. Vom Feature--Umfang ist es in etwa
vergleichbar mit *Syncthing*. 
Die Dateien werden allerdings noch zusätzlich +AES--verschlüsselt abgespeichert. + +Genauere Aussagen kann man leider aufgrund der geschlossenen Natur des Programms +und der eher vagen Werbeprosa nicht treffen. Ähnlich zu *Syncthing* ist +allerdings, dass eine Versionsverwaltung nur mittels eines »Archivordners« +vorhanden ist. Gelöschte Dateien werden schlicht in diesen Ordner verschoben und +können von dort wiederhergestellt werden. Die meisten anderen Vor- und Nachteile +von *Syncthing* treffen auch hier zu. + +#### ``git-annex`` + +Das 2010 erstmals veröffentlichte ``git-annex``[^ANNEX] geht in vielerlei Hinsicht +einen anderen Weg. Einerseits ist es in der funktionalen Programmiersprache +Haskell geschrieben, andererseits nutzt es das Versionsverwaltungssystem ``git``[@git], +um die Metadaten zu den Dateien abzuspeichern, die es verwaltet. Auch werden +Dateien standardmäßig nicht automatisch synchronisiert, man muss Dateien selbst +»pushen«, beziehungsweise »pullen«. + +[^ANNEX]: Webpräsenz: \url{https://git-annex.branchable.com/} + +Dieser »Do-it-yourself« Ansatz ist sehr nützlich, um ``git-annex`` als Teil der +eigenen Anwendung einzusetzen. Für den alltäglichen Gebrauch ist es aber selbst +für erfahrene Anwender zu kompliziert, um es praktikabel einzusetzen. + +Trotzdem sollen zwei interessante Features nicht verschwiegen werden, welche wir +langfristig gesehen auch in ``brig`` realisieren wollen: + +* *Special Remotes:* »Datenablagen« bei denen ``git-annex`` nicht installiert sein muss. + Damit können beliebige Cloud--Dienste als Speicher genutzt werden. ++ *N-Copies:* Von wichtigen Dateien kann ``git-annex`` bis zu ``N`` Kopien speichern. + Versucht man eine Kopie zu löschen, so verweigert ``git-annex`` dies. 
+ +### Zusammenfassung + +Obwohl ``brig`` eine gewisse Ähnlichkeit mit verteilten Dateisystemen, wie +*GlusterFS* hat, wurden diese in der Übersicht weggelassen --- einerseits aus +Gründen der Übersicht, andererseits weil diese andere Ziele verfolgen und von +Heimanwendern kaum genutzt werden. + +Zusammengefasst findet sich hier noch eine tabellarische Übersicht mit den aus +unserer Sicht wichtigsten Eigenschaften: + +| | **FOSS** | **Dezentral** | **Kein SPoF** | **Versionierung** | **Einfach nutzbar** | **P2P** | +| -------------------- | ------------------- | ------------------- | --------------------------- | -------------------------------------- | ------------------- |------------------| +| *Dropbox/Boxcryptor* | \xmark | \xmark | \xmark | \textcolor{YellowOrange}{Rudimentär} | \cmark | \xmark | +| *ownCloud* | \cmark | \xmark | \xmark | \textcolor{YellowOrange}{Rudimentär} | \cmark | \xmark | +| *Syncthing* | \cmark | \cmark | \cmark | \textcolor{YellowOrange}{Archivordner} | \cmark | \xmark | +| *BitTorrent Sync* | \xmark | \cmark | \cmark | \textcolor{YellowOrange}{Archivordner} | \cmark | \xmark | +| ``git-annex`` | \cmark | \cmark | \cmark | \cmark | \xmark | \xmark | +| ``brig`` | \cmark | \cmark | \cmark | \cmark | \cmark | \cmark | + +# Das Projekt ``brig`` + +Optimal wäre also eine Kombination aus den Vorzügen von *Syncthing*, *BitTorrent +Sync* und ``git-annex``. Wie wir diese technichen Vorzüge ohne große Nachteile +erreichen wollen, wird im Folgenden beleuchtet. + +## Der Name + +Eine »Brigg« (englisch »brig«) ist ein kleines und wendiges +Zweimaster--Segelschiff aus dem 18-ten Jahrhundert. Passend erschien uns der Name +einerseits, weil wir flexibel »Güter« (in Form von Dateien) in der ganzen Welt +verteilen, andererseits weil ``brig`` auf (Datei-)Strömen operiert. + +Dass der Name ähnlich klingt und kurz ist wie ``git``, ist kein Zufall. 
Das
Versionsverwaltungssystem (kurz VCS) hat durch seine sehr flexible und dezentrale
Arbeitsweise bestehende zentrale Alternativen wie ``svn`` oder ``cvs`` fast
vollständig abgelöst. Zusätzlich ist der Gesamteinsatz von
Versionsverwaltungssystemen durch die verhältnismäßig einfache Anwendung
gestiegen.
Wir hoffen mit ``brig`` eine ähnlich flexible Lösung für große Dateien
etablieren zu können.

## Wissenschaftliche und technische Arbeitsziele

Um die oben genannten Ziele zu realisieren, ist eine sorgfältige Auswahl der
Technologien wichtig. Der Einsatz eines Peer--to--Peer Netzwerks zum Dateiaustausch
ermöglicht interessante neue Möglichkeiten. Bei zentralen Ansätzen müssen
Dateien immer vom zentralen Server (der einen *Single Point of Failure*
darstellt) geholt werden. Dies ist relativ ineffizient, besonders wenn viele
Teilnehmer im selben Netz dieselbe große Videodatei empfangen wollen. Bei ``brig``
würde der Fortschritt beim Ziehen der Datei unter den Teilnehmern aufgeteilt
werden. Hat ein Teilnehmer bereits einen Block einer Datei, so kann er sie mit
anderen direkt ohne Umweg über den Zentralserver teilen.

Zudem reicht es prinzipiell wenn eine Datei nur einmal im Netz vorhanden ist.
Ein Rechenzentrum mit mehr Speicherplatz könnte alle Dateien zwischenhalten,
während ein *Thin--Client* nur die Dateien vorhalten muss mit denen gerade
gearbeitet wird.
Zu den bereits genannten allgemeinen Zielen kommen also noch folgende technische Ziele:

* Verschlüsselte Übertragung *und* Speicherung.
* *Deduplizierung*: Gleiche Dateien werden nur einmal im Netz gespeichert.
* *Benutzerverwaltung* mittels XMPP--Logins.
* *Speicherquoten* & Pinning (Dateien werden lokal »festgehalten«)
* Kein offensichtlicher *Single Point of Failure*.
* Optionale *Kompression* mittels der Algorithmen ``snappy`` oder ``brotli``.
* *Zweifaktor-Authentifizierung* und *paranoide* Sicherheits--Standards »Made in Germany«. 
+ +## Lösungsansätze + +Als Peer--to--Peer Filesystem werden wir das InterPlanetaryFileSystem[^IPFS] +nutzen. Dieses implementiert für uns bereits den Dateiaustausch zwischen den +einzelnen ``ipfs``--Knoten. Damit die Dateien nicht nur verschlüsselt übertragen +sondern auch abgespeichert werden, werden sie vor dem Hinzufügen zu IPFS mittels +AES im GCM--Modus von ``brig`` verschlüsselt und optional komprimiert. Zur +Nutzerseite hin bietet ``brig`` dann eine Kommandozeilenanwendung und ein +FUSE-Dateisystem[^FUSE], welches alle Daten in einem ``brig`` Repository wie normale +Dateien in einem Ordner aussehen lässt. Beim »Klick« auf eine Datei wird diese +von ``brig`` dann, für den Nutzer unsichtbar, im Netzwerk lokalisiert, +empfangen, entschlüsselt und als Dateistrom nach außen gegeben. + +[^IPFS]: Mehr Informationen unter \url{http://ipfs.io/} +[^FUSE]: FUSE: *Filesystem in Userspace*, siehe auch \url{https://de.wikipedia.org/wiki/Filesystem_in_Userspace} + +![Übersicht über die Kommunikation zwischen zwei Partnern/Repositories, mit den relevanten Sicherheits--Protokollen](images/security.png){#fig:security} + +Der AES--Schlüssel wird dabei an ein Passwort geknüpft, welches der Nutzer beim +Anlegen des Repositories angibt. Das Passwort wiederum ist an einen +XMPP--Account der Form ``nutzer@server.de/ressource`` geknüpft. +Ein Überblick über die sicherheitsrelevanten Zusammenhänge findet sich +in Abbildung {@fig:security}. + +Alle Änderungen an einem Repository werden in einer Metadatendatenbank +gespeichert. Diese kann dann mit anderen Teilnehmern über XMPP, und +verschlüsselt via OTR[^OTR], ausgetauscht werden. Jeder Teilnehmer hat dadurch +den gesamten Dateiindex. Die eigentlichen Dateien können aber »irgendwo« im +Teilnehmernetz sein. Sollte eine Datei lokal benötigt werden, so kann man sie +»pinnen«, um sie lokal zu speichern. 
Ansonsten werden nur selbst erstellte +Dateien gespeichert und andere Dateien maximal solange vorgehalten, bis die +Speicherquote erreicht ist. + +[^OTR]: *Off--the--Record--Messaging:* Mehr Informationen unter \url{https://de.wikipedia.org/wiki/Off-the-Record_Messaging} + +Nutzer die ``brig`` nicht installiert haben, oder mit denen man aus +Sicherheitsgründen nicht das gesamte Repository teilen möchte, können einzelne +Dateien ganz normal aus ihrem Browser heraus herunterladen. Dazu muss die Datei +vorher »publik« gemacht werden. Der außenstehende Nutzer kann dann die Datei +über ein von ``brig`` bereitgestelltes »Gateway« von einem öffentlich +erreichbaren Rechner mittels einer ``URL`` herunterladen. + +Um Portabilität zu gewährleisten wird die Software in der Programmiersprache +``Go``[@go_programming_language] geschrieben sein. Der Vorteil hierbei ist, dass am +Ende eine einzige sehr portable, statisch gelinkte Binärdatei erzeugt wird. +Weitere Vorteile sind die hohe Grundperformanz und die sehr angenehmen +Werkzeuge, die mit der Sprache mitgeliefert werden. Die Installation von +``brig`` ist beispielsweise unter Unix nur ein einzelner Befehl: + +```bash +$ go get github.com/disorganizer/brig +``` + +## Technische Risiken + +Der Aufwand für ein Softwareprojekt dieser Größe ist schwer einzuschätzen. Da +wir auf relativ junge Technologien wie ``ipfs`` setzen, ist zu erwarten, dass +sich in Details noch Änderungen ergeben. Auch die Tauglichkeit bezüglich +Performance ist momentan noch schwer einzuschätzen. Aus diesen Gründen werden +wir zwischen ``brig`` und ``ipfs`` eine Abstraktionsschicht bauen, um notfalls +den Einsatz anderer Backends zu ermöglichen. + +Erfahrungsgemäß nimmt auch die Portierung und Wartung auf anderen Plattformen +sehr viel Zeit in Anspruch. Durch die Wahl der hochportablen Programmiersprache +Go minimieren wir dies drastisch. + +Wie für jede sicherheitsrelevante Software ist die Zeit natürlich ein Risiko. 
+Ein Durchbruch im Bereich der Quantencomputer könnte daher in absehbarer +Zeit zu einem Sicherheitsrisiko werden. + +# Wirtschaftliche Verwertung + +## Open--Source--Lizenz und Monetarisierung + +Als Lizenz für ``brig`` soll die Copyleft--Lizenz ``AGPL`` zum Einsatz kommen. +Diese stellt sicher, dass Verbesserungen am Projekt auch wieder in dieses +zurückfließen müssen. + +Dass die Software quelloffen ist, ist kein Widerspruch zur wirtschaftlichen +Verwertung. Statt auf Softwareverkäufe zu setzen lässt sich mit dem Einsatz und +der Anpassung der Software Geld verdienen. Das Open--Source Modell bietet aus +unserer Sicht hierbei sogar einige Vorteile: + +- Schnellere Verbreitung durch fehlende Kostenbarriere auf Nutzerseite. +- Kann von Nutzern und Unternehmen ihren Bedürfnissen angepasst werden. +- Transparenz in Punkto Sicherheit (keine offensichtlichen Backdoors möglich). +- Fehlerkorrekturen, Weiterentwicklung und Testing aus der Community. + +## Verwertungskonzepte + +Es folgen einige konkrete Verwertungs--Strategien, die teilweise auch in +Partnerschaft mit dazu passenden Unternehmen ausgeführt werden könnten. +Prinzipiell soll die Nutzung für private und gewerbliche Nutzer kostenfrei sein, +weitergehende Dienstleistungen aber nicht. + +### Bezahlte Entwicklung spezieller Features + +Für sehr spezielle Anwendungsfälle wird auch ``brig`` nie alle Features +anbieten können, die der Nutzer sich wünscht. Das ist auch gut so, da es die +Programmkomplexität niedriger hält. Für Nutzer, die bereit sind für Features zu +zahlen, wären zwei Szenarien denkbar: + +*Allgemein nützliche Änderungen:* Diese werden direkt in ``brig`` integriert und +sind daher als Open--Source für andere nutzbar. Dies bietet Unternehmen die +Möglichkeit, die weitere Entwicklung von ``brig`` mittels finanziellen Mitteln +zu steuern. + +*Spezielle Lösungen:* Lösungen die nur für spezifische Anwendungsfälle Sinn +machen. 
Ein Beispiel wäre ein Skript, das für jeden Unternehmens--Login einen
XMPP--Account anlegt.

### Supportverträge

Normalerweise werden Fehler bei Open--Source--Projekten auf einen dafür
eingerichteten Bugtracker gemeldet. Die Entwickler können dann, nach einiger
Diskussion und Zeit, den Fehler reparieren. Unternehmen haben aber für
gewöhnlich kurze Deadlines bis etwas funktionieren muss.

Unternehmen mit Supportverträgen würden daher von folgenden Vorteilen profitieren:

- *Installation* der Software.
- *Priorisierung* bei Bug--Reports.
- Persönlicher *Kontakt* zu den Entwicklern.
- *Wartung* von nicht--öffentlichen Spezialfeatures
- Installation von *YubiKeys*[^YUBI] oder anderer Zwei--Faktor--Authentifizierung.

[^YUBI]: Ein flexibles 2FA-Token. Mehr Informationen unter \url{https://www.yubico.com/faq/yubikey}

### Mehrfachlizenzierung

Für Unternehmen, die unsere Software als Teil ihres eigenen Angebots nutzen
wollen, kann die Erteilung einer anderen Lizenz in Frage kommen:

- Eine Consulting Firma könnte eine Lizenz bei uns erwerben, um selbst
  Speziallösungen zu entwickeln, die sie dann nicht als *Open--Source*
  veröffentlichen müssen.

- Ein Hosting Anbieter, der ``brig`` nutzen möchte, müsste wegen der ``AGPL``
  dazu erst die Erlaubnis bei uns einholen. Je nach Fall könnte dann ein
  entsprechender Vertrag ausgehandelt werden.

### Zertifizierte NAS-Server

Besonders für Privatpersonen oder kleine Unternehmen wie Ingenieurbüros wäre
ein vorgefertigter Rechner mit vorinstallierter Software interessant. Das
Software- und Hardware--Zusammenspiel könnte dann vorher von uns
getestet werden und mit der Zeit auch dem technischen Fortschritt angepasst
werden.

### Lehrmaterial und Consulting

Auf lange Sicht wären auch Lehrmaterial, Schulungen und Consulting im
Allgemeinen als Einnahmequelle denkbar.
Respektable Einnahmen könnte man auch mit Merchandise, wie beispielsweise
Flaschenschiffen, erzielen. 
\smiley{} + +# Beschreibung des Arbeitsplans + +## Technische Arbeitsschritte + +Im Rahmen unserer Masterarbeiten werden wir einen Prototypen entwickeln, der +bereits in Grundzügen die oben beschriebenen Technologien demonstriert. +Gute Performanz, Portabilität und Anwenderfreundlichkeit sind zu diesem Zeitpunkt aus +Zeitmangel allerdings noch keine harten Anforderungen. + +Die im ersten Prototypen gewonnen Erkenntnisse wollen wir dazu nutzen, +nötigenfalls eine technische »Kurskorrektur« durchzuführen und den ersten +Prototypen nach Möglichkeit zu vereinfachen und zu stabilisieren. + +Zu diesem zweiten Prototypen werden dann in kleinen Iterationen Features +hinzugefügt. Jedes dieser Feature sollte für sich alleine stehen, daher sollte +zu diesem Zeitpunkt bereits die grundlegende Architektur relativ stabil sein. + +Nachdem ein gewisses Mindestmaß an nützlichen Features hinzugekommen ist, wäre +ein erstes öffentliches Release anzustreben. Dies hätte bereits eine gewisse +Verbreitung zur Folge und die in ``brig`` eingesetzten Sicherheitstechnologien +könnten von externen Sicherheitsexperten auditiert werden. + +## Meilensteinplanung + +Der oben stehende Zeitplan ist nochmal in Abbildung {@fig:milestones} auf drei Jahre +gerechnet zu sehen. + +![Grobe Meilensteinplanung von 2016 bis 2019.](images/milestones.png){#fig:milestones} + +Dabei sollen Prototyp I & II mindestens folgende Features beinhalten: + +*Prototyp I:* + +- Grundlegende Dateiübertragung. +- Verschlüsselte Speicherung. +- FUSE Layer zum Anzeigen der Dateien in einem »magischen« Ordner. + + +*Prototyp II:* + +- Sichere XMPP--Benutzerverwaltung. +- Erste Effizienzsteigerungen. +- Tag--basierte Ansicht im FUSE Layer. +- Verlässliche Benutzung auf der Kommandozeile (ähnlich ``git``). + +Weitere Features kommen dann in kleinen, stärker abgekapselten, Iterationen hinzu. + +# Finanzierung des Vorhabens + +Eine mögliche Finanzierungstrategie bietet das IuK--Programm[^IUK] des +Freistaates Bayern. 
Dabei werden Kooperationen zwischen Fachhochschulen und
Unternehmen mit bis zu 50% des Fördervolumens gefördert. Gern gesehen ist dabei
ein Großunternehmen, welches zusammen mit einem kleinen bis mittleren
Unternehmen (``KMU``) das Fördervolumen aufbringt. Aus diesen Mitteln
könnte die Hochschule Augsburg dann zwei Stellen für wissenschaftliche Mitarbeiter
über eine gewisse Dauer finanzieren.

Die Höhe des Fördervolumens richtet sich primär nach der Dauer der Förderung und
dem jeweiligen akademischen Abschluss. Die Dauer würden wir dabei auf mindestens
zwei, optimalerweise drei Jahre ansetzen. Sehr grob überschlagen kommen wir
dabei für das nötige Fördervolumen auf folgende Summe:

```python
>>> gehalt = 3500 + 2000 # Bruttogehalt + Arbeitgeberanteil
>>> spesen = 30000 # Anschaffungen, Büro, etc.
>>> pro_mann = 12 * gehalt # = 66000 Euro
>>> pro_jahr = 2 * pro_mann + spesen # = 162000 Euro
>>> budget = 3 * pro_jahr # = 486000 Euro ~ 500.000 Euro
```

Für einen erfolgreichen Projektstart sollten daher zwei Unternehmen bereit sein,
diese Summe gemeinsam aufzubringen. Die Gegenleistung bestünde dann einerseits
natürlich aus der fertigen Software, andererseits aus möglichen weiteren daraus
resultierenden Kooperationen. 
+ +[^IUK]: Mehr Informationen unter \url{http://www.iuk-bayern.de/} + +\newpage + +# Literaturverzeichnis diff --git a/doc/expose/ieee.csl b/doc/expose/ieee.csl new file mode 100644 index 00000000..0bde5113 --- /dev/null +++ b/doc/expose/ieee.csl @@ -0,0 +1,339 @@ + + diff --git a/doc/expose/images/milestones.png b/doc/expose/images/milestones.png new file mode 100644 index 00000000..99262e1e Binary files /dev/null and b/doc/expose/images/milestones.png differ diff --git a/doc/expose/images/milestones.svg b/doc/expose/images/milestones.svg new file mode 100644 index 00000000..06b4bf31 --- /dev/null +++ b/doc/expose/images/milestones.svg @@ -0,0 +1,614 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + 2016 + 2017 + 2018 + 2019 + + + + + + + + Iterationen + + Forschung & Promotion + Prototyp I + Masterarbeit + Juli + April + Prototyp II + Feature #1 + + Feature #2 + + Feature #2 + + Feature #3 + + Feature #4 + + ... + + + + + Studium + IuK Förderung + + März + + + + + diff --git a/doc/expose/images/security.png b/doc/expose/images/security.png new file mode 100644 index 00000000..c41e5d49 Binary files /dev/null and b/doc/expose/images/security.png differ diff --git a/doc/expose/images/title.png b/doc/expose/images/title.png new file mode 100644 index 00000000..f4c1a55d Binary files /dev/null and b/doc/expose/images/title.png differ diff --git a/doc/random.rst b/doc/random.rst new file mode 100644 index 00000000..8b4274cd --- /dev/null +++ b/doc/random.rst @@ -0,0 +1,35 @@ + + +Brig in der Welt: + +- Zwischen Dropbox und GlusterFS. +- Zwischen syncthing und git-annex. +- "bazil" ist relativ ähnlich. +- Brig ist das encfs für ipfs. + +Special features von brig: + +- Daten liegen in einem echten P2P netz. +- Fokus auf Sicherheit: Daten sind tatsächlich physikalisch verschlüsselt. +- Echte Benutzerverwaltung mit existierender Infrastruktur. +- Keinerlei zentrale Instanz. +- Kompression mit Brotli? 
+ +"Konkurrenz": + +https://bazil.org/ +https://github.com/cryptosphere/cryptosphere + +Was brig erstmal nicht können wird: + +- Masterschlüssel für admin. +- Special-remotes +- N-Copies (je nachdem) + + +Brig als Name: + +- Man operiert auf Streams. (Wie eine Brig auf dem Golfstrom) +- Repositories sind Datenhäfen. +- Sync könnte man "ship" nennen +- Der Besitzer eines Repo ist der Käpt'n. diff --git a/doc/roadmap.rst b/doc/roadmap.rst new file mode 100644 index 00000000..bbda995c --- /dev/null +++ b/doc/roadmap.rst @@ -0,0 +1,8 @@ +======= +ROADMAP +======= + +Roadmap +======= + +1. Sync a single file :-) (Make it twitch! aka ,,The system is twitching!'') diff --git a/doc/testacc.rst b/doc/testacc.rst new file mode 100644 index 00000000..fa1a2379 --- /dev/null +++ b/doc/testacc.rst @@ -0,0 +1,3 @@ +alice bob mallory bruce +ThiuJ9wesh eecot3oXan ief4que6Le Daef5aquin + diff --git a/doc/usage.rst b/doc/usage.rst new file mode 100644 index 00000000..83125d8c --- /dev/null +++ b/doc/usage.rst @@ -0,0 +1,161 @@ +===== +USAGE +===== + +Usage: + + brig SUBCOMMAND [OPTIONS …] + +brig is a distributed file synchronization tool based on IFPS and XMPP. +Every file repository (called a "port") is assigned to a JabberID like this: + + sahib@jabber.nullcat.de/laptop + +REPOSITORIY COMMANDS: + + brig init [] Initialize an empty port with no files at + brig clone Clone an existing port fully or shallow to + brig open Open an encrypted port. Asks for passphrase. + brig close Closes an encrypted port. + +DAEMON COMMANDS: + + brig watch [--pause] Watch a port for changes and add them automatically. + brig daemon Start a communication daemon manually. + brig sync [--peek] [-p ] Start a synchronization manually or look what would happen. + brig push [--peek] [-p ] Push last added files to network (do not pull) + brig pull [--peek] [-p ] Pull changes from peers (no push) + +XMPP HELPER COMMANDS: + + brig discover Search network for potential peers (via Zeroconf locally). 
+ brig friends List all reachable and offline peers ("Buddy list") + brig auth [--qa] Send auth request to (potential) peer at + brig ban Discontinue friendship with + brig prio Set priority of peer to + +WORKING DIR COMMANDS: + + brig status Give a overview of brig's current state. + brig add [-p ] Make managed by brig. + brig copies Keep at least copies of + brig find Find filenames locally and in the net. + brig rm Puts copy of in the trash bin or removes it directly. + +DATA INTEGRITY COMMANDS: + + brig lock|unlock Disallow or allow local or remote modifications of the port. + brig verify [--fix] Verify, and possibly fix, broken files. + +REVISION COMMANDS: + + brig log Show all known versions of this file. + brig checkout Checkout old version of this file, if available. + +SECURITY COMMANDS: + + brig yubi Manage yubikeys. + brig key Manage your PGP key. + +MISC COMMANDS: + + brig config Set a config key. + brig config Get a config key. + brig config -l List all available keys. + brig update Try to securely update brig. + brig help Show detailed help. + +Files that match the patterns in .brignore files are not watched. + +Config Values +============= + +Similar to ``git`` there is a global configuration, where also all +repositories on the device is stored. + +- Node Type: [archive, backup, desktop, checkout, hold] + + - archive => revision control to certain depth, autosync to archive. + - backup => revision control with depth of 1. + - client => No revision control [default] + - checkout => No autosync, only checkout certain files + - hold => Hold repository that removes file after certain time. + +- Merge Priorities + + - Give certain peers a priority. In case of merge conflicts highest ranking + will be kept. + - Repositories with same bare jabber id get higher priority by default. + - Maybe introduce a merge bin with files that need manual review? + +- ... + +Repositoy Layout +================ + +.. 
code-block:: bash + + repo/ + ├── .brig + │   ├── branches + │ └── HEAD + │   ├── config + │   ├── index + │   ├── otr.key + │   └── keys.db + └── .ipfs + ├── blocks + ├── config + ├── datastore + ├── logs + └── version + +Example session +=============== + +Init +---- + +.. code-block:: bash + + # Will take "photos" as repo name: + # Creates above directory structure (unlocked). + $ brig init alice@jabber.nullcat.de/photos + # Encrypt the repo metadata (ipfs data is encrypted anyways) + # This should make the xmpp client go offline. + $ brig close photos/ + # Open it again, start an xmpp client/make it online: + $ brig open photos/ + PGP Passphrase: clitteh + + +Clone +----- + +.. code-block:: bash + + $ brig discover + alice@jabber.nullcat.de/photos + alice@jabber.nullcat.de/music + alice@jabber.nullcat.de/porns # Do not over-use resource names ;-) + + # TODO: Problem: When is bob authorised to clone alice' repo? + $ brig auth alice@jabber.nullcat.de/photos + + # If succesful: + $ brig friends + alice@jabber.nullcat.de/photos + $ brig clone alice@jabber.nullcat.de/photos bob@jabber.nullcat.de/alice-photos + + +(Manual) sync +------------- + +At Bob's: + +.. code-block:: bash + + # Sync with all friendly peers: + $ brig sync + + file.png + - other.jpg diff --git a/fuse/directory.go b/fuse/directory.go new file mode 100644 index 00000000..dd21faeb --- /dev/null +++ b/fuse/directory.go @@ -0,0 +1,49 @@ +package fuse + +import ( + "os" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "github.com/disorganizer/brig/util/trie" + "golang.org/x/net/context" +) + +type Dir struct { + *trie.Node + + fs *FS +} + +func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0755 + return nil +} + +func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) { + // TODO: Actually lookup `name` (no path) and create File or Dir. + // - Lookup d.Children[name] + // - Check if it's a leaf: + // - If yes, create a File and return it. 
+ // - If no, create a Dir and return it. + return nil, nil +} + +func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { + // TODO: Actually create Dir and return it. + // - Create dir c. + // - Insert it to to d.Root() at join(d.Path(), req.Name) + // - Return dir c. + return nil, nil +} + +func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { + // TODO: Create File/Dir and return Node + open Handle. + // - Honour req.Name, req.Mode, req.Umask + return nil, nil, nil +} + +func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { + // TODO: Remove File/Dir. + return nil +} diff --git a/fuse/doc.go b/fuse/doc.go new file mode 100644 index 00000000..8a3df6f0 --- /dev/null +++ b/fuse/doc.go @@ -0,0 +1,21 @@ +// Package fuse implements a FUSE layer for brig. +// Using it, a repository may be represented as a "normal" directory. +// There are three different structs in the FUSE API: +// +// - fuse.Node : A file or a directory (depending on it's type) +// - fuse.FS : The filesystem. Used to find out the root node. +// - fuse.Handle: An open file. +// +// This implementation offers File (a fuse.Node and fuse.Handle), +// Dir (fuse.Node) and FS (fuse.FS). +// +// Fuse will call the respective handlers if it needs information about your +// nodes. Each request handlers will usually get a `ctx` used to cancel +// operations, a request structure `req` with detailed query infos and +// a reponse structure `resp` where results are written. Usually the request +// handlers might return an error or a new node/handle/fs. +// +// Every request handle that may run for a long time should be +// made interruptable. Especially read and write operations should +// check the ctx.Done() channel passed to each request handler. 
+package fuse diff --git a/fuse/file.go b/fuse/file.go new file mode 100644 index 00000000..2e6185c3 --- /dev/null +++ b/fuse/file.go @@ -0,0 +1,51 @@ +package fuse + +import ( + "bazil.org/fuse" + "bazil.org/fuse/fs" + "github.com/disorganizer/brig/util/trie" + "golang.org/x/net/context" +) + +type File struct { + *trie.Node +} + +func (f *File) Attr(ctx context.Context, a *fuse.Attr) error { + // TODO: Store special permissions? Is this allowed? + a.Mode = 0755 + return nil +} + +func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { + // TODO: Open the file and return a fs.Handle. + // actual data will be read by Read() + return nil, nil +} + +func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error { + // TODO: Close the file and sync/flush data. + return nil +} + +func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { + // TODO: Read file at req.Offset for req.Size bytes and set resp.Data. + return nil +} + +func (f *File) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + // TODO: Write req.Data at req.Offset to file. + // Expand file if necessary and update Size. + // Return the number of written bytes in resp.Size + return nil +} + +func (f *File) Flush(ctx context.Context, req *fuse.FlushRequest) error { + // TODO: Flush any pending data. Maybe a No-Op? + return nil +} + +func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { + // TODO: Update {m,c,a}time? Maybe not needed/Unsure when this is called. 
+ return nil +} diff --git a/fuse/fs.go b/fuse/fs.go new file mode 100644 index 00000000..1e3ac45c --- /dev/null +++ b/fuse/fs.go @@ -0,0 +1,15 @@ +package fuse + +import ( + "bazil.org/fuse/fs" + "github.com/disorganizer/brig/util/trie" +) + +type FS struct { + trie trie.Trie +} + +func (f *FS) Root() (fs.Node, error) { + // TODO + return nil, nil +} diff --git a/fuse/mount.go b/fuse/mount.go new file mode 100644 index 00000000..724eaccf --- /dev/null +++ b/fuse/mount.go @@ -0,0 +1,35 @@ +package fuse + +import ( + "bazil.org/fuse" + "bazil.org/fuse/fs" + "github.com/disorganizer/brig/util/trie" +) + +func mount(mountpoint string) error { + c, err := fuse.Mount(mountpoint) + if err != nil { + return err + } + defer c.Close() + + filesys := &FS{} + if err := fs.Serve(c, filesys); err != nil { + return err + } + + // check if the mount process has an error to report + <-c.Ready + if err := c.MountError; err != nil { + return err + } + + return nil +} + +func Mount(mountpoint string) error { + trie := trie.NewTrie() + trie.Insert("/home/sahib/test") + + return mount(mountpoint) +} diff --git a/glide.lock b/glide.lock new file mode 100644 index 00000000..ab31b815 --- /dev/null +++ b/glide.lock @@ -0,0 +1,408 @@ +hash: 54a9f225dde0313908a024d14e30bee5b1b825a42a97dae6a693b4038bef0e01 +updated: 2015-12-26T15:39:47.648301717+01:00 +imports: +- name: ../xmpp + version: "" +- name: bazil.org/fuse + version: e4fcc9a2c7567d1c42861deebeb483315d222262 + subpackages: + - fs +- name: bitbucket.org/ww/goautoneg + version: 75cd24fc2f2c +- name: code.google.com/p/go-uuid + version: 35bc42037350 + subpackages: + - /uuid +- name: code.google.com/p/go.crypto + version: 69e2a90ed92d +- name: collectd.org + version: 9fc824c70f713ea0f058a07b49a4c563ef2a3b98 + repo: https://collectd.org +- name: github.com/alecthomas/kingpin + version: aedd5430ecd39ba1396fee0c00308b494c552b1e +- name: github.com/alecthomas/template + version: b867cc6ab45cece8143cfcc6fc9c77cf3f2c23c0 +- name: 
github.com/alecthomas/units + version: 6b4e7dc5e3143b85ea77909c72caf89416fc2915 +- name: github.com/anacrolix/envpprof + version: 27041c5a2f396c55f0ae730be4266f1a163e46e3 + repo: https://github.com/anacrolix/envpprof +- name: github.com/anacrolix/jitter + version: 2ea5c18645100745b24e9f5cfc9b3f6f7eac51ef +- name: github.com/anacrolix/missinggo + version: 4e1ca5963308863b56c31863f60c394a7365ec29 +- name: github.com/anacrolix/utp + version: 0bb24de92c268452fb9106ca4fb9302442ca0dee +- name: github.com/araddon/gou + version: 0c2ab7394d785afff14c983fedce4be70ccc431f + repo: https://github.com/araddon/gou +- name: github.com/armon/go-metrics + version: 06b60999766278efd6d2b5d8418a58c3d5b99e87 + repo: https://github.com/armon/go-metrics +- name: github.com/beorn7/perks + version: b965b613227fddccbfffe13eae360ed3fa822f8d + subpackages: + - quantile +- name: github.com/bitly/go-simplejson + version: aabad6e819789e569bd6aabf444c935aa9ba1e44 + repo: https://github.com/bitly/go-simplejson +- name: github.com/bmizerany/pat + version: b8a35001b773c267eb260a691f4e5499a3531600 + repo: https://github.com/bmizerany/pat +- name: github.com/boltdb/bolt + version: 827f56dfb2091be2edb284ffb6113c198f27b033 +- name: github.com/bradfitz/iter + version: 454541ec3da2a73fc34fd049b19ee5777bf19345 +- name: github.com/bren2010/proquint + version: 5958552242606512f714d2e93513b380f43f9991 +- name: github.com/briantigerchow/pubsub + version: 39ce5f556423a4c7223b370fa17a3bbd75b2d197 +- name: github.com/BurntSushi/toml + version: 056c9bc7be7190eaa7715723883caffa5f8fa3e4 + repo: https://github.com/BurntSushi/toml +- name: github.com/camlistore/lock + version: ae27720f340952636b826119b58130b9c1a847a0 +- name: github.com/cathalgarvey/base58 + version: 5e83fd6f66e365827c0ecda064cd809a83939130 +- name: github.com/cathalgarvey/go-minilock + version: 2d98587907300d8eda9f7a38ae6d0680fd4d0333 + subpackages: + - taber +- name: github.com/cenkalti/backoff + version: 9831e1e25c874e0a0601b6dc43641071414eec7a +- 
name: github.com/cheggaaa/pb + version: d7729fd7ec1372c15b83db39834bf842bf2d69fb +- name: github.com/chzyer/readline + version: 94a70819f40e8ea2c1bda8ff92717bf26fc4c119 +- name: github.com/codahale/blake2 + version: 3fa823583afba430e8fc7cdbcc670dbf90bfacc4 +- name: github.com/codahale/chacha20 + version: ec07b4f69a3f70b1dd2a8ad77230deb1ba5d6953 +- name: github.com/codahale/chacha20poly1305 + version: f8a5c48301822c3d7dd26d78e68ea2968db0ab20 +- name: github.com/codahale/hdrhistogram + version: 5fd85ec0b4e2dd5d4158d257d943f2e586d86b62 +- name: github.com/codahale/metrics + version: 7c37910bc765e705301b159683480bdd44555c91 +- name: github.com/codegangsta/inject + version: 33e0aa1cb7c019ccc3fbe049a8262a6403d30504 +- name: github.com/codegangsta/negroni + version: c7477ad8e330bef55bf1ebe300cf8aa67c492d1b + repo: https://github.com/codegangsta/negroni +- name: github.com/coreos/go-semver + version: 568e959cd89871e61434c1143528d9162da89ef2 + subpackages: + - semver +- name: github.com/crowdmob/goamz + version: 82345796204222aa56be89cf930c316b1297f906 + subpackages: + - aws + - s3 +- name: github.com/cryptix/mdns + version: 04ff72a32679d57d009c0ac0fc5c4cda10350bad +- name: github.com/cznic/b + version: 01b13d79526a9ce9aa2c6e11d6bd4540205e4c03 + repo: https://github.com/cznic/b +- name: github.com/DataDog/datadog-go + version: b050cd8f4d7c394545fd7d966c8e2909ce89d552 + repo: https://github.com/DataDog/datadog-go +- name: github.com/dchest/blake2s + version: bddf4a4036dcf791d45e4402684a8382adcc8f78 +- name: github.com/dgryski/go-bits + version: 86c69b3c986f9d40065df5bd8f765796549eef2e + repo: https://github.com/dgryski/go-bits +- name: github.com/dgryski/go-bitstream + version: 27cd5973303fde7d914860be1ea4b927a6be0c92 + repo: https://github.com/dgryski/go-bitstream +- name: github.com/docker/spdystream + version: b2c3287865f3ad6aa22821ddb7b4692b896ac207 +- name: github.com/dustin/go-humanize + version: 00897f070f09f194c26d65afae734ba4c32404e8 +- name: 
github.com/dustin/randbo + version: 7f1b564ca7242d22bcc6e2128beb90d9fa38b9f0 +- name: github.com/facebookgo/atomicfile + version: 6f117f2e7f224fb03eb5e5fba370eade6e2b90c8 +- name: github.com/fd/go-nat + version: 50e7633d5f27d81490026a13e5b92d2e42d8c6bb +- name: github.com/feyeleanor/raw + version: 724aedf6e1a5d8971aafec384b6bde3d5608fba4 + repo: https://github.com/feyeleanor/raw +- name: github.com/feyeleanor/sets + version: 6c54cb57ea406ff6354256a4847e37298194478f + repo: https://github.com/feyeleanor/sets +- name: github.com/feyeleanor/slices + version: bb44bb2e4817fe71ba7082d351fd582e7d40e3ea + repo: https://github.com/feyeleanor/slices +- name: github.com/fzzy/radix + version: 27a863cdffdb0998d13e1e11992b18489aeeaa25 + subpackages: + - redis +- name: github.com/glycerine/rbuf + version: cc7a248274083ac0089c1d5474f6a864a013ce01 +- name: github.com/go-martini/martini + version: 15a47622d6a9b3e6a1eaca2681e4850f612471ea + repo: https://github.com/go-martini/martini +- name: github.com/go-ole/go-ole + version: 4246eab2a27c71c143f965432ace52990308d362 +- name: github.com/gogo/protobuf + version: 0ac967c269268f1af7d9bcc7927ccc9a589b2b36 + subpackages: + - /proto +- name: github.com/goji/param + version: da86c81e3e3c23b1948bc7a003d381250a032aa7 + repo: https://github.com/goji/param +- name: github.com/golang/protobuf + version: aece6fb931241ad332956db4f62798dfbea944b3 + subpackages: + - /proto +- name: github.com/golang/snappy + version: 723cc1e459b8eea2dea4583200fd60757d40097a +- name: github.com/gorilla/websocket + version: 3986be78bf859e01f01af631ad76da5b269d270c + repo: https://github.com/gorilla/websocket +- name: github.com/hashicorp/go-msgpack + version: fa3f63826f7c23912c15263591e65d54d080b458 + repo: https://github.com/hashicorp/go-msgpack +- name: github.com/hashicorp/golang-lru + version: 253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116 +- name: github.com/hashicorp/raft + version: d136cd15dfb7876fd7c89cad1995bc4f19ceb294 + repo: https://github.com/hashicorp/raft +- 
name: github.com/hashicorp/raft-boltdb + version: d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee + repo: https://github.com/hashicorp/raft-boltdb +- name: github.com/hashicorp/yamux + version: 9feabe6854fadca1abec9cd3bd2a613fe9a34000 +- name: github.com/howeyc/gopass + version: ae71a9cc54fddb61d946abe9191d05a24ac0e21b +- name: github.com/huin/goupnp + version: 223008361153d7d434c1f0ac990cd3fcae6931f5 +- name: github.com/inconshreveable/go-update + version: 68f5725818189545231c1fd8694793d45f2fc529 +- name: github.com/inconshreveable/muxado + version: f693c7e88ba316d1a0ae3e205e22a01aa3ec2848 +- name: github.com/influxdb/influxdb + version: cbbb01ce8e7ce70b8432b91e4bf32b6e0f6df883 + repo: https://github.com/influxdb/influxdb +- name: github.com/influxdb/usage-client + version: 475977e68d79883d9c8d67131c84e4241523f452 + repo: https://github.com/influxdb/usage-client +- name: github.com/ipfs/go-ipfs + version: e007d1ec73550a83b8d8241624debe70b45e6926 + subpackages: + - /repo/config + - repo/fsrepo +- name: github.com/ipfs/go-log + version: ee5cb9834b33bcf29689183e0323e328c8b8de29 +- name: github.com/jackpal/go-nat-pmp + version: a45aa3d54aef73b504e15eb71bea0e5565b5e6e1 +- name: github.com/jbenet/go-base58 + version: 6237cf65f3a6f7111cd8a42be3590df99a66bc7d +- name: github.com/jbenet/go-context + version: d14ea06fba99483203c19d92cfcd13ebe73135f4 + subpackages: + - frac + - io +- name: github.com/jbenet/go-datastore + version: c835c30f206c1e97172e428f052e225adab9abde +- name: github.com/jbenet/go-detect-race + version: 3463798d9574bd0b7eca275dccc530804ff5216f +- name: github.com/jbenet/go-fuse-version + version: b733dfc0597e1f6780510ee7afad8b6e3c7af3eb +- name: github.com/jbenet/go-is-domain + version: 93b717f2ae17838a265e30277275ee99ee7198d6 +- name: github.com/jbenet/go-msgio + version: 9399b44f6bf265b30bedaf2af8c0604bbc8d5275 +- name: github.com/jbenet/go-multiaddr + version: c13f11bbfe6439771f4df7bfb330f686826144e8 +- name: github.com/jbenet/go-multiaddr-net + version: 
4a8bd8f8baf45afcf2bb385bbc17e5208d5d4c71 +- name: github.com/jbenet/go-multihash + version: e8d2374934f16a971d1e94a864514a21ac74bf7f +- name: github.com/jbenet/go-net-resolve-addr + version: 689a613d9d5d5f6f62a500c7b5d02531efa3f20c + repo: https://github.com/jbenet/go-net-resolve-addr +- name: github.com/jbenet/go-os-rename + version: 3ac97f61ef67a6b87b95c1282f6c317ed0e693c2 +- name: github.com/jbenet/go-peerstream + version: f3ab20739a88aa79306dc039c1b5a39e7afa45d6 +- name: github.com/jbenet/go-random + version: cd535bd25356746b9b1e824871dda7da932460e2 +- name: github.com/jbenet/go-random-files + version: 737479700b40b4b50e914e963ce8d9d44603e3c8 +- name: github.com/jbenet/go-reuseport + version: 48959f1fad204b6cf2c0e8d086ef69f03f2de961 +- name: github.com/jbenet/go-sockaddr + version: da304f94eea1af8ba8d1faf184623e1f9d9777dc + subpackages: + - net +- name: github.com/jbenet/go-stream-muxer + version: 4a97500beeb081571128d41d539787e137f18404 +- name: github.com/jbenet/go-temp-err-catcher + version: aac704a3f4f27190b4ccc05f303a4931fd1241ff +- name: github.com/jbenet/goprocess + version: 64a8220330a485070813201cc05b0c6777f6a516 +- name: github.com/justinas/alice + version: 82db91fe6d1db4c070fed544cb5d0b5cc08fe276 + repo: https://github.com/justinas/alice +- name: github.com/jwilder/encoding + version: 07d88d4f35eec497617bee0c7bfe651a796dae13 + repo: https://github.com/jwilder/encoding +- name: github.com/kardianos/osext + version: 8fef92e41e22a70e700a96b29f066cda30ea24ef +- name: github.com/kimor79/gollectd + version: 61d0deeb4ffcc167b2a1baa8efd72365692811bc + repo: https://github.com/kimor79/gollectd +- name: github.com/kr/binarydist + version: 9955b0ab8708602d411341e55fffd7e0700f86bd +- name: github.com/martini-contrib/render + version: ec18f8345a1181146728238980606fb1d6f40e8c + repo: https://github.com/martini-contrib/render +- name: github.com/mattbaird/elastigo + version: 041b88c1fcf6489a5721ede24378ce1253b9159d + subpackages: + - api + - core +- name: 
github.com/matttproud/golang_protobuf_extensions + version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a + subpackages: + - pbutil +- name: github.com/miekg/dns + version: 82ffc45b1f84ff71bd1cebed8b210118ce3d181e +- name: github.com/mitchellh/go-homedir + version: 1f6da4a72e57d4e7edd4a7295a585e0a3999a2d4 +- name: github.com/mtchavez/jenkins + version: 5a816af6ef21ef401bff5e4b7dd255d63400f497 +- name: github.com/nbutton23/zxcvbn-go + version: d490adbfd16a8f3be969c0305f0e4517b933e944 +- name: github.com/nightlyone/lockfile + version: 22754258d2b05a18f75f228588041de6fe9fdcc8 +- name: github.com/olebedev/config + version: ed583f7fafbccbd73013b0379ab29943ab0da34b +- name: github.com/olekukonko/ts + version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8 +- name: github.com/onsi/ginkgo + version: e43390e35a4a88f3f95d5ddf9055efb7a1170469 + repo: https://github.com/onsi/ginkgo +- name: github.com/onsi/gomega + version: 0fe204460da2c8fa1babcaac196e694de8f1aaa1 + repo: https://github.com/onsi/gomega +- name: github.com/oxtoacart/bpool + version: 4e1c5567d7c2dd59fa4c7c83d34c2f3528b025d6 + repo: https://github.com/oxtoacart/bpool +- name: github.com/paulbellamy/ratecounter + version: 5a11f585a31379765c190c033b6ad39956584447 + repo: https://github.com/paulbellamy/ratecounter +- name: github.com/peterh/liner + version: 3f1c20449d1836aa4cbe38731b96f95cdf89634d + repo: https://github.com/peterh/liner +- name: github.com/prometheus/client_golang + version: 00e4c4629626a912cb8b44f8839a058768fc9c3f + subpackages: + - model + - prometheus + - text +- name: github.com/prometheus/client_model + version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 + subpackages: + - go +- name: github.com/prometheus/procfs + version: c91d8eefde16bd047416409eb56353ea84a186e4 +- name: github.com/rakyll/statik + version: 274df120e9065bdd08eb1120e0375e3dc1ae8465 + repo: https://github.com/rakyll/statik +- name: github.com/rcrowley/go-metrics + version: 7839c01b09d2b1d7068034e5fe6e423f6ac5be22 + repo: 
https://github.com/rcrowley/go-metrics +- name: github.com/rs/cors + version: 5e4ce6bc0ecd3472f6f943666d84876691be2ced +- name: github.com/satori/go.uuid + version: 7c7f2020c4c9491594b85767967f4619c2fa75f9 +- name: github.com/shirou/gopsutil + version: 6a274c3628382ab316340478300f5282b89f7778 + subpackages: + - common + - disk + - mem +- name: github.com/Sirupsen/logrus + version: 418b41d23a1bf978c06faea5313ba194650ac088 +- name: github.com/StackExchange/wmi + version: "" +- name: github.com/stathat/go + version: 01d012b9ee2ecc107cb28b6dd32d9019ed5c1d77 + repo: https://github.com/stathat/go +- name: github.com/steakknife/hamming + version: 8bad99011016569c05320e51be39c648679c5b73 +- name: github.com/syndtr/goleveldb + version: 4875955338b0a434238a31165cb87255ab6e9e4a + subpackages: + - leveldb +- name: github.com/syndtr/gosnappy + version: 156a073208e131d7d2e212cb749feae7c339e846 + subpackages: + - snappy +- name: github.com/tang0th/go-ecdh + version: f92fb9eaa3f13e26aed86cc3f89a0fd8b99a2348 +- name: github.com/texttheater/golang-levenshtein + version: dfd657628c58d3eeaa26391097853b2473c8b94e + subpackages: + - levenshtein +- name: github.com/tsuibin/goxmpp2 + version: b561103748af83325d318cd49aed5602ecfc9f00 + subpackages: + - /xmpp +- name: github.com/tucnak/climax + version: 478b42504b6386822801b43c6ca48b8097933c32 +- name: github.com/tv42/base58 + version: b6649477bfe6276322b4eeaae8f5e947b49cec92 +- name: github.com/VividCortex/godaemon + version: 3d9f6e0b234fe7d17448b345b2e14ac05814a758 +- name: github.com/whyrusleeping/chunker + version: 537e901819164627ca4bb5ce4e3faa8ce7956564 +- name: github.com/whyrusleeping/go-logging + version: 128b9855511a4ea3ccbcf712695baf2bab72e134 +- name: github.com/whyrusleeping/go-metrics + version: 1cd8009604ec2238b5a71305a0ecd974066e0e16 +- name: github.com/whyrusleeping/go-multiplex + version: 474b9aebeb391746f304ddf7c764a5da12319857 +- name: github.com/whyrusleeping/go-multistream + version: 
08e8f9c9f5665ed0c63ffde4fa5ef1d5fb3d516d +- name: github.com/whyrusleeping/multiaddr-filter + version: 9e26222151125ecd3fc1fd190179b6bdd55f5608 +- name: github.com/zenazn/goji + version: bf843a174a08e846246b8945f8a9a853d84a256a + repo: https://github.com/zenazn/goji +- name: golang.org/x/crypto + version: c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3 + subpackages: + - /otr + - scrypt +- name: golang.org/x/net + version: ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad + subpackages: + - /context +- name: golang.org/x/sys + version: 833a04a10549a95dc34458c195cbad61bbb6cb4d + repo: https://golang.org/x/sys +- name: golang.org/x/text + version: cf4986612c83df6c55578ba198316d1684a9a287 +- name: gopkg.in/airbrake/gobrake.v2 + version: c9d51adc624b5cc4c1bf8de730a09af4878ffe2d +- name: gopkg.in/alecthomas/kingpin.v2 + version: 8852570bd3865e9c4d4cb7cf5001c4295b07cad5 + repo: https://gopkg.in/alecthomas/kingpin.v2 +- name: gopkg.in/check.v1 + version: 91ae5f88a67b14891cfd43895b01164f6c120420 + repo: https://gopkg.in/check.v1 +- name: gopkg.in/fatih/pool.v2 + version: cba550ebf9bce999a02e963296d4bc7a486cb715 + repo: https://gopkg.in/fatih/pool.v2 +- name: gopkg.in/fsnotify.v1 + version: 96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0 +- name: gopkg.in/gemnasium/logrus-airbrake-hook.v2 + version: 31e6fd4bd5a98d8ee7673d24bc54ec73c31810dd +- name: gopkg.in/yaml.v2 + version: f7716cbe52baa25d2e9b0d0da546fcf909fc16b4 +- name: launchpad.net/gocheck + version: "" +devImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 00000000..f5a86806 --- /dev/null +++ b/glide.yaml @@ -0,0 +1,45 @@ +package: github.com/disorganizer/brig +import: +- package: bazil.org/fuse + subpackages: + - fs +- package: code.google.com/p/go-uuid + subpackages: + - /uuid +- package: github.com/Sirupsen/logrus +- package: github.com/VividCortex/godaemon +- package: github.com/boltdb/bolt +- package: github.com/cathalgarvey/go-minilock + subpackages: + - taber +- package: github.com/chzyer/readline +- 
package: github.com/codahale/chacha20poly1305 +- package: github.com/glycerine/rbuf +- package: github.com/gogo/protobuf + subpackages: + - /proto +- package: github.com/golang/protobuf + subpackages: + - /proto +- package: github.com/golang/snappy +- package: github.com/ipfs/go-ipfs + subpackages: + - /repo/config + - repo/fsrepo +- package: github.com/jbenet/go-multihash +- package: github.com/nbutton23/zxcvbn-go +- package: github.com/nightlyone/lockfile +- package: github.com/olebedev/config +- package: github.com/tang0th/go-ecdh +- package: github.com/tsuibin/goxmpp2 + subpackages: + - /xmpp +- package: github.com/tucnak/climax +- package: golang.org/x/crypto + subpackages: + - /otr + - scrypt +- package: golang.org/x/net + subpackages: + - /context +- package: gopkg.in/yaml.v2 diff --git a/im/client.go b/im/client.go new file mode 100644 index 00000000..9a5e97d3 --- /dev/null +++ b/im/client.go @@ -0,0 +1,479 @@ +package im + +import ( + "bytes" + "crypto/tls" + "fmt" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + xmpp "github.com/tsuibin/goxmpp2/xmpp" + + "golang.org/x/crypto/otr" +) + +// Debug is a flag that enables some debug prints when set to true. +var Debug bool + +func init() { + Debug = false + xmpp.Debug = false +} + +// Config can be passed to NewClient to configure how the details. +type Config struct { + // Jid is the login user. + Jid xmpp.JID + + // TLSConfig is used in building the login communication. + TLSConfig tls.Config + + // Password is the XMPP login password. + Password string + + // The place where the private otr key is stored. + KeyPath string + + // The place where fingerprints are stored. + FingerprintStorePath string + + // Timeout before Read or Write will error with ErrTimeout. + Timeout time.Duration +} + +// Client is an XMPP client with OTR support. 
+// Before establishing a connection, OTR will be triggered +// and the Socialist Millionaire Protocol is played through, +// using the minilock IDs of the participants. +type Client struct { + sync.Mutex + + // Embedded client + C *xmpp.Client + + // Path to a otr-key file. If empty, a new one will be generated. + KeyPath string + + // Connection Status channel: + Status chan xmpp.Status + + // Timeout before Write/Read will timeout on error. + Timeout time.Duration + + // JID to each individual cnv. + // Only active connections are stored here. + buddies map[xmpp.JID]*Conversation + + // buddies that send initial messages to us are pushed to this chan. + incomingBuddies chan *Conversation + + // This channel gets notified and closed after the first presence message. + // IsOnline() might wait on startup for presences + a short timeout. + incomingPresence chan struct{} + + // Used to protect incomingPresence, so it is only notified once. + presenceOnce sync.Once + + // Needed to compare previous fingerprints + keys FingerprintStore + + // Lookup map for online status for Client.C.Roster + online map[xmpp.JID]bool + + // Current fingerprint + fingerprint string +} + +// NewClient returns a ready client or nil on error. 
+func NewClient(config *Config) (*Client, error) { + keyStore, err := NewFsFingerprintStore(config.FingerprintStorePath) + if err != nil { + return nil, err + } + + c := &Client{ + KeyPath: config.KeyPath, + Timeout: config.Timeout, + buddies: make(map[xmpp.JID]*Conversation), + incomingBuddies: make(chan *Conversation), + incomingPresence: make(chan struct{}, 1), + online: make(map[xmpp.JID]bool), + keys: keyStore, + } + + if config.Timeout <= 0 { + c.Timeout = 20 * time.Second + } + + xmppClient, err := xmpp.NewClient( + &config.Jid, config.Password, config.TLSConfig, + nil, xmpp.Presence{}, c.Status, + ) + + if err != nil { + log.Fatalf("NewClient(%v): %v", config.Jid, err) + return nil, err + } + + c.C = xmppClient + + // Try to create the otr key or load existing one: + privKey, err := loadPrivateKey(c.KeyPath) + if err != nil { + return nil, err + } + + c.fingerprint = FormatFingerprint(privKey.PublicKey.Fingerprint()) + + go func() { + for status := range c.Status { + log.Debugf("connection status %d", status) + } + }() + + // Recv loop: Handle incoming messages, filter OTR. + go func() { + for stanza := range c.C.Recv { + switch msg := stanza.(type) { + case *xmpp.Message: + response, err := c.recv(msg) + if err != nil { + log.Warningf("im-recv: %v", err) + } + + if response != nil { + if cnv, ok := c.lookupConversation(msg.From); ok { + // Compensate for slow receivers: + go func() { cnv.add(joinBodies(response)) }() + } + } + case *xmpp.Presence: + if msg.Type == "unavailable" { + if _, ok := c.lookupConversation(msg.From); ok { + log.Infof("Removed otr conversation with %v", msg.From) + c.removeConversation(msg.From) + } + } + + c.addPresence(msg) + } + } + }() + + return c, nil +} + +// IsOnline cheks if the partner is online. +// On startup, this might block until the first presence messages are available. 
+func (c *Client) IsOnline(jid xmpp.JID) bool { + if _, ok := <-c.incomingPresence; !ok { + log.Debugf("Sorry, needed to wait for presence stanzas.") + } + + return c.isOnline(jid) +} + +// Dial opens a conversation with another peer. +func (c *Client) Dial(jid xmpp.JID) (*Conversation, error) { + // Begin the OTR dance: + if err := c.send(jid, nil); err != nil { + return nil, err + } + + if cnv, ok := c.lookupConversation(jid); ok { + return cnv, nil + } + + return nil, nil +} + +// Listen waits for new buddies that talk to us. +func (c *Client) Listen() *Conversation { + return <-c.incomingBuddies +} + +func (c *Client) Fingerprint() string { + return c.fingerprint +} + +// Close terminates all open connections. +func (c *Client) Close() { + c.Lock() + defer c.Unlock() + + for _, cnv := range c.buddies { + cnv.adieu() + } + + c.C.Close() +} + +//////////////////////// +// INTERNAL FUNCTIONS // +//////////////////////// + +func (c *Client) addPresence(ps *xmpp.Presence) { + c.Lock() + defer c.Unlock() + + log.Debugf("Partner presence `%v`: %v", ps.From, ps.Type != "unavailable") + c.online[ps.From] = (ps.Type != "unavailable") + + // Executed the first time this is called. + // Notify IsOnline() that some presence messages are in. + // Use a small timeout to be sure that some more messages are collected. 
+ c.presenceOnce.Do(func() { + go func() { + time.Sleep(2) + c.incomingPresence <- struct{}{} + close(c.incomingPresence) + }() + }) +} + +func (c *Client) isOnline(jid xmpp.JID) bool { + c.Lock() + defer c.Unlock() + + return c.online[jid] +} + +// locked cnv lookup +func (c *Client) lookupConversation(jid xmpp.JID) (*Conversation, bool) { + c.Lock() + defer c.Unlock() + + cnv, ok := c.buddies[jid] + return cnv, ok +} + +func (c *Client) removeConversation(jid xmpp.JID) { + c.Lock() + defer c.Unlock() + + if cnv, ok := c.buddies[jid]; ok { + cnv.adieu() + } + + delete(c.buddies, jid) +} + +func (c *Client) lookupOrInitConversation(jid xmpp.JID) (*Conversation, bool, error) { + c.Lock() + defer c.Unlock() + + _, ok := c.buddies[jid] + + if !ok { + log.Infof("new otr-conversation: `%v`", string(jid)) + privKey, err := loadPrivateKey(c.KeyPath) + + if err != nil { + log.Errorf("otr-key-gen failed: %v", err) + return nil, false, err + } + + c.fingerprint = FormatFingerprint(privKey.PublicKey.Fingerprint()) + c.buddies[jid] = newConversation(jid, c, privKey) + } + + return c.buddies[jid], !ok, nil +} + +func (c *Client) recv(msg *xmpp.Message) (*xmpp.Message, error) { + plain, responses, isNoOtrMsg, err := c.recvRaw(joinBodies(msg), msg.From) + if err != nil { + return nil, err + } + + // Turn every fragment into a separate xmpp message: + for _, outMsg := range responses { + if Debug { + fmt.Printf(" SEND BACK: %v\n", truncate(string(outMsg), 30)) + } + c.C.Send <- createMessage(c.C.Jid, msg.From, string(outMsg)) + } + + response := createMessage(msg.From, c.C.Jid, string(plain)) + if isNoOtrMsg { + return response, nil + } + + return nil, nil +} + +func (c *Client) recvRaw(input []byte, from xmpp.JID) ([]byte, [][]byte, bool, error) { + cnv, isNew, err := c.lookupOrInitConversation(from) + if err != nil { + return nil, nil, false, err + } + + cnv.Lock() + defer cnv.Unlock() + + // We talk to this cnv the first time. 
+ if isNew { + cnv.initiated = false + c.incomingBuddies <- cnv + + // First received message should be the otr query. + // Sometimes a xmpp server might deliver old messages dating from the + // last conversation. In this case we just print a (probably harmless) warning. + if !bytes.Contains(input, []byte(otr.QueryMessage)) { + return nil, nil, false, fmtOtrErr("init", input, fmt.Errorf("First message was not OTR query")) + } + } + + // Pipe input through the conversation: + otrCnv := cnv.conversation + data, encrypted, stateChange, responses, err := otrCnv.Receive(input) + if err != nil { + return nil, nil, false, fmtOtrErr("recv", input, err) + } + + if Debug { + fmt.Printf("RECV: `%v` `%v` (encr: %v should: %v auth: %v) (state-change: %v)\n", + truncate(string(data), 30), + truncate(string(input), 30), + encrypted, + otrCnv.IsEncrypted(), + cnv.authenticated, + stateChange, + ) + } + + auth := func(question string, jid xmpp.JID) error { + var err error + var fingerprint string + + if jid == c.C.Jid { + fingerprint = FormatFingerprint(otrCnv.PrivateKey.PublicKey.Fingerprint()) + log.Debugf(" Answering own fingerprint: %v", fingerprint) + } else { + if fingerprint, err = c.keys.Lookup(string(jid)); err != nil { + return err + } + + log.Debugf(" Finger: %v: %s", jid, fingerprint) + } + + authResp, err := otrCnv.Authenticate(question, []byte(fingerprint)) + if err != nil { + log.Warningf("im: Authentication error: %v", err) + return err + } + + responses = append(responses, authResp...) + return nil + } + + // Handle any otr conversation state change: + switch stateChange { + case otr.NewKeys: // We exchanged keys, channel is encrypted now. + if cnv.initiated { + if err := auth("alice: bob's fingerprint?", from); err != nil { + return nil, nil, false, err + } + } + case otr.SMPSecretNeeded: // We received a question and have to answer. + question := otrCnv.SMPQuestion() + log.Debugf("[!] 
Answer a question from %v '%s'", from, question) + if err := auth(question, c.C.Jid); err != nil { + return nil, nil, false, err + } + case otr.SMPComplete: // We or they completed the quest. + log.Debugf("[!] Answer is correct") + if cnv.initiated == false && cnv.authenticated == false { + if err := auth("bob: alice's fingerprint?", from); err != nil { + return nil, nil, false, err + } + } + + err := c.keys.Remember( + string(from), + FormatFingerprint(otrCnv.TheirPublicKey.Fingerprint()), + ) + + if err != nil { + log.Warningf("Unable to save fingerprints: %v", err) + } + + if cnv.initiated == true && cnv.authenticated { + for _, backlogMsg := range cnv.backlog { + base64Texts, err := cnv.conversation.Send(backlogMsg) + if err != nil { + return nil, nil, false, fmtOtrErr("send", backlogMsg, err) + } + + responses = append(responses, base64Texts...) + } + cnv.backlog = make([][]byte, 0) + } + + cnv.authenticated = true + case otr.SMPFailed: // We or they failed. + log.Debugf("[!] Answer is wrong") + fallthrough + case otr.ConversationEnded: + c.removeConversation(cnv.Jid) + } + + return data, responses, stateChange == otr.NoChange && encrypted && len(data) > 0, nil +} + +// Send sends `text` to participant `to`. +// A new otr session will be established if required. +// It is allowed that `text` to be nil. This might trigger the otr exchange, +// but does not send any real messages. +func (c *Client) send(to xmpp.JID, text []byte) error { + cnv, isNew, err := c.lookupOrInitConversation(to) + if err != nil { + return err + } + + cnv.Lock() + defer cnv.Unlock() + + if isNew { + cnv.initiated = true + + // Send the initial ?OTRv2? 
query: + if err := c.sendRaw(to, []byte(otr.QueryMessage), cnv); err != nil { + return fmt.Errorf("im: OTR Authentication failed: %v", err) + } + } + + if text == nil { + return nil + } + + if !cnv.authenticated { + cnv.backlog = append(cnv.backlog, text) + return nil + } + + return c.sendRaw(to, text, cnv) +} + +func (c *Client) sendRaw(to xmpp.JID, text []byte, cnv *Conversation) error { + base64Texts, err := cnv.conversation.Send(text) + + if Debug { + fmt.Printf("SEND(%v|%v): %v => %v\n", + cnv.conversation.IsEncrypted(), cnv.authenticated, + string(text), truncate(string(base64Texts[0]), 30), + ) + } + + if err != nil { + log.Warningf("im: send:", err) + return err + } + + for _, base64Text := range base64Texts { + c.C.Send <- createMessage(c.C.Jid, to, string(base64Text)) + } + + return nil +} diff --git a/im/client_test.go b/im/client_test.go new file mode 100644 index 00000000..f95fc898 --- /dev/null +++ b/im/client_test.go @@ -0,0 +1,170 @@ +package im + +import ( + "bytes" + "crypto/tls" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/util" + colorlog "github.com/disorganizer/brig/util/log" + "github.com/tsuibin/goxmpp2/xmpp" +) + +func init() { + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+	log.SetLevel(log.DebugLevel)
+
+	// Log pretty text
+	log.SetFormatter(&colorlog.ColorfulLogFormatter{})
+}
+
+var (
+	aliceJid   = xmpp.JID("alice@jabber.nullcat.de/laptop")
+	bobJid     = xmpp.JID("bob@jabber.nullcat.de/desktop")
+	alicePwd   = "ThiuJ9wesh"
+	bobPwd     = "eecot3oXan"
+	aliceKey   = filepath.Join(os.TempDir(), "otr.key.alice")
+	bobKey     = filepath.Join(os.TempDir(), "otr.key.bob")
+	buddyPathA = filepath.Join(os.TempDir(), "otr.test-buddies.alice")
+	buddyPathB = filepath.Join(os.TempDir(), "otr.test-buddies.bob")
+)
+
+type Run struct {
+	alice *Client
+	bob   *Client
+}
+
+func writeDummyBuddies(t *testing.T, r *Run) {
+	fa := r.alice.Fingerprint()
+	fb := r.bob.Fingerprint()
+
+	aliceBuddies := fmt.Sprintf("%s: %s\n", r.bob.C.Jid, fb)
+	bobBuddies := fmt.Sprintf("%s: %s\n", r.alice.C.Jid, fa)
+
+	if err := ioutil.WriteFile(buddyPathA, []byte(aliceBuddies), 0644); err != nil {
+		t.Errorf("Could not create %v: %v", buddyPathA, err)
+	}
+
+	if err := ioutil.WriteFile(buddyPathB, []byte(bobBuddies), 0644); err != nil {
+		t.Errorf("Could not create %v: %v", buddyPathB, err)
+	}
+}
+
+func clientPingPong(t *testing.T) {
+	r := Run{}
+
+	defer func() {
+		for _, path := range []string{aliceKey, bobKey, buddyPathA, buddyPathB} {
+			if err := os.Remove(path); err != nil {
+				t.Logf("Note: could not remove %v", path)
+			}
+		}
+	}()
+
+	for _, path := range []string{buddyPathA, buddyPathB} {
+		if err := util.Touch(path); err != nil {
+			t.Errorf("touch `%v`: %v", path, err)
+		}
+	}
+
+	client, err := NewClient(&Config{
+		Jid:                  aliceJid,
+		Password:             alicePwd,
+		TLSConfig:            tls.Config{ServerName: aliceJid.Domain()},
+		KeyPath:              aliceKey,
+		FingerprintStorePath: buddyPathA,
+	})
+
+	if err != nil {
+		t.Errorf("Could not create alice client: %v", err)
+		return
+	}
+
+	r.alice = client
+
+	client, err = NewClient(&Config{
+		Jid:                  bobJid,
+		Password:             bobPwd,
+		TLSConfig:            tls.Config{ServerName: bobJid.Domain()},
+		KeyPath:              bobKey,
+		FingerprintStorePath: buddyPathB,
+	})
+
+	if err != nil {
+		
t.Errorf("Could not create bob client: %v", err) + return + } + + r.bob = client + + writeDummyBuddies(t, &r) + done := make(chan bool) + + go func() { + cnv, err := r.alice.Dial(bobJid) + if err != nil { + t.Errorf("Dial: %v", err) + return + } + + for i := 0; !cnv.Ended() && i < 10; i++ { + t.Logf("Alice: PING %d", i) + if _, err := cnv.Write([]byte(fmt.Sprintf("PING %d", i))); err != nil { + t.Errorf("alice: write failed: %v", err) + return + } + + msg, err := cnv.ReadMessage() + t.Logf("Alice: RECV %d: %s/%v", i, msg, err) + if err != nil { + t.Errorf("alice: read failed: %v", err) + return + } + + if !bytes.Equal(msg, []byte(fmt.Sprintf("PONG %d", i))) { + t.Errorf("PING %d does not match PONG %d", i, i) + return + } + } + + done <- true + }() + + cnv := r.bob.Listen() + t.Logf("Talking to %v", cnv.Jid) + + for i := 0; !cnv.Ended() && i < 10; i++ { + msg, err := cnv.ReadMessage() + t.Logf("Bob: RECV %d: %s/%v", i, msg, err) + if err != nil { + t.Errorf("bob: read failed: %v", err) + return + } + + if !bytes.Equal(msg, []byte(fmt.Sprintf("PING %d", i))) { + t.Errorf("PING %d does not match PONG %d", i, i) + return + } + + t.Logf("Bob: PONG %d", i) + if _, err = cnv.Write([]byte(fmt.Sprintf("PONG %d", i))); err != nil { + t.Errorf("bob: write failed: %v", err) + return + } + + } + + <-done + cnv.Close() +} + +func TestClientPingPong(t *testing.T) { + clientPingPong(t) +} diff --git a/im/common.go b/im/common.go new file mode 100644 index 00000000..9aec056f --- /dev/null +++ b/im/common.go @@ -0,0 +1,94 @@ +package im + +import ( + "bytes" + "crypto/rand" + "encoding/xml" + "fmt" + "io/ioutil" + "os" + + log "github.com/Sirupsen/logrus" + "golang.org/x/crypto/otr" + + "github.com/tsuibin/goxmpp2/xmpp" +) + +func truncate(a string, l int) string { + if len(a) > l { + return a[:l] + "..." 
+ a[len(a)-l:] + } + + return a +} + +func createMessage(from, to xmpp.JID, text string) *xmpp.Message { + xmsg := &xmpp.Message{} + xmsg.From = from + xmsg.To = to + xmsg.Id = xmpp.NextId() + + xmsg.Type = "chat" + xmsg.Lang = "en" + xmsg.Body = []xmpp.Text{ + { + XMLName: xml.Name{Local: "body"}, + Chardata: text, + }, + } + + return xmsg +} + +func joinBodies(msg *xmpp.Message) []byte { + if msg == nil { + return nil + } + + buf := &bytes.Buffer{} + for _, field := range msg.Body { + buf.Write([]byte(field.Chardata)) + } + + return buf.Bytes() +} + +func fmtOtrErr(prefix string, msg []byte, err error) error { + return fmt.Errorf("otr-%v: %v, on msg: %v", prefix, err, truncate(string(msg), 20)) +} + +func genPrivateKey(key *otr.PrivateKey, path string) error { + key.Generate(rand.Reader) + keyDump := key.Serialize(nil) + + if err := ioutil.WriteFile(path, keyDump, 0600); err != nil { + return err + } + + keyString := fmt.Sprintf("%X", key.Serialize(nil)) + log.Infof("Key Generated: %x", truncate(keyString, 40)) + return nil +} + +// loadPrivateKey generates a valid otr.PrivateKey. +// This function should never fail in normal cases since it +// will attempt to generate a new key and write it to path as fallback. 
+func loadPrivateKey(path string) (*otr.PrivateKey, error) { + key := &otr.PrivateKey{} + + // Try to load an existing one: + if file, err := os.Open(path); err == nil { + if data, err := ioutil.ReadAll(file); err == nil { + if _, ok := key.Parse(data); ok { + return key, nil + } + } + } + + // Generate a new one as fallback or initial case: + if err := genPrivateKey(key, path); err != nil { + return nil, err + } + + return key, nil +} diff --git a/im/conversation.go b/im/conversation.go new file mode 100644 index 00000000..03de4e42 --- /dev/null +++ b/im/conversation.go @@ -0,0 +1,174 @@ +package im + +import ( + "bytes" + "fmt" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/tsuibin/goxmpp2/xmpp" + "golang.org/x/crypto/otr" +) + +var ( + // ErrTimeout happens when the partner could not be reached after Config.Timeout. + ErrTimeout = fmt.Errorf("Timeout reached during OTR io") + // ErrDeadConversation happens when the underlying OTR conversation was ended. + ErrDeadConversation = fmt.Errorf("Conversation ended already") +) + +// Conversation represents a point to point connection with a buddy. +// It can be used like a io.ReadWriter over network, encrypted via OTR. +type Conversation struct { + sync.Mutex + + // Jid of your Conversation. + Jid xmpp.JID + + // Client is a pointer to the client this cnv belongs to. + Client *Client + + // recv provides all messages sent from this cnv. + recv chan []byte + + // send can be used to send arbitary messages to this cnv. + send chan []byte + + // the underlying otr conversation + conversation *otr.Conversation + + // A backlog of messages send before otr auth. + backlog [][]byte + + // used in Read() to compensate against small read-buffers. + readBuf *bytes.Buffer + + // This is set to a value > 0 if the conversation ended. + isDead bool + + // Did we initiated the conversation to this cnv? 
+	initiated bool
+
+	// This cnv completed the auth-game
+	authenticated bool
+}
+
+func newConversation(jid xmpp.JID, client *Client, privKey *otr.PrivateKey) *Conversation {
+	sendChan := make(chan []byte)
+	recvChan := make(chan []byte)
+
+	go func() {
+		for data := range sendChan {
+			if err := client.send(jid, data); err != nil {
+				log.Warningf("im-send: %v", err)
+			}
+		}
+	}()
+
+	return &Conversation{
+		Jid:     jid,
+		Client:  client,
+		recv:    recvChan,
+		send:    sendChan,
+		backlog: make([][]byte, 0),
+		readBuf: &bytes.Buffer{},
+		conversation: &otr.Conversation{
+			PrivateKey: privKey,
+		},
+	}
+}
+
+func (b *Conversation) Write(buf []byte) (int, error) {
+	if b.Ended() {
+		return 0, ErrDeadConversation
+	}
+
+	timer := time.NewTimer(b.Client.Timeout)
+	defer timer.Stop() // a NewTicker here was never stopped and leaked per call
+	select {
+	case <-timer.C:
+		return 0, ErrTimeout
+	case b.send <- buf:
+		return len(buf), nil
+	}
+}
+
+func (b *Conversation) Read(buf []byte) (int, error) {
+	msg, err := b.ReadMessage()
+	if err != nil {
+		return 0, err
+	}
+
+	b.Lock()
+	defer b.Unlock()
+
+	b.readBuf.Write(msg) // Read(buf) caps at len(buf); buf[:n] paniced on short buffers
+	return b.readBuf.Read(buf)
+}
+
+// ReadMessage returns exactly one message.
+func (b *Conversation) ReadMessage() ([]byte, error) {
+	if b.Ended() {
+		return nil, ErrDeadConversation
+	}
+
+	timer := time.NewTimer(b.Client.Timeout)
+	defer timer.Stop() // see Write: an unstopped NewTicker would leak
+	select {
+	case <-timer.C:
+		return nil, ErrTimeout
+	case msg, ok := <-b.recv:
+		if ok {
+			return msg, nil
+		}
+
+		return nil, ErrDeadConversation
+	}
+}
+
+func (b *Conversation) adieu() {
+	// Make sure Write()/Read() does not block anymore.
+	b.Lock()
+	defer b.Unlock()
+
+	if b.isDead {
+		return
+	}
+
+	b.isDead = true
+	b.authenticated = false
+
+	if b.conversation != nil {
+		// End() returns some messages that can be used to revert the connection
+		// back to a normal non-OTR connection. We just don't send those.
+		b.conversation.End()
+	}
+
+	// Wakeup any Write/Read calls.
+ close(b.send) + close(b.recv) +} + +// Add a message to the conversation +func (b *Conversation) add(msg []byte) { + if !b.Ended() { + b.recv <- msg + } +} + +// Ended returns true when the underlying conversation was ended. +func (b *Conversation) Ended() bool { + b.Lock() + defer b.Unlock() + + return b.isDead +} + +// Close ends a conversation. You normally do not need to call this directly. +// There is no guarantee that previously send messages will be actually delivered. +func (b *Conversation) Close() error { + b.adieu() + b.Client.removeConversation(b.Jid) + return nil +} diff --git a/im/fingerprint.go b/im/fingerprint.go new file mode 100644 index 00000000..5fc40d1e --- /dev/null +++ b/im/fingerprint.go @@ -0,0 +1,105 @@ +package im + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/util" + "gopkg.in/yaml.v2" +) + +// FingerprintStore represents an arbitary store where fingerprints are stored. +type FingerprintStore interface { + // Lookup returns the last known fingerprint related to this jid. + Lookup(jid string) (string, error) + + // Remember stores the last known fingerprint of this jid. + Remember(jid string, fingerprint string) error +} + +// FormatFingerprint converts a raw byte string representation to a hex fingerprint. +func FormatFingerprint(raw []byte) string { + // NOTE: This is a little stupid, but fits in one line: + return strings.Replace(fmt.Sprintf("% X", raw), " ", ":", -1) +} + +// FsFingerprintStore represents a FingerprintStore that saves it's contents to +// a YAML file on the filesystem at an absolute path. 
+type FsFingerprintStore struct { + Path string + keys map[string]string +} + +func (k *FsFingerprintStore) load() (map[string]string, error) { + fd, err := os.Open(k.Path) + if err != nil { + return nil, err + } + + defer util.Closer(fd) + + data, err := ioutil.ReadAll(fd) + if err != nil { + return nil, err + } + + keys := make(map[string]string) + return keys, yaml.Unmarshal(data, &keys) +} + +// NewFsFingerprintStore returns a new, possibly empty, FingerprintStore +func NewFsFingerprintStore(path string) (*FsFingerprintStore, error) { + k := &FsFingerprintStore{Path: path} + keys, err := k.load() + + if err != nil { + return nil, err + } + + k.keys = keys + return k, nil +} + +// Lookup returns the last know fingerprint of this jid. No I/O is done. +func (k *FsFingerprintStore) Lookup(jid string) (string, error) { + keys, err := k.load() + if err != nil { + return "", err + } + + k.keys = keys + + fingerprint, ok := keys[jid] + if !ok { + log.Warningf("No fingerprint known for `%v`.", jid) + } + + return fingerprint, nil +} + +// Remember stores the last knwon fingerprint to this jid. 
It rewrites the
+// fingerprint database on the filesystem
+func (k *FsFingerprintStore) Remember(jid string, fingerprint string) error {
+	k.keys[jid] = fingerprint
+
+	fd, err := os.OpenFile(k.Path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) // O_TRUNC: a shorter rewrite must not keep stale tail bytes
+	if err != nil {
+		return err
+	}
+
+	defer util.Closer(fd)
+
+	data, err := yaml.Marshal(&k.keys)
+	if err != nil {
+		return err
+	}
+
+	if _, err := fd.Write(data); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/im/im-util/main.go b/im/im-util/main.go
new file mode 100644
index 00000000..ab20e460
--- /dev/null
+++ b/im/im-util/main.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"os"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/disorganizer/brig/im"
+	colorlog "github.com/disorganizer/brig/util/log"
+	"github.com/tsuibin/goxmpp2/xmpp"
+)
+
+func init() {
+	log.SetOutput(os.Stderr)
+
+	// Only log the warning severity or above.
+	log.SetLevel(log.DebugLevel)
+
+	// Log pretty text
+	log.SetFormatter(&colorlog.ColorfulLogFormatter{})
+}
+
+func main() {
+	sendFlag := flag.Bool("send", false, "Send otr query")
+
+	flag.Parse()
+
+	var jid xmpp.JID
+	var partnerJid xmpp.JID
+	var password string
+
+	aliceJid := xmpp.JID("alice@jabber.nullcat.de/laptop")
+	bobJid := xmpp.JID("bob@jabber.nullcat.de/desktop")
+
+	if *sendFlag {
+		jid, partnerJid, password = aliceJid, bobJid, "ThiuJ9wesh"
+	} else {
+		jid, partnerJid, password = bobJid, aliceJid, "eecot3oXan"
+	}
+
+	client, err := im.NewClient(&im.Config{
+		Jid:                  jid,
+		Password:             password,
+		TLSConfig:            tls.Config{ServerName: jid.Domain()},
+		KeyPath:              "/tmp/otr.key." + password,
+		FingerprintStorePath: "/tmp/otr.buddies."
+ password, + }) + + if err != nil { + log.Fatalf("Could not create client: %v", err) + return + } + + defer client.Close() + + log.Infof("Partner is Online: %v", client.IsOnline(partnerJid)) + + if *sendFlag { + cnv, err := client.Dial(partnerJid) + if err != nil { + log.Errorf("Dial: %v", err) + return + } + + for i := 0; !cnv.Ended() && i < 10; i++ { + log.Infof("Alice: PING %d", i) + cnv.Write([]byte(fmt.Sprintf("PING %d", i))) + + msg, err := cnv.ReadMessage() + log.Infof("Alice: RECV %d: %s/%v", i, msg, err) + time.Sleep(2 * time.Second) + } + + cnv.Close() + } else { + for { + cnv := client.Listen() + + log.Println("Dial to", cnv.Jid) + go func() { + for i := 0; !cnv.Ended() && i < 10; i++ { + msg, err := cnv.ReadMessage() + log.Infof("Bob: RECV %d: %s/%v", i, msg, err) + log.Infof("Bob: PONG %d", i) + cnv.Write([]byte(fmt.Sprintf("PONG %d", i))) + } + }() + } + } +} diff --git a/repo/config/config.go b/repo/config/config.go new file mode 100644 index 00000000..24b1e69f --- /dev/null +++ b/repo/config/config.go @@ -0,0 +1,55 @@ +package config + +import ( + "os" + + "github.com/olebedev/config" +) + +// LoadConfig loads a yaml configuration file. +func LoadConfig(path string) (*config.Config, error) { + cfg, err := config.ParseYamlFile(path) + + if err != nil { + return nil, err + } + return cfg, nil +} + +// SaveConfig saves a given config as yaml encoded configuration file. +func SaveConfig(path string, cfg *config.Config) (int, error) { + yamlString, err := config.RenderYaml(cfg.Root) + if err != nil { + return 0, err + } + file, err := os.Create(path) + if err != nil { + return 0, err + } + defer file.Close() + + written, err := file.WriteString(yamlString) + if err != nil { + return 0, err + } + + return written, nil +} + +// CreateDefaultConfig creates a configfile with default values. 
+func CreateDefaultConfig() *config.Config { + cfg := map[string]interface{}{ + "repository": map[string]interface{}{ + "jid": "", + "mid": "", + "uuid": "", + }, + "ipfs": map[string]interface{}{ + "port": 5001, + "hostname": "localhost", + "path": "", + }, + } + + return &config.Config{Root: cfg} +} diff --git a/repo/config/config_test.go b/repo/config/config_test.go new file mode 100644 index 00000000..91c600c9 --- /dev/null +++ b/repo/config/config_test.go @@ -0,0 +1,63 @@ +package config + +import ( + "fmt" + "testing" +) + +const configPath = "/tmp/brig_test.cfg" + +func TestConfig(t *testing.T) { + fmt.Println("Creating default config.") + cfg := CreateDefaultConfig() + fmt.Println("Saving default config to ", configPath) + SaveConfig(configPath, cfg) + fmt.Println("Loading default config from ", configPath) + c, err := LoadConfig(configPath) + if err != nil { + t.Errorf("Unable to load config: %v", err) + return + } + + inputValues := map[string]string{ + "repository.jid": "test@jabber.fr/waffeln", + "repository.uuid": "L@#K:JLKR:O#KJRLKQR", + "ipfs.path": "/tmp/katzenauge", + } + + fmt.Println("\nSetting some test parameters...") + for key, value := range inputValues { + fmt.Printf("Setting %s to %s\n", key, value) + c.Set(key, value) + } + + fmt.Println("\nSaving config to ", configPath) + SaveConfig(configPath, c) + + fmt.Println("Loading default config from ", configPath) + c, err = LoadConfig(configPath) + if err != nil { + t.Errorf("Unable to load config: %v", err) + return + } + + fmt.Println("\nPrinting config after manipulating parameters...") + expectedValues := map[string]interface{}{ + "repository.jid": "test@jabber.fr/waffeln", + "repository.uuid": "L@#K:JLKR:O#KJRLKQR", + "repository.mid": "", + "ipfs.path": "/tmp/katzenauge", + } + for key, expectedValue := range expectedValues { + configValue, _ := c.String(key) + fmt.Printf("Reading %s from config: %s\n", key, configValue) + if configValue != expectedValue { + t.Logf("%s read, but %s was 
expected.", configValue, expectedValue) + } + } + configValue, _ := c.Int("ipfs.port") + fmt.Printf("Reading %s from config: %d\n", "ipfs.port", configValue) + if configValue != 5001 { + t.Logf("%d read, but %d was expected.\n", configValue, 5001) + } +} diff --git a/repo/doc.go b/repo/doc.go new file mode 100644 index 00000000..1177cbdb --- /dev/null +++ b/repo/doc.go @@ -0,0 +1,19 @@ +// Package repo offers function for creating and loading a brig repository. +// +// The repository looks like this: +// +// /path/to/repo +// └── .brig +// ├── config +// ├── index.bolt[.minilock] +// ├── master.key[.minilock] +// └── ipfs +//    └── ... +// +// Directly after `init`, the index and key files will be still encrypted +// with minilock. `open` will use the user's password to decrypt those. +// `close` reverses this by encrypting them again. +// +// The `Repository` structure aids in accessing all those files and offers +// individual apis for them (like `Store` for reading/writing the index). +package repo diff --git a/repo/global/global.go b/repo/global/global.go new file mode 100644 index 00000000..59e31184 --- /dev/null +++ b/repo/global/global.go @@ -0,0 +1,162 @@ +// Package global implements the logic behind the global config files in +// ~/.brigconfig +package global + +import ( + "os" + "os/user" + "path" + + "github.com/disorganizer/brig/repo/config" + "github.com/disorganizer/brig/util/filelock" + yamlConfig "github.com/olebedev/config" +) + +const ( + DirName = ".brigconfig" +) + +// Repository is the handle for the global repository. 
+type Repository struct { + Folder string + Config *yamlConfig.Config +} + +// RepoListEntry is a single entry in ~/.brigconfig/repos +type RepoListEntry struct { + UniqueID string + RepoPath string + DaemonPort int + IpfsPort int +} + +func (g *Repository) acquireLock() error { + lockPath := path.Join(g.Folder, "lock") + if err := filelock.Acquire(lockPath); err != nil { + return err + } + + return nil +} + +func (g *Repository) releaseLock() error { + return filelock.Release(path.Join(g.Folder, "lock")) +} + +func guessGlobalFolder() string { + curr, err := user.Current() + if err != nil { + return os.TempDir() + } + + return path.Join(curr.HomeDir, DirName) +} + +// Init creates a new global Repository and returns it. +func Init() (*Repository, error) { + folder := guessGlobalFolder() + repo := &Repository{ + Folder: folder, + } + + if err := os.Mkdir(folder, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := repo.acquireLock(); err != nil { + return nil, err + } + defer repo.releaseLock() + + cfg := &yamlConfig.Config{ + Root: map[string]interface{}{ + "repositories": map[string]RepoListEntry{}, + }, + } + + if _, err := config.SaveConfig(path.Join(folder, "repos"), cfg); err != nil { + return nil, err + } + + repo.Config = cfg + return repo, nil +} + +// Load loads an existing global repository. +func Load() (*Repository, error) { + folder := guessGlobalFolder() + repo := &Repository{ + Folder: folder, + } + + if err := repo.acquireLock(); err != nil { + return nil, err + } + defer repo.releaseLock() + + cfg, err := config.LoadConfig(path.Join(folder, "repos")) + if err != nil { + return nil, err + } + + repo.Config = cfg + return repo, nil +} + +// New loads a global repository, if it's not there, it's created. 
+func New() (*Repository, error) {
+	folder := guessGlobalFolder()
+	if _, err := os.Stat(folder); err == nil { // Stat err is nil when the folder exists; os.IsExist(nil) is false and always re-Init'ed
+		return Load()
+	}
+
+	return Init()
+}
+
+func (g *Repository) modifyConfig(worker func(cfg *yamlConfig.Config) error) error {
+	if err := g.acquireLock(); err != nil {
+		return err
+	}
+	defer g.releaseLock()
+
+	cfg, err := config.LoadConfig(path.Join(g.Folder, "repos"))
+	if err != nil {
+		return err
+	}
+
+	if err := worker(cfg); err != nil {
+		return err
+	}
+
+	if _, err := config.SaveConfig(path.Join(g.Folder, "repos"), cfg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// AddRepo adds a new repo to ~/.brigconfig/repos
+func (g *Repository) AddRepo(entry RepoListEntry) error {
+	return g.modifyConfig(func(cfg *yamlConfig.Config) error {
+		repos, err := cfg.Map("repositories")
+		if err != nil {
+			return err
+		}
+
+		repos[entry.UniqueID] = entry
+		return nil
+	})
+}
+
+// RemoveRepo deletes an existing repo from ~/.brigconfig/repos
+func (g *Repository) RemoveRepo(entry RepoListEntry) error {
+	return g.modifyConfig(func(cfg *yamlConfig.Config) error {
+		repos, err := cfg.Map("repositories")
+		if err != nil {
+			return err
+		}
+
+		delete(repos, entry.UniqueID)
+		return nil
+	})
+}
diff --git a/repo/id-util/id-util b/repo/id-util/id-util
new file mode 100755
index 00000000..478c0221
Binary files /dev/null and b/repo/id-util/id-util differ
diff --git a/repo/id.go b/repo/id.go
new file mode 100644
index 00000000..0520ca07
--- /dev/null
+++ b/repo/id.go
@@ -0,0 +1,178 @@
+package repo
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/cathalgarvey/go-minilock"
+	"github.com/cathalgarvey/go-minilock/taber"
+)
+
+const (
+	// EncFileSuffix is appended to all encrypted in-repo file paths
+	EncFileSuffix = ".minilock"
+)
+
+// TODO: Build a LockFiles and UnlockFiles variant.
+// Key gen seems to take about ~1s - slow for many files.
+
+// LockFile encrypts `path` with minilock, using pass and jid as email.
+// The resulting file is written to `path` + EncFileSuffix, +// the source file is removed. +func LockFile(jid, pass, path string) error { + keys, err := minilock.GenerateKey(jid, pass) + if err != nil { + return err + } + + return lockFile(keys, jid, pass, path) +} + +// LockFiles works like LockFile but generates the key only once. +func LockFiles(jid, pass string, paths []string) error { + keys, err := minilock.GenerateKey(jid, pass) + if err != nil { + return err + } + + for _, path := range paths { + if err := lockFile(keys, jid, pass, path); err != nil { + return err + } + } + + return nil +} + +func lockFile(keys *taber.Keys, jid, pass, path string) error { + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + encData := make([]byte, 0) + dir, base := filepath.Split(path) + + // This seemed to crash minilock otherwise: + if len(data) != 0 { + if encData, err = minilock.EncryptFileContents(base, data, keys, keys); err != nil { + return err + } + } + + err = ioutil.WriteFile(filepath.Join(dir, base+EncFileSuffix), encData, 0666) + if err != nil { + return err + } + + if err := os.Remove(path); err != nil { + return err + } + + return nil +} + +// unlockFileReal is the actual implementation of TryUnlock/UnlockFile +func unlockFileReal(keys *taber.Keys, jid, pass, path string, write bool) error { + encPath := path + EncFileSuffix + data, err := ioutil.ReadFile(encPath) + if err != nil { + return err + } + + _, decName, decData, err := minilock.DecryptFileContents(data, keys) + if err != nil { + return err + } + + if !write { + return nil + } + + decPath := filepath.Join(filepath.Dir(encPath), decName) + err = ioutil.WriteFile(decPath, decData, 0666) + if err != nil { + return err + } + + if err := os.Remove(encPath); err != nil { + return err + } + + return nil +} + +// UnlockFile reverses the effect of LockFile. +// +// NOTE: `path` is the path without EncFileSuffix, +// i.e. the same path as given to LockFile! 
+//
+// If the operation was successful, the decrypted file replaces the encrypted one.
+func UnlockFile(jid, pass, path string) error {
+	keys, err := minilock.GenerateKey(jid, pass)
+	if err != nil {
+		return err
+	}
+
+	return unlockFileReal(keys, jid, pass, path, true)
+}
+
+// UnlockFiles works like UnlockFile for many paths, but generates keys just once.
+func UnlockFiles(jid, pass string, paths []string) error {
+	keys, err := minilock.GenerateKey(jid, pass)
+	if err != nil {
+		return err
+	}
+
+	for _, path := range paths {
+		if err := unlockFileReal(keys, jid, pass, path, true); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// TryUnlock tries to unlock a file, if successful,
+// `path` will not be removed and no encrypted output is written.
+func TryUnlock(jid, pass, path string) error {
+	keys, err := minilock.GenerateKey(jid, pass)
+	if err != nil {
+		return err
+	}
+
+	return unlockFileReal(keys, jid, pass, path, false)
+}
+
+// EncryptMinilockMsg encrypts a given plaintext for multiple receivers.
+func EncryptMinilockMsg(jid, pass, plaintext string, mid ...string) (string, error) {
+	ciphertext, err := minilock.EncryptFileContentsWithStrings(
+		"Minilock Filename.",
+		[]byte(plaintext),
+		jid, pass, false, mid...,
+	)
+	if err != nil {
+		return "", err // was `return "", nil`: encryption failures were silently swallowed
+	}
+	return string(ciphertext), nil
+}
+
+// DecryptMinilockMsg decrypts a given ciphertext.
+func DecryptMinilockMsg(jid, pass, ciphertext string) (string, error) {
+	userKey, err := minilock.GenerateKey(jid, pass)
+	if err != nil {
+		return "", err // was `return "", nil`: key-gen failures were silently swallowed
+	}
+	_, _, plaintext, err = minilock.DecryptFileContents([]byte(ciphertext), userKey)
+	return string(plaintext), err
+}
+
+// GenerateMinilockID generates a base58-encoded pubkey + 1-byte blake2s checksum as a string
+func GenerateMinilockID(jid, pass string) (string, error) {
+	keys, err := minilock.GenerateKey(jid, pass)
+	if err != nil {
+		return "", err
+	}
+	return keys.EncodeID()
+}
diff --git a/repo/id_test.go b/repo/id_test.go
new file mode 100644
index 00000000..206b7fa5
--- /dev/null
+++ b/repo/id_test.go
@@ -0,0 +1,84 @@
+package repo
+
+import (
+	"fmt"
+	"testing"
+)
+
+type User struct {
+	Name     string
+	Jid      string
+	Password string
+	Mid      string
+}
+
+func encrypt(jid, password, secretMsg string, mid ...string) (string, error) {
+	ciphertext, err := EncryptMinilockMsg(jid, password, secretMsg, mid...)
+	return ciphertext, err
+}
+
+func maliciousUserHasDecrypted(decryptedText, originalText string, user *User, maliciousUsers ...*User) bool {
+	for _, maliciousUser := range maliciousUsers {
+		// message successfully decrypted by a malicious user?
+ if decryptedText == originalText && user == maliciousUser { + return true + } + } + return false +} + +func (u *User) String() string { + return fmt.Sprintf("%s", u.Name) +} + +func TestID(t *testing.T) { + + // sender + alice := &User{ + "Alice", + "alice@jabber.de/laptop", + "3lrj;2lq3rj;lkqjwflkjwf", + "Jw7xyd3jrG4d4TkQmUzDKLwbH9RPcEV47SAFRJtCEFY6c", + } + + // receivers + bob := &User{ + "Bob,", + "bob@jabber.de/work", + "lk23j4lk2jlk3j4l2k3j12333", + "2JHpZWEypyBNxN1pe6mptBa4uFsNwj54r3DXegdLGuKanh", + } + bruce := &User{ + "Bruce", + "bruce@jabber.de/rsa", + "l3kjr;l33;)JLJK90092", + "j9VD7e2vgrxbxJX4i3ut4AGg47S8yoyJN5793ti1NNdWc", + } + + // indruder + micrathene := &User{ + "Micrathene", + "micrathene@jabber.de/forest", + "lijk3lk*(3l#KJ8#:Lk#", + "cewNAcGCRoqbB95JfgAyHFpXv4ka7hroUUkqQEx6vpdVE", + } + + originalText := "This is a very secret message." + receivers := []*User{bob, bruce} + receiverMids := []string{} + for _, receiver := range receivers { + receiverMids = append(receiverMids, receiver.Mid) + } + fmt.Printf("%s encrypts for %s\n", alice, receivers) + ciphertext, err := encrypt(alice.Jid, alice.Password, originalText, receiverMids...) 
+ if err != nil { + t.Log("Error enctypting plaintext.", err) + } + for _, user := range []*User{alice, bob, bruce, micrathene} { + decryptedtext, _ := DecryptMinilockMsg(user.Jid, user.Password, ciphertext) + if maliciousUserHasDecrypted(decryptedtext, originalText, user, micrathene, alice /* malicious users*/) { + t.Errorf("%s souldn't be able to decrypt the ciphertext.\n", user.Jid) + } + fmt.Printf("User %s tries to encrypt: %t\n", user, decryptedtext == originalText) + } +} diff --git a/repo/init.go b/repo/init.go new file mode 100644 index 00000000..ca1353bc --- /dev/null +++ b/repo/init.go @@ -0,0 +1,186 @@ +package repo + +import ( + "crypto/rand" + "io" + "os" + "path/filepath" + + "code.google.com/p/go-uuid/uuid" + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/repo/config" + "github.com/disorganizer/brig/repo/global" + "github.com/disorganizer/brig/store" + "github.com/disorganizer/brig/util" + logutil "github.com/disorganizer/brig/util/log" + ipfsconfig "github.com/ipfs/go-ipfs/repo/config" + "github.com/ipfs/go-ipfs/repo/fsrepo" +) + +// NewRepository creates a new repository at filesystem level +// and returns a Repository interface +func NewRepository(jid, pwd, folder string) (*Repository, error) { + absFolderPath, err := filepath.Abs(folder) + if err != nil { + return nil, err + } + + if _, err = os.Stat(absFolderPath); os.IsExist(err) { + return nil, err + } + + if err := createRepositoryTree(absFolderPath); err != nil { + return nil, err + } + + cfg := config.CreateDefaultConfig() + minilockID, err := GenerateMinilockID(jid, pwd) + if err != nil { + return nil, err + } + + configDefaults := map[string]interface{}{ + "repository.jid": jid, + "repository.uuid": uuid.NewRandom().String(), + "repository.mid": minilockID, + "ipfs.path": filepath.Join(absFolderPath, ".brig", "ipfs"), + } + + for key, value := range configDefaults { + if err = cfg.Set(key, value); err != nil { + return nil, err + } + } + + configPath := 
filepath.Join(absFolderPath, ".brig", "config") + if _, err := config.SaveConfig(configPath, cfg); err != nil { + return nil, err + } + + return LoadRepository(pwd, absFolderPath) +} + +// CloneRepository clones a brig repository in a git like way +func CloneRepository() *Repository { + return nil +} + +// LoadRepository load a brig repository from a given folder. +func LoadRepository(pwd, folder string) (*Repository, error) { + absFolderPath, err := filepath.Abs(folder) + if err != nil { + return nil, err + } + + brigPath := filepath.Join(absFolderPath, ".brig") + cfg, err := config.LoadConfig(filepath.Join(brigPath, "config")) + if err != nil { + return nil, err + } + + configValues := map[string]string{ + "repository.jid": "", + "repository.mid": "", + "repository.uuid": "", + } + + for key := range configValues { + configValues[key], err = cfg.String(key) + if err != nil { + return nil, err + } + } + + // Init the global repo (similar to .gitconfig) + globalRepo, err := global.New() + if err != nil { + return nil, err + } + + globalRepo.AddRepo(global.RepoListEntry{ + UniqueID: configValues["repository.uuid"], + RepoPath: folder, + DaemonPort: 6666, + IpfsPort: 4001, + }) + + store, err := store.Open(brigPath) + if err != nil { + return nil, err + } + + repo := Repository{ + Jid: configValues["repository.jid"], + Mid: configValues["repository.mid"], + Folder: absFolderPath, + InternalFolder: brigPath, + UniqueID: configValues["repository.uuid"], + Config: cfg, + globalRepo: globalRepo, + Store: store, + Password: pwd, + } + + return &repo, nil +} + +func createRepositoryTree(absFolderPath string) error { + if err := os.Mkdir(absFolderPath, 0755); err != nil { + return err + } + + brigPath := filepath.Join(absFolderPath, ".brig") + if err := os.Mkdir(brigPath, 0755); err != nil { + return err + } + + ipfsPath := filepath.Join(brigPath, "ipfs") + if err := os.Mkdir(ipfsPath, 0755); err != nil { + return err + } + + empties := []string{"index.bolt", "otr.key", 
"otr.buddies"} + for _, empty := range empties { + fullPath := filepath.Join(brigPath, empty) + if err := util.Touch(fullPath); err != nil { + return err + } + } + + // Make the key larger than needed: + if err := createMasterKey(brigPath, 1024); err != nil { + return err + } + + return CreateIpfsRepo(ipfsPath) +} + +func createMasterKey(brigPath string, keySize int) error { + keyPath := filepath.Join(brigPath, "master.key") + fd, err := os.OpenFile(keyPath, os.O_CREATE|os.O_WRONLY, 0755) + if err != nil { + return err + } + + defer util.Closer(fd) + + if _, err := io.CopyN(fd, rand.Reader, int64(keySize/8)); err != nil { + return err + } + + return nil +} + +func CreateIpfsRepo(ipfsRootPath string) error { + logger := &logutil.Writer{Level: log.InfoLevel} + cfg, err := ipfsconfig.Init(logger, 2048) + if err != nil { + return err + } + + if err := fsrepo.Init(ipfsRootPath, cfg); err != nil { + return err + } + + return nil +} diff --git a/repo/locate.go b/repo/locate.go new file mode 100644 index 00000000..12df40ec --- /dev/null +++ b/repo/locate.go @@ -0,0 +1,66 @@ +package repo + +import ( + log "github.com/Sirupsen/logrus" + "os" + "path/filepath" +) + +// IsRepo checks if `folder` contains a brig repository. +// Currently, this is implemented by checkin for the hidden .brig folder, +// but this behaviour might change in the future. +func IsRepo(folder string) bool { + file, err := os.Stat(filepath.Join(folder, ".brig")) + if err != nil { + return false + } + + return file.IsDir() +} + +// FindRepo checks if `folder` or any of it's parents contains a brig +// repository. It uses IsRepo() to check if the folder is a repository. +// The path works on both relative and absolute paths. 
+func FindRepo(folder string) string { + curr, err := filepath.Abs(folder) + if err != nil { + return "" + } + + for curr != "" { + if IsRepo(curr) { + return curr + } + + // Try in the parent directory: + dirname := filepath.Dir(curr) + if dirname == curr { + break + } + + curr = dirname + } + return "" +} + +// GuessFolder tries to find the desired brig repo by heuristics. +// Current heuristics: check env var BRIG_PATH, then the working dir. +// On failure, it will return an empty string. +func GuessFolder() string { + wd := os.Getenv("BRIG_PATH") + if wd == "" { + var err error + wd, err = os.Getwd() + if err != nil { + log.Errorf("Unable to fetch working dir: %q", err) + return "" + } + } + + actualPath := FindRepo(wd) + if actualPath == "" { + log.Errorf("Unable to find repo in path or any parents: %q", wd) + } + + return actualPath +} diff --git a/repo/locate_test.go b/repo/locate_test.go new file mode 100644 index 00000000..71fb534b --- /dev/null +++ b/repo/locate_test.go @@ -0,0 +1,51 @@ +package repo + +import ( + "os" + "path/filepath" + "testing" +) + +var ( + TestPath = filepath.Join(os.TempDir(), "brig-test") + TestPathEmpty = filepath.Join(TestPath, "a", "b", "c", "d") + TestPathRepo = filepath.Join(TestPath, "a", ".brig") +) + +func createTestDir() { + for _, dir := range []string{TestPathEmpty, TestPathRepo} { + if err := os.MkdirAll(dir, 0777); err != nil { + panic(err) + } + } +} + +func purgeTestDir() { + err := os.RemoveAll(TestPath) + if err != nil { + panic(err) + } +} + +func TestFindRepo(t *testing.T) { + createTestDir() + defer purgeTestDir() + + tests := []struct { + input string + want string + }{ + {TestPath, ""}, + {TestPathEmpty, filepath.Dir(TestPathRepo)}, + {TestPathRepo, filepath.Dir(TestPathRepo)}, + {filepath.Dir(TestPathRepo), filepath.Dir(TestPathRepo)}, + } + + for _, test := range tests { + got := FindRepo(test.input) + if got != test.want { + t.Errorf("\nFindRepo(%q) == %q\nexpected: %q", + test.input, got, test.want) + } + 
} +} diff --git a/repo/open.go b/repo/open.go new file mode 100644 index 00000000..c9409dda --- /dev/null +++ b/repo/open.go @@ -0,0 +1,107 @@ +package repo + +import ( + "fmt" + "os" + "path/filepath" + + log "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/repo/config" +) + +// Filenames that will be encrypted on close: +var filenames = []string{ + "index.bolt", + "master.key", + // "otr.buddies", + // "otr.key", +} + +func lookupJid(configPath string) (string, error) { + cfg, err := config.LoadConfig(configPath) + if err != nil { + return "", fmt.Errorf("Could not load config: %v", err) + } + + jid, err := cfg.String("repository.jid") + if err != nil { + return "", fmt.Errorf("No jid in config: %v", err) + } + + return jid, nil +} + +// Open unencrypts all sensible data in the repository. +func Open(pwd, folder string) (*Repository, error) { + absFolderPath, err := filepath.Abs(folder) + brigPath := filepath.Join(absFolderPath, ".brig") + + // Figure out the JID from the config: + jid, err := lookupJid(filepath.Join(brigPath, "config")) + if err != nil { + return nil, err + } + + // Unlock all files: + absNames := make([]string, 0) + for _, name := range filenames { + absName := filepath.Join(brigPath, name) + if _, err := os.Stat(absName); err == nil { + // File exists, this might happen on a crash or killed daemon. + log.Warningf("File is already unlocked: %s", absName) + continue + } + + absNames = append(absNames, absName) + } + + if err := UnlockFiles(jid, pwd, absNames); err != nil { + return nil, err + } + + return LoadRepository(pwd, absFolderPath) +} + +// Close encrypts sensible files in the repository. +// The password is taken from Repository.Password. +func (r *Repository) Close() error { + absNames := make([]string, 0) + + for _, name := range filenames { + absName := filepath.Join(r.InternalFolder, name) + if _, err := os.Stat(absName); os.IsNotExist(err) { + // File does not exist. Might be already locked. 
+ log.Warningf("File is already locked: %s", absName) + continue + } + + log.Infof("Locking file `%v`...", absName) + absNames = append(absNames, absName) + } + + fmt.Println(absNames) + if err := LockFiles(r.Jid, r.Password, absNames); err != nil { + return err + } + + return nil +} + +// CheckPassword tries to decrypt a file in the repository. +// If that does not work, an error is returned. +func CheckPassword(folder, pwd string) error { + absFolderPath, err := filepath.Abs(folder) + brigPath := filepath.Join(absFolderPath, ".brig") + + jid, err := lookupJid(filepath.Join(brigPath, "config")) + if err != nil { + return err + } + + absName := filepath.Join(brigPath, "master.key") + if err := TryUnlock(jid, pwd, absName); err != nil { + return err + } + + return nil +} diff --git a/repo/pwd-util/pwd-util.go b/repo/pwd-util/pwd-util.go new file mode 100644 index 00000000..c571f05c --- /dev/null +++ b/repo/pwd-util/pwd-util.go @@ -0,0 +1,23 @@ +package main + +import ( + "crypto/rand" + "fmt" + + "github.com/disorganizer/brig/repo" + "github.com/disorganizer/brig/util/security" +) + +func main() { + pwd, err := repo.PromptNewPassword(40.0) + if err != nil { + fmt.Println("Failed: ", err) + return + } + + salt := make([]byte, 32) + rand.Reader.Read(salt) + + key := security.Scrypt([]byte(pwd), salt, 32) + fmt.Printf("Key: %x\nSalt: %x\n", key, salt) +} diff --git a/repo/pwd.go b/repo/pwd.go new file mode 100644 index 00000000..1efa2143 --- /dev/null +++ b/repo/pwd.go @@ -0,0 +1,192 @@ +package repo + +import ( + "bytes" + "fmt" + + "github.com/chzyer/readline" + + "github.com/disorganizer/brig/util" + "github.com/disorganizer/brig/util/colors" + zxcvbn "github.com/nbutton23/zxcvbn-go" +) + +const ( + msgLowEntropy = "⚠ Please enter a password with at least %g bits entropy." + msgReEnter = "✔ Well done! Please re-type your password now:" + msgBadPassword = "⚠ This did not seem to match. Please try again." 
+ msgMaxTriesHit = "⚡ Maximum number of password tries exceeded: %d" +) + +func doPromptLine(rl *readline.Instance, prompt string, hide bool) (string, error) { + var line = "" + var bytepwd []byte + var err error + + if hide { + bytepwd, err = rl.ReadPassword(prompt) + line = string(bytepwd) + } else { + line, err = rl.Readline() + } + + if err != nil { + return "", err + } + + return line, nil +} + +func createStrengthPrompt(password []rune, prefix string) string { + symbol, color := "", colors.Red + strength := zxcvbn.PasswordStrength(string(password), nil) + + switch { + case strength.Score <= 1: + symbol = "✗" + color = colors.Red + case strength.Score <= 2: + symbol = "⚡" + color = colors.Magenta + case strength.Score <= 3: + symbol = "⚠" + color = colors.Yellow + case strength.Score <= 4: + symbol = "✔" + color = colors.Green + } + + prompt := colors.Colorize(symbol, color) + if strength.Entropy > 0 { + entropy := fmt.Sprintf(" %3.0f", strength.Entropy) + prompt += colors.Colorize(entropy, colors.Cyan) + } else { + prompt += colors.Colorize(" ENT", colors.Cyan) + } + + prompt += colors.Colorize(" "+prefix+"passphrase: ", color) + return prompt +} + +// PromptNewPassword asks the user to input a password. +// +// While typing, the user gets feedback by the prompt color, +// which changes with the security of the password to green. +// Additionally the entrtopy of the password is shown. +// If minEntropy was not reached after hitting enter, +// this function will log a message and ask the user again. 
+func PromptNewPassword(minEntropy float64) ([]byte, error) { + rl, err := readline.New("") + if err != nil { + return nil, err + } + defer util.Closer(rl) + + passwordCfg := rl.GenPasswordConfig() + passwordCfg.SetListener(func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + rl.SetPrompt(createStrengthPrompt(line, " New ")) + rl.Refresh() + return nil, 0, false + }) + + pwd := []byte{} + + for { + pwd, err = rl.ReadPasswordWithConfig(passwordCfg) + if err != nil { + return nil, err + } + + strength := zxcvbn.PasswordStrength(string(pwd), nil) + if strength.Entropy >= minEntropy { + break + } + + fmt.Printf(colors.Colorize(msgLowEntropy, colors.Yellow)+"\n", minEntropy) + } + + passwordCfg.SetListener(func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + rl.SetPrompt(createStrengthPrompt(line, "Retype ")) + rl.Refresh() + return nil, 0, false + }) + + fmt.Println(colors.Colorize(msgReEnter, colors.Green)) + + for { + newPwd, err := rl.ReadPasswordWithConfig(passwordCfg) + if err != nil { + return nil, err + } + + if bytes.Equal(pwd, newPwd) { + break + } + + fmt.Println(colors.Colorize(msgBadPassword, colors.Yellow)) + } + + return pwd, nil +} + +func promptPasswordColored(color int) (string, error) { + prompt := "Password: " + if color > 0 { + prompt = colors.Colorize(prompt, color) + } + + rl, err := readline.New(prompt) + if err != nil { + return "", err + } + defer util.Closer(rl) + + return doPromptLine(rl, prompt, true) +} + +// PromptPassword just opens an uncolored password prompt. +// +// The password is not echo'd to stdout for safety reasons. 
+func PromptPassword() (string, error) { + return promptPasswordColored(0) +} + +// ErrTooManyTries happens when the user failed the password check too often +type ErrTooManyTries struct { + Tries int +} + +func (e ErrTooManyTries) Error() string { + return fmt.Sprintf(msgMaxTriesHit, e.Tries) +} + +var triesToColor = map[int]int{ + 0: colors.Green, + 1: colors.Yellow, + 2: colors.Magenta, + 3: colors.Red, +} + +// PromptPasswordMaxTries tries to read a password maxTries times. +// +// The typed password can be validated by the caller via the passfn function. +// If the user failed to pass the correct password, ErrTooManyTries is returned. +// For visual guidance the prompt color will gradually change from green to red +// with each failed try. +func PromptPasswordMaxTries(maxTries int, passfn func(string) bool) (string, error) { + for i := 0; i < maxTries; i++ { + color := triesToColor[util.Min(i, len(triesToColor))] + pwd, err := promptPasswordColored(color) + if err != nil { + return "", err + } + + if !passfn(pwd) { + continue + } + + return pwd, err + } + + return "", ErrTooManyTries{maxTries} +} diff --git a/repo/repo.go b/repo/repo.go new file mode 100644 index 00000000..a5172dc0 --- /dev/null +++ b/repo/repo.go @@ -0,0 +1,31 @@ +package repo + +import ( + "github.com/disorganizer/brig/repo/global" + "github.com/disorganizer/brig/store" + yamlConfig "github.com/olebedev/config" +) + +// Repository represents a handle to one physical brig repository. +// It groups the APIs to all useful files in it. 
+type Repository struct { + // Repository is identified by a XMPP Account: name@domain.tld/ressource + Jid string + + // Minilock ID + Mid string + + // Folder of repository + Folder string + InternalFolder string + + // UUID which represents a unique repository + UniqueID string + + // User supplied password: + Password string + + Config *yamlConfig.Config + Store *store.Store + globalRepo *global.Repository +} diff --git a/store/commit.go b/store/commit.go new file mode 100644 index 00000000..0ed798c7 --- /dev/null +++ b/store/commit.go @@ -0,0 +1,63 @@ +package store + +// import ( +// multihash "github.com/jbenet/go-multihash" +// "time" +// ) +// +// const ( +// // ChangeInvalid indicates a bug. +// ChangeInvalid = iota +// +// // The file was newly added. +// ChangeAdd +// +// // The file was modified +// ChangeModify +// +// // The file was removed. +// ChangeRemove +// ) +// +// type ChangeType byte +// +// // Commit groups a change set +// type Commit struct { +// // Optional commit message +// Message string +// +// // Time at this commit was conceived. +// ModTime time.Time +// +// // Set of files that were changed. 
+// Changes map[string]ChangeType +// +// // Parent commit (only nil for initial commit) +// Parent *Commit +// } +// +// func (c *Commit) Hash() multihash.Multihash { +// // TODO +// return nil +// } +// +// func (c *Commit) Contains(file File) ChangeType { +// if c == nil { +// return ChangeInvalid +// } +// +// if changeType, ok := c.Changes[file]; ok { +// return changeType +// } +// +// return c.Parent.Contains(file) +// } +// +// func NewCommit(parent *Commit, msg string, diffMap map[File]ChangeType) *Commit { +// return &Commit{ +// Message: msg, +// ModTime: time.Now(), +// Parent: parent, +// Changes: diffMap, +// } +// } diff --git a/store/compress/compress.go b/store/compress/compress.go new file mode 100644 index 00000000..8800715d --- /dev/null +++ b/store/compress/compress.go @@ -0,0 +1,114 @@ +package compress + +import ( + "io" + "os" + + "github.com/golang/snappy" +) + +// Compress the file at src to dst. +func CompressFile(src, dst string) (int64, error) { + fdFrom, err := os.OpenFile(src, os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer fdFrom.Close() + + fdTo, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return 0, err + } + defer fdTo.Close() + + return Compress(fdFrom, fdTo) +} + +// Decompress the file at src to dst. +func DecompressFile(src, dst string) (int64, error) { + fdFrom, err := os.OpenFile(src, os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer fdFrom.Close() + + fdTo, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return 0, err + } + defer fdTo.Close() + + return Decompress(fdFrom, fdTo) +} + +// Compress represents a layer stream compression. +// As input src and dst io.Reader/io.Writer is expected. +func Compress(src io.Reader, dst io.Writer) (int64, error) { + return io.Copy(snappy.NewWriter(dst), src) +} + +// Decompress represents a layer for stream decompression. +// As input src and dst io.Reader/io.Writer is expected. 
+func Decompress(src io.Reader, dst io.Writer) (int64, error) {
+	return io.Copy(dst, snappy.NewReader(src))
+}
+
+// reader undoes the framing produced by writer: the first byte of the
+// stream is a marker that tells whether the remaining payload is
+// snappy-compressed (> 0) or raw (0).
+type reader struct {
+	// rRaw is the untouched underlying stream.
+	rRaw io.Reader
+	// rZip reads rRaw through a snappy decompressor.
+	rZip io.Reader
+	// wasCompressed caches the decoded marker byte (marker > 0).
+	wasCompressed bool
+	// readHeader remembers whether the marker byte was consumed already.
+	readHeader bool
+}
+
+// Read implements io.Reader. The first call consumes the one-byte marker;
+// afterwards data is served either from the raw stream or through the
+// snappy decompressor, depending on the marker.
+func (r *reader) Read(buf []byte) (int, error) {
+	if !r.readHeader {
+		r.readHeader = true
+
+		marker := make([]byte, 1)
+		// NOTE(review): a single Read() may legally return (0, nil)
+		// without filling marker; io.ReadFull would be more robust
+		// here — confirm against the streams used by callers.
+		if _, err := r.rRaw.Read(marker); err != nil {
+			return 0, err
+		}
+
+		r.wasCompressed = marker[0] > 0
+	}
+
+	if r.wasCompressed {
+		return r.rZip.Read(buf)
+	}
+
+	return r.rRaw.Read(buf)
+}
+
+// NewReader returns a new compression Reader.
+func NewReader(r io.Reader) io.Reader {
+	return &reader{
+		rRaw: r,
+		rZip: snappy.NewReader(r),
+	}
+}
+
+// writer prefixes the stream with a one-byte marker (see reader above)
+// and snappy-compresses everything written to it.
+type writer struct {
+	// wRaw is the untouched underlying stream (only used for the marker).
+	wRaw io.Writer
+	// wZip writes to wRaw through a snappy compressor.
+	wZip io.Writer
+	// headerWritten remembers whether the marker byte was emitted already.
+	headerWritten bool
+}
+
+// Write implements io.Writer. The first call emits the marker byte `1`
+// (data is always compressed on the write side); all payload then goes
+// through the snappy compressor.
+func (w *writer) Write(buf []byte) (int, error) {
+	if !w.headerWritten {
+		w.headerWritten = true
+
+		if _, err := w.wRaw.Write([]byte{1}); err != nil {
+			return 0, err
+		}
+	}
+
+	return w.wZip.Write(buf)
+}
+
+// NewWriter returns a new compression Writer.
+func NewWriter(w io.Writer) io.Writer { + return &writer{ + wRaw: w, + wZip: snappy.NewWriter(w), + } +} diff --git a/store/compress/compress_test.go b/store/compress/compress_test.go new file mode 100644 index 00000000..043f8b7d --- /dev/null +++ b/store/compress/compress_test.go @@ -0,0 +1,56 @@ +package compress + +import ( + "bytes" + "io/ioutil" + "os" + "testing" + + "github.com/disorganizer/brig/util/testutil" +) + +func testDecAndCompress(t *testing.T, size int64) { + path := testutil.CreateFile(size) + + compressedPath := path + ".pack" + decompressedPath := path + ".unpack" + + defer os.Remove(path) + defer os.Remove(compressedPath) + defer os.Remove(decompressedPath) + + if _, err := CompressFile(path, compressedPath); err != nil { + t.Errorf("File compression failed: %v", err) + return + } + + if _, err := DecompressFile(compressedPath, decompressedPath); err != nil { + t.Errorf("File decompression failed: %v", err) + return + } + + a, _ := ioutil.ReadFile(path) + b, _ := ioutil.ReadFile(decompressedPath) + c, _ := ioutil.ReadFile(compressedPath) + + if !bytes.Equal(a, b) { + t.Errorf("Source and decompressed not equal") + } + + if bytes.Equal(a, c) && size != 0 { + t.Errorf("Source was not compressed (same as source)") + } +} + +func TestDecAndCompress(t *testing.T) { + sizes := []int64{0, 1, 1024, 1024 * 1024} + for _, size := range sizes { + testDecAndCompress(t, size) + } +} + +func BenchmarkCompress(b *testing.B) { + for n := 0; n < b.N; n++ { + testDecAndCompress(nil, 1024*1024*10) + } +} diff --git a/store/encrypt/format.go b/store/encrypt/format.go new file mode 100644 index 00000000..90e101f1 --- /dev/null +++ b/store/encrypt/format.go @@ -0,0 +1,192 @@ +// Package encrypt implements the encryption layer of brig. +// The file format used looks something like this: +// +// [HEADER][[BLOCKHEADER][PAYLOAD]...] 
+// +// HEADER is 20 bytes big and contains the following fields: +// - 8 Byte: Magic number (to identify non-brig files quickly) +// - 2 Byte: Format version +// - 2 Byte: Used cipher type (ChaCha20 or AES-GCM) +// - 4 Byte: Key length in bytes. +// - 4 Byte: Maximum size of each block (last may be less) +// +// BLOCKHEADER contains the following fields: +// - 8 Byte: Nonce/Block number +// +// PAYLOAD contains the actual encrypted data, possibly with padding. +// +// All metadata is encoded in big endian. +// +// Reader/Writer are capable or reading/writing this format. +// Additionally, both support efficient seeking into the encrypted data, +// provided the underlying datastream supports seeking. +package encrypt + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "encoding/binary" + "fmt" + "io" + + chacha "github.com/codahale/chacha20poly1305" +) + +// Possible ciphers in Counter mode: +const ( + aeadCipherChaCha = iota + aeadCipherAES +) + +// Other constants: +const ( + // Size of the initial header: + headerSize = 20 + + // Chacha20 appears to be twice as fast as AES-GCM on my machine + defaultCipherType = aeadCipherChaCha + + // MaxBlockSize is the maximum number of bytes a single payload may have + MaxBlockSize = 1 * 1024 * 1024 + + // GoodEncBufferSize is the recommended size of buffers + GoodEncBufferSize = MaxBlockSize + 32 + + // GoodDecBufferSize is the recommended size of buffers + GoodDecBufferSize = MaxBlockSize +) + +// KeySize of the used cipher's key in bytes. 
+var KeySize = chacha.KeySize
+
+////////////////////
+// Header Parsing //
+////////////////////
+
+// GenerateHeader creates a valid header for the format file
+func GenerateHeader() []byte {
+	// This is in big endian:
+	header := []byte{
+		// Brigs magic number (8 Byte):
+		0x6d, 0x6f, 0x6f, 0x73,
+		0x65, 0x63, 0x61, 0x74,
+		// File format version (2 Byte):
+		0x0, 0x1,
+		// Cipher type (2 Byte):
+		defaultCipherType >> 8,
+		defaultCipherType & 0xff,
+		// Key length (4 Byte), filled in below:
+		0, 0, 0, 0,
+		// Block length (4 Byte), filled in below:
+		0, 0, 0, 0,
+	}
+
+	binary.BigEndian.PutUint32(header[12:16], uint32(KeySize))
+	binary.BigEndian.PutUint32(header[16:20], uint32(MaxBlockSize))
+	return header
+}
+
+// ParseHeader parses the header of the format file.
+// Returns the format version, cipher type, keylength and block length. If
+// parsing fails, an error is returned.
+func ParseHeader(header []byte) (format uint16, cipher uint16, keylen uint32, blocklen uint32, err error) {
+	// Guard against truncated input; the previous version paniced on
+	// slices shorter than headerSize.
+	if len(header) < headerSize {
+		err = fmt.Errorf("Header too short: %d bytes (need %d)", len(header), headerSize)
+		return
+	}
+
+	expected := GenerateHeader()
+	if !bytes.Equal(header[:8], expected[:8]) {
+		err = fmt.Errorf("Magic number in header differs")
+		return
+	}
+
+	format = binary.BigEndian.Uint16(header[8:10])
+	cipher = binary.BigEndian.Uint16(header[10:12])
+	switch cipher {
+	case aeadCipherAES:
+	case aeadCipherChaCha:
+		// we support this!
+	default:
+		err = fmt.Errorf("Unknown cipher type: %d", cipher)
+		return
+	}
+
+	keylen = binary.BigEndian.Uint32(header[12:16])
+	blocklen = binary.BigEndian.Uint32(header[16:20])
+	if blocklen != MaxBlockSize {
+		err = fmt.Errorf("Unsupported block length in header: %d", blocklen)
+	}
+
+	return
+}
+
+//////////////////////
+// Common Utilities //
+//////////////////////
+
+// createAEADWorker constructs the AEAD cipher matching cipherType
+// (aeadCipherAES or aeadCipherChaCha) for the given key.
+func createAEADWorker(cipherType uint16, key []byte) (cipher.AEAD, error) {
+	switch cipherType {
+	case aeadCipherAES:
+		block, err := aes.NewCipher(key)
+		if err != nil {
+			return nil, err
+		}
+		return cipher.NewGCM(block)
+	case aeadCipherChaCha:
+		return chacha.New(key)
+	}
+
+	return nil, fmt.Errorf("No such cipher type: %d", cipherType)
+}
+
+// aeadCommon bundles the state shared by the encrypting Writer and
+// the decrypting Reader.
+type aeadCommon struct {
+	// Nonce that form the first aead.NonceSize() bytes
+	// of the output
+	nonce []byte
+
+	// For more information, see:
+	// https://en.wikipedia.org/wiki/Authenticated_encryption
+	aead cipher.AEAD
+
+	// Buffer for encrypted data (MaxBlockSize + overhead)
+	encBuf []byte
+
+	// Buffer for decrypted data (MaxBlockSize)
+	decBuf []byte
+}
+
+// initAeadCommon sets up the AEAD worker, the nonce and the scratch
+// buffers shared by Reader and Writer.
+func (c *aeadCommon) initAeadCommon(key []byte, cipherType uint16) error {
+	aead, err := createAEADWorker(cipherType, key)
+	if err != nil {
+		return err
+	}
+
+	c.nonce = make([]byte, aead.NonceSize())
+	c.aead = aead
+
+	c.encBuf = make([]byte, 0, MaxBlockSize+aead.Overhead())
+	c.decBuf = make([]byte, 0, MaxBlockSize)
+
+	return nil
+}
+
+// Encrypt is a utility function which encrypts the data from source with key
+// and writes the resulting encrypted data to dest.
+func Encrypt(key []byte, source io.Reader, dest io.Writer) (int64, error) {
+	layer, err := NewWriter(dest, key)
+	if err != nil {
+		return 0, err
+	}
+
+	n, err := io.CopyBuffer(layer, source, make([]byte, GoodEncBufferSize))
+
+	// Close flushes the last (possibly partial) block. The previous
+	// deferred Close silently discarded its error, reporting success
+	// even if the final block was never written.
+	if cerr := layer.Close(); cerr != nil && err == nil {
+		err = cerr
+	}
+
+	return n, err
+}
+
+// Decrypt is a utility function which decrypts the data from source with key
+// and writes the resulting encrypted data to dest.
+func Decrypt(key []byte, source io.Reader, dest io.Writer) (int64, error) { + layer, err := NewReader(source, key) + if err != nil { + return 0, err + } + + defer layer.Close() + return io.CopyBuffer(dest, layer, make([]byte, GoodDecBufferSize)) +} diff --git a/store/encrypt/format_test.go b/store/encrypt/format_test.go new file mode 100644 index 00000000..df833f12 --- /dev/null +++ b/store/encrypt/format_test.go @@ -0,0 +1,180 @@ +package encrypt + +import ( + "bytes" + "fmt" + "github.com/disorganizer/brig/util/testutil" + "io" + "io/ioutil" + "log" + "os" + "testing" +) + +var TestKey = []byte("01234567890ABCDE01234567890ABCDE") + +func encryptFile(key []byte, from, to string) (int64, error) { + fdFrom, _ := os.Open(from) + defer fdFrom.Close() + + fdTo, _ := os.OpenFile(to, os.O_CREATE|os.O_WRONLY, 0755) + defer fdTo.Close() + + return Encrypt(key, fdFrom, fdTo) +} + +func decryptFile(key []byte, from, to string) (int64, error) { + fdFrom, _ := os.Open(from) + defer fdFrom.Close() + + fdTo, _ := os.OpenFile(to, os.O_CREATE|os.O_WRONLY, 0755) + defer fdTo.Close() + + return Decrypt(key, fdFrom, fdTo) +} + +func testSimpleEncDec(t *testing.T, size int) { + path := testutil.CreateFile(int64(size)) + defer os.Remove(path) + + encPath := path + "_enc" + decPath := path + "_dec" + + _, err := encryptFile(TestKey, path, encPath) + defer os.Remove(encPath) + + if err != nil { + log.Println(err) + t.Errorf("Encrypt failed: %v", err) + } + + _, err = decryptFile(TestKey, encPath, decPath) + defer os.Remove(decPath) + + if err != nil { + log.Println(err) + t.Errorf("Decrypt failed: %v", err) + } + + a, _ := ioutil.ReadFile(path) + b, _ := ioutil.ReadFile(decPath) + c, _ := ioutil.ReadFile(encPath) + + if !bytes.Equal(a, b) { + t.Errorf("Source and decrypted not equal") + } + + if bytes.Equal(a, c) { + t.Errorf("Source was not encrypted (same as source)") + } +} + +func TestSimpleEncDec(t *testing.T) { + t.Parallel() + + sizes := []int{ + 0, + 1, + MaxBlockSize - 1, + 
MaxBlockSize, + MaxBlockSize + 1, + GoodDecBufferSize - 1, + GoodDecBufferSize, + GoodDecBufferSize + 1, + GoodEncBufferSize - 1, + GoodEncBufferSize, + GoodEncBufferSize + 1, + } + + for size := range sizes { + testSimpleEncDec(t, size) + } +} + +func TestSeek(t *testing.T) { + N := int64(2 * MaxBlockSize) + a := testutil.CreateDummyBuf(N) + b := make([]byte, 0, N) + + source := bytes.NewBuffer(a) + shared := &bytes.Buffer{} + dest := bytes.NewBuffer(b) + + encLayer, err := NewWriter(shared, TestKey) + if err != nil { + panic(err) + } + + buf := make([]byte, GoodEncBufferSize) + + // Encrypt: + _, err = io.CopyBuffer(encLayer, source, buf) + if err != nil { + panic(err) + } + + // This needs to be here, since close writes + // left over data to the write stream + encLayer.Close() + + sharedReader := bytes.NewReader(shared.Bytes()) + decLayer, err := NewReader(sharedReader, TestKey) + if err != nil { + panic(err) + } + defer decLayer.Close() + + seekTest := int64(MaxBlockSize) + pos, err := decLayer.Seek(seekTest, os.SEEK_SET) + if err != nil { + t.Errorf("Seek error'd: %v", err) + return + } + + if pos != seekTest { + t.Errorf("Seek is a bad jumper: %d (should %d)", pos, MaxBlockSize) + return + } + + pos, _ = decLayer.Seek(0, os.SEEK_CUR) + if pos != seekTest { + t.Errorf("SEEK_CUR(0) deliverd wrong status") + return + } + + pos, _ = decLayer.Seek(seekTest/2, os.SEEK_CUR) + if pos != seekTest+seekTest/2 { + t.Errorf("SEEK_CUR jumped to the wrong pos: %d", pos) + } + + pos, _ = decLayer.Seek(-seekTest, os.SEEK_CUR) + if pos != seekTest/2 { + t.Errorf("SEEK_CUR does not like negative indices: %d", pos) + } + + pos, _ = decLayer.Seek(seekTest/2, os.SEEK_CUR) + if pos != seekTest { + t.Errorf("SEEK_CUR has problems after negative indices: %d", pos) + } + + // Decrypt: + _, err = io.CopyBuffer(dest, decLayer, buf) + if err != nil { + t.Errorf("Decrypt failed: %v", err) + return + } + + if !bytes.Equal(a[seekTest:], dest.Bytes()) { + b := dest.Bytes() + fmt.Printf("AAA 
%d %x %x\n", len(a), a[:10], a[len(a)-10:]) + fmt.Printf("BBB %d %x %x\n", len(b), b[:10], b[len(b)-10:]) + t.Errorf("Buffers are not equal") + return + } +} + +func BenchmarkEncDec(b *testing.B) { + for n := 0; n < b.N; n++ { + testSimpleEncDec(nil, MaxBlockSize*100) + } +} diff --git a/store/encrypt/reader.go b/store/encrypt/reader.go new file mode 100644 index 00000000..3c65269d --- /dev/null +++ b/store/encrypt/reader.go @@ -0,0 +1,187 @@ +package encrypt + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// Reader decrypts and encrypted datastream from Reader. +type Reader struct { + aeadCommon + + // Underlying io.Reader + Reader io.Reader + + // Caches leftovers from unread blocks + backlog *bytes.Reader + + // Last index of the byte the user visited. + // (Used to avoid re-reads in Seek()) + // This does *not* equal the seek offset of the underlying stream. + lastSeekPos int64 +} + +// Read from source and decrypt. +// +// This method always decrypts one block to optimize for continous reads. If +// dest is too small to hold the block, the decrypted text is cached for the +// next read. +func (r *Reader) Read(dest []byte) (int, error) { + readBytes := 0 + + // Try our best to fill len(dest) + for readBytes < len(dest) { + if r.backlog.Len() == 0 { + if _, err := r.readBlock(); err != nil { + return readBytes, err + } + } + + n, _ := r.backlog.Read(dest[readBytes:]) + readBytes += n + r.lastSeekPos += int64(n) + } + + return readBytes, nil +} + +// Fill internal buffer with current block +func (r *Reader) readBlock() (int, error) { + if n, err := r.Reader.Read(r.nonce); err != nil { + return 0, err + } else if n != r.aead.NonceSize() { + return 0, fmt.Errorf("Nonce size mismatch. Should: %d. 
Have: %d", + r.aead.NonceSize(), n) + } + + // Read the *whole* block from the fs + N := MaxBlockSize + r.aead.Overhead() + n, err := io.ReadAtLeast(r.Reader, r.encBuf[:N], N) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return 0, err + } + + r.decBuf, err = r.aead.Open(r.decBuf[:0], r.nonce, r.encBuf[:n], nil) + if err != nil { + return 0, err + } + + r.backlog = bytes.NewReader(r.decBuf) + return len(r.decBuf), nil +} + +// Seek into the encrypted stream. +// +// Note that the seek offset is relative to the decrypted data, +// not to the underlying, encrypted stream. +// +// Mixing SEEK_CUR and SEEK_SET might not a good idea, +// since a seek might involve reading a whole encrypted block. +// Therefore relative seek offset +func (r *Reader) Seek(offset int64, whence int) (int64, error) { + // Check if seeking is supported: + seeker, ok := r.Reader.(io.ReadSeeker) + if !ok { + return 0, fmt.Errorf("Seek is not supported by underlying datastream") + } + + // Constants and assumption on the stream below: + blockSize := int64(MaxBlockSize) + blockHeaderSize := int64(r.aead.NonceSize()) + totalBlockSize := blockHeaderSize + blockSize + int64(r.aead.Overhead()) + + // absolute Offset in the decrypted stream + absOffsetDec := int64(0) + + // Convert possibly relative offset to absolute offset: + switch whence { + case os.SEEK_CUR: + absOffsetDec = r.lastSeekPos + offset + case os.SEEK_SET: + absOffsetDec = offset + case os.SEEK_END: + // We have no idea when the stream ends. 
+ return 0, fmt.Errorf("SEEK_END is not supported for encrypted data") + } + + if absOffsetDec < 0 { + return 0, fmt.Errorf("Negative seek index") + } + + // Caller wanted to know only the current stream pos: + if absOffsetDec == r.lastSeekPos { + return absOffsetDec, nil + } + + // Convert decrypted offset to encrypted offset + absOffsetEnc := headerSize + ((absOffsetDec / blockSize) * totalBlockSize) + + // Check if we're still in the same block as last time: + blockNum := absOffsetEnc / totalBlockSize + lastBlockNum := r.lastSeekPos / blockSize + r.lastSeekPos = absOffsetDec + + if lastBlockNum != blockNum { + // Seek to the beginning of the encrypted block: + if _, err := seeker.Seek(absOffsetEnc, os.SEEK_SET); err != nil { + return 0, err + } + + // Make read consume the current block: + if _, err := r.readBlock(); err != nil { + return 0, err + } + } + + // Reslice the backlog, so Read() does not return skipped data. + r.backlog.Seek(absOffsetDec%blockSize, os.SEEK_SET) + return absOffsetDec, nil +} + +// Close does finishing work. +// It does not close the underlying data stream. +// +// This is currently a No-Op, but you should not rely on that. +func (r *Reader) Close() error { + return nil +} + +// NewReader creates a new encrypted reader and validates the file header. +// The key is required to be KeySize bytes long. 
+func NewReader(r io.Reader, key []byte) (*Reader, error) { + reader := &Reader{ + Reader: r, + backlog: bytes.NewReader([]byte{}), + } + + header := make([]byte, headerSize) + n, err := reader.Reader.Read(header) + if err != nil { + return nil, err + } + + if n != headerSize { + return nil, fmt.Errorf("No valid header found, damaged file?") + } + + version, ciperType, keylen, _, err := ParseHeader(header) + if err != nil { + return nil, err + } + + if version != 1 { + return nil, fmt.Errorf("This implementation does not support versions != 1") + } + + if uint32(len(key)) != keylen { + return nil, fmt.Errorf("Key length differs: file=%d, user=%d", keylen, len(key)) + } + + if err := reader.initAeadCommon(key, ciperType); err != nil { + return nil, err + } + + return reader, nil +} diff --git a/store/encrypt/writer.go b/store/encrypt/writer.go new file mode 100644 index 00000000..b40e600d --- /dev/null +++ b/store/encrypt/writer.go @@ -0,0 +1,116 @@ +package encrypt + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/disorganizer/brig/util" + "github.com/glycerine/rbuf" +) + +// Writer encrypts the data stream before writing to Writer. +type Writer struct { + // Common fields with Reader + aeadCommon + + // Internal Writer we would write to. + Writer io.Writer + + // A buffer that is MaxBlockSize big. + // Used for caching blocks + rbuf *rbuf.FixedSizeRingBuf + + // True after the first write. 
+	headerWritten bool
+}
+
+func (w *Writer) Write(p []byte) (int, error) {
+	if !w.headerWritten {
+		w.headerWritten = true
+
+		_, err := w.Writer.Write(GenerateHeader())
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	for w.rbuf.Readable >= MaxBlockSize {
+		_, err := w.flushPack(MaxBlockSize)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// Remember left-overs for next write:
+	_, err := w.rbuf.Write(p)
+	if err != nil {
+		return 0, err
+	}
+
+	// Fake the amount of data we've written:
+	return len(p), nil
+}
+
+func (w *Writer) flushPack(chunkSize int) (int, error) {
+	n, err := w.rbuf.Read(w.decBuf[:chunkSize])
+	if err != nil {
+		return 0, err
+	}
+
+	// Create a new Nonce for this block:
+	blockNum := binary.BigEndian.Uint64(w.nonce)
+	binary.BigEndian.PutUint64(w.nonce, blockNum+1)
+
+	// Encrypt the text:
+	w.encBuf = w.aead.Seal(w.encBuf[:0], w.nonce, w.decBuf[:n], nil)
+
+	// Pass it to the underlying writer; propagate write errors:
+	written := 0
+	nNonce, err := w.Writer.Write(w.nonce)
+	written += nNonce
+	if err != nil {
+		return written, err
+	}
+	nData, err := w.Writer.Write(w.encBuf)
+	written += nData
+
+	return written, err
+}
+
+// Seek the write stream. This maps to a seek in the underlying datastream.
+func (w *Writer) Seek(offset int64, whence int) (int64, error) {
+	if seeker, ok := w.Writer.(io.Seeker); ok {
+		return seeker.Seek(offset, whence)
+	}
+
+	return 0, fmt.Errorf("write: Seek is not supported by underlying datastream")
+}
+
+// Close the Writer and write any left-over blocks
+// This does not close the underlying data stream.
+func (w *Writer) Close() error {
+	for w.rbuf.Readable > 0 {
+		n := util.Min(MaxBlockSize, w.rbuf.Readable)
+		_, err := w.flushPack(n)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NewWriter returns a new Writer which encrypts data with a
+// certain key.
+func NewWriter(w io.Writer, key []byte) (*Writer, error) {
+	writer := &Writer{
+		Writer: w,
+		rbuf:   rbuf.NewFixedSizeRingBuf(MaxBlockSize * 2),
+	}
+
+	if err := writer.initAeadCommon(key, defaultCipherType); err != nil {
+		return nil, err
+	}
+
+	return writer, nil
+}
diff --git a/store/file.go b/store/file.go
new file mode 100644
index 00000000..c1f50b86
--- /dev/null
+++ b/store/file.go
@@ -0,0 +1,33 @@
+package store
+
+import (
+	"github.com/disorganizer/brig/util/trie"
+	"github.com/jbenet/go-multihash"
+)
+
+// File represents a single file in the repository.
+// It stores all metadata about it and links to the actual data.
+type File struct {
+	// Pointer for dynamic loading of bigger data:
+	*trie.Node
+	s *Store
+
+	Size     FileSize
+	Hash     multihash.Multihash
+	IpfsHash multihash.Multihash
+}
+
+// New returns a file inside a repo.
+// Path is relative to the repo root.
+func New(path string) (*File, error) {
+	// TODO:
+	return nil, nil
+}
+
+// func (f *File) Open() (Stream, error) {
+// 	// Get io.Reader from ipfs cat
+// 	// Mask with decompressor
+// 	// Mask with decrypter
+// 	// return
+// 	return nil, nil
+// }
diff --git a/store/format-util/main.go b/store/format-util/main.go
new file mode 100644
index 00000000..c52844f2
--- /dev/null
+++ b/store/format-util/main.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+	"flag"
+	"log"
+	"os"
+
+	"github.com/disorganizer/brig/store/format"
+)
+
+func main() {
+	decryptMode := flag.Bool("d", false, "Decrypt.")
+	flag.Parse()
+
+	key := []byte("01234567890ABCDE01234567890ABCDE")
+
+	var err error
+	if !*decryptMode {
+		_, err = format.Encrypt(key, os.Stdin, os.Stdout)
+	} else {
+		_, err = format.Decrypt(key, os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		log.Fatal(err)
+		return
+	}
+}
diff --git a/store/index.go b/store/index.go
new file mode 100644
index 00000000..3ac82658
--- /dev/null
+++ b/store/index.go
@@ -0,0 +1,196 @@
+package store
+
+import (
+	"fmt"
+	"io"
+	"path/filepath"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/boltdb/bolt"
+	"github.com/disorganizer/brig/util/ipfsutil"
+	"github.com/disorganizer/brig/util/trie"
+	"github.com/jbenet/go-multihash"
+)
+
+var (
+	bucketIndex = []byte("index")
+)
+
+// Store is responsible for adding & retrieving all files from ipfs,
+// while managing their metadata in a boltDB.
+type Store struct {
+	db *bolt.DB
+
+	// Trie models the directory tree.
+	// The root node is the repository root.
+	Trie trie.Trie
+
+	IpfsCtx *ipfsutil.Context
+}
+
+// loadTrie fills the trie from the index bucket of the bolt db.
+func (s *Store) loadTrie() error {
+	s.Trie = trie.NewTrie()
+
+	return s.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(bucketIndex)
+		if bucket == nil {
+			return nil
+		}
+
+		err := bucket.ForEach(func(k, v []byte) error {
+			// k = absPath, v = File{} value
+			file := &File{s: s}
+			file.Node = s.Trie.Insert(string(k))
+			return nil
+		})
+
+		if err == nil {
+			return nil
+		}
+
+		return fmt.Errorf("store-load: %v", err)
+	})
+}
+
+// Open loads an existing store, if it does not exist, it is created.
+func Open(repoPath string) (*Store, error) {
+	options := &bolt.Options{Timeout: 1 * time.Second}
+	db, err := bolt.Open(filepath.Join(repoPath, "index.bolt"), 0600, options)
+
+	if err != nil {
+		return nil, err
+	}
+
+	store := &Store{
+		db: db,
+		IpfsCtx: &ipfsutil.Context{
+			Path: filepath.Join(repoPath, "ipfs"),
+		},
+	}
+
+	// Create initial buckets:
+	err = db.Update(func(tx *bolt.Tx) error {
+		for _, name := range []string{"index", "commits", "pinned"} {
+			if _, berr := tx.CreateBucketIfNotExists([]byte(name)); berr != nil {
+				return fmt.Errorf("create bucket: %s", berr)
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		log.Warningf("store-create-table failed: %v", err)
+	}
+
+	if err := store.loadTrie(); err != nil {
+		return nil, err
+	}
+
+	return store, nil
+}
+
+var TestKey = []byte("01234567890ABCDE01234567890ABCDE")
+
+// Add reads data from r, encrypts & compresses it while feeding it to ipfs.
+// The resulting hash will be committed to the index.
+func (s *Store) Add(path string, r io.Reader) (multihash.Multihash, error) {
+	// TODO
+	// gets hash, size, modtime=now, ipfshash...
+	// creates File{} and serializes it to DB using GOB
+	// insert .Node before serializing, set again after.
+	stream, err := NewFileReader(TestKey, r)
+	if err != nil {
+		return nil, err
+	}
+
+	hash, err := ipfsutil.Add(s.IpfsCtx, stream)
+	if err != nil {
+		return nil, err
+	}
+
+	err = s.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(bucketIndex)
+		if bucket == nil {
+			return fmt.Errorf("Add: No index bucket")
+		}
+
+		if err := bucket.Put([]byte(path), hash); err != nil {
+			return err
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return hash, nil
+}
+
+func (s *Store) Cat(path string, w io.Writer) error {
+	hash, err := s.PathToHash(path)
+	if err != nil {
+		return err
+	}
+
+	log.Debugf("cat %s -> %s", path, hash.B58String())
+
+	ipfsStream, err := ipfsutil.Cat(s.IpfsCtx, hash)
+	if err != nil {
+		return err
+	}
+	defer ipfsStream.Close()
+
+	cleanStream, err := NewIpfsReader(TestKey, ipfsStream)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(w, cleanStream); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *Store) PathToHash(path string) (multihash.Multihash, error) {
+	var hash multihash.Multihash
+
+	err := s.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(bucketIndex)
+		if bucket == nil {
+			return fmt.Errorf("PathToHash: No index bucket")
+		}
+
+		foundHash := bucket.Get([]byte(path))
+		if foundHash == nil {
+			return fmt.Errorf("cat: no hash to path `%s`", path)
+		}
+
+		hash = make([]byte, len(foundHash))
+		copy(hash, foundHash)
+		return nil
+	})
+
+	return hash, err
+}
+
+// Close syncs all data. It is an error to use the store afterwards.
+func (s *Store) Close() {
+	if err := s.db.Sync(); err != nil {
+		log.Warningf("store-sync: %v", err)
+	}
+
+	if err := s.db.Close(); err != nil {
+		log.Warningf("store-close: %v", err)
+	}
+}
+
+// Remove will purge a file locally on this node.
+// The file might still be available somewhere else.
+func (s *Store) Remove(path string) error {
+	return nil
+}
diff --git a/store/size.go b/store/size.go
new file mode 100644
index 00000000..3d524ac9
--- /dev/null
+++ b/store/size.go
@@ -0,0 +1,12 @@
+package store
+
+import (
+	"github.com/dustin/go-humanize"
+)
+
+// FileSize is a large enough integer for file sizes, offering a few util methods.
+type FileSize int64
+
+func (s FileSize) String() string {
+	return humanize.Bytes(uint64(s))
+}
diff --git a/store/stream.go b/store/stream.go
new file mode 100644
index 00000000..f900650a
--- /dev/null
+++ b/store/stream.go
@@ -0,0 +1,58 @@
+package store
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/disorganizer/brig/store/compress"
+	"github.com/disorganizer/brig/store/encrypt"
+)
+
+func NewIpfsReader(key []byte, r io.Reader) (io.Reader, error) {
+	rEnc, err := encrypt.NewReader(r, key)
+	if err != nil {
+		return nil, err
+	}
+
+	return compress.NewReader(rEnc), nil
+}
+
+// NewFileReaderFromPath is a shortcut for reading a file from disk and
+// returning ipfs-conforming data. NOTE(review): fd is never closed here.
+func NewFileReaderFromPath(key []byte, path string) (io.Reader, error) {
+	fd, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewFileReader(key, fd)
+}
+
+// NewFileReader reads an unencrypted, uncompressed file and
+// returns a reader that will yield the data we feed to ipfs.
+func NewFileReader(key []byte, r io.Reader) (io.Reader, error) {
+	pr, pw := io.Pipe()
+
+	// Setup the writer part:
+	wEnc, err := encrypt.NewWriter(pw, key)
+	if err != nil {
+		return nil, err
+	}
+
+	wZip := compress.NewWriter(wEnc)
+
+	go func() {
+		defer func() {
+			wEnc.Close()
+			pw.Close()
+		}()
+
+		if _, err := io.Copy(wZip, r); err != nil {
+			// TODO: Warn or pass to outside?
+			fmt.Println("stream copy failed:", err)
+		}
+	}()
+
+	return pr, nil
+}
diff --git a/store/stream_test.go b/store/stream_test.go
new file mode 100644
index 00000000..c30cc85f
--- /dev/null
+++ b/store/stream_test.go
@@ -0,0 +1,46 @@
+package store
+
+import (
+	"bytes"
+	"io"
+	"testing"
+)
+
+// TestKey is already declared in index.go (same package store).
+
+func TestWriteAndRead(t *testing.T) {
+	raw := []byte("Hello World")
+	rawBuf := &bytes.Buffer{}
+	rawBuf.Write(raw)
+
+	encStream, err := NewFileReader(TestKey, rawBuf)
+	if err != nil {
+		t.Errorf("Creating encryption stream failed: %v", err)
+		return
+	}
+
+	encrypted := &bytes.Buffer{}
+	if _, err := io.Copy(encrypted, encStream); err != nil {
+		t.Errorf("Reading encrypted data failed: %v", err)
+		return
+	}
+
+	decStream, err := NewIpfsReader(TestKey, encrypted)
+	if err != nil {
+		t.Errorf("Creating decryption stream failed: %v", err)
+		return
+	}
+
+	decrypted := &bytes.Buffer{}
+	if _, err := io.Copy(decrypted, decStream); err != nil {
+		t.Errorf("Reading decrypted data failed: %v", err)
+		return
+	}
+
+	if !bytes.Equal(decrypted.Bytes(), raw) {
+		t.Errorf("Raw and decrypted is not equal => BUG.")
+		t.Errorf("RAW:\n %v", raw)
+		t.Errorf("DEC:\n %v", decrypted.Bytes())
+		return
+	}
+}
diff --git a/util/colors/colors.go b/util/colors/colors.go
new file mode 100644
index 00000000..a356baae
--- /dev/null
+++ b/util/colors/colors.go
@@ -0,0 +1,32 @@
+// Package colors implements easy printing of terminal colors.
+package colors
+
+import "fmt"
+
+const (
+	// Cyan should be used for debug messages.
+ Cyan = 36 + // Green should be used for informational/success messages. + Green = 32 + // Magenta should be used for critical errors. + Magenta = 35 + // Red should be used for normal errors. + Red = 31 + // Yellow should be used for warnings. + Yellow = 33 + // BackgroundRed should be used for panic. + BackgroundRed = 41 +) + +// ColorResetEscape terminates all previous colors. +var ColorResetEscape = "\033[0m" + +// ColorEscape translates a ANSI color number to a color escape. +func ColorEscape(color int) string { + return fmt.Sprintf("\033[0;%dm", color) +} + +// Colorize the msg using ANSI color escapes +func Colorize(msg string, color int) string { + return ColorEscape(color) + msg + ColorResetEscape +} diff --git a/util/filelock/filelock.go b/util/filelock/filelock.go new file mode 100644 index 00000000..0cebef6f --- /dev/null +++ b/util/filelock/filelock.go @@ -0,0 +1,54 @@ +// Package filelock implements helper function for using a `lock` file for +// synchronising file system resources. +package filelock + +import ( + "time" + + "github.com/nightlyone/lockfile" +) + +// Acquire tries to lock the lock file at `lockPath`. +// If it is already locked it will re-try after a short timeout. +func Acquire(lockPath string) error { + lock, err := lockfile.New(lockPath) + if err != nil { + return err + } + + for { + if err := lock.TryLock(); err != nil { + if err == lockfile.ErrBusy { + time.Sleep(250 * time.Millisecond) + continue + } else { + return err + } + } + + break + } + + return nil +} + +// TryAcquire tries to acquire the lock at `lockPath`. +// It will not retry if it fails. +func TryAcquire(lockPath string) error { + lock, err := lockfile.New(lockPath) + if err != nil { + return err + } + + return lock.TryLock() +} + +// Release will remove the lockfile. 
+func Release(lockPath string) error {
+	lock, err := lockfile.New(lockPath)
+	if err != nil {
+		return err
+	}
+
+	return lock.Unlock()
+}
diff --git a/util/ipfsutil/add.go b/util/ipfsutil/add.go
new file mode 100644
index 00000000..eef092ae
--- /dev/null
+++ b/util/ipfsutil/add.go
@@ -0,0 +1,70 @@
+package ipfsutil
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/jbenet/go-multihash"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// Add reads `r` and adds it to ipfs.
+// The resulting content hash is returned.
+func Add(ctx *Context, r io.Reader) (multihash.Multihash, error) {
+	adder := ipfsCommand(ctx, "add", "-q")
+	// adder := exec.Command("cat")
+	stdin, err := adder.StdinPipe()
+	if err != nil {
+		log.Warning("stdin failed")
+		return nil, err
+	}
+
+	stderr, err := adder.StderrPipe()
+	if err != nil {
+		log.Warning("stderr failed")
+		return nil, err
+	}
+
+	stdout, err := adder.StdoutPipe()
+	if err != nil {
+		log.Warning("stdout failed")
+		return nil, err
+	}
+
+	if err := adder.Start(); err != nil {
+		log.Warning("ipfs add failed: ", err)
+		return nil, err
+	}
+
+	// Copy file to ipfs-add's stdin:
+	if _, err = io.Copy(stdin, r); err != nil {
+		log.Warning("copy failed")
+		return nil, err
+	}
+
+	if err := stdin.Close(); err != nil {
+		log.Warningf("ipfs add: close failed: %v", err)
+	}
+
+	hash, err := ioutil.ReadAll(stdout)
+	if err != nil {
+		log.Warning("hash failed", stdout)
+		return nil, err
+	}
+
+	errs, _ := ioutil.ReadAll(stderr)
+	if err := adder.Wait(); err != nil {
+		log.Warningf("`ipfs add` failed: %v", err)
+		log.Warningf("Stderr: %v", string(errs))
+	}
+
+	hash = bytes.TrimSpace(hash)
+	mh, err := multihash.FromB58String(string(hash))
+	if err != nil {
+		return nil, err
+	}
+
+	return mh, nil
+}
diff --git a/util/ipfsutil/cat.go b/util/ipfsutil/cat.go
new file mode 100644
index 00000000..a1701ec4
--- /dev/null
+++ b/util/ipfsutil/cat.go
@@ -0,0 +1,55 @@
+package ipfsutil
+
+import (
+	"io"
+	"io/ioutil"
+	"os/exec"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/jbenet/go-multihash"
+)
+
+type ipfsCatter struct {
+	catter *exec.Cmd
+	stdout io.Reader
+	stderr io.Reader
+}
+
+func (i *ipfsCatter) Read(buf []byte) (int, error) {
+	return i.stdout.Read(buf)
+}
+
+func (i *ipfsCatter) Close() error {
+	stderrText, _ := ioutil.ReadAll(i.stderr)
+	if err := i.catter.Wait(); err != nil {
+		log.Warningf("`ipfs cat` failed: %v", err)
+		log.Warningf("Stderr: %v", string(stderrText))
+		return err
+	}
+	return nil
+}
+
+// Cat returns an io.Reader that reads from ipfs.
+func Cat(ctx *Context, hash multihash.Multihash) (io.ReadCloser, error) {
+	catter := ipfsCommand(ctx, "cat", hash.B58String())
+	stdout, err := catter.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	stderr, err := catter.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := catter.Start(); err != nil {
+		log.Warningf("`ipfs cat` failed to start: %v", err)
+		return nil, err
+	}
+
+	return &ipfsCatter{
+		catter: catter,
+		stdout: stdout,
+		stderr: stderr,
+	}, nil
+}
diff --git a/util/ipfsutil/common.go b/util/ipfsutil/common.go
new file mode 100644
index 00000000..54fce52f
--- /dev/null
+++ b/util/ipfsutil/common.go
@@ -0,0 +1,15 @@
+package ipfsutil
+
+import "os/exec"
+
+// Context remembers the settings needed for accessing the ipfs daemon.
+type Context struct {
+	// TODO!
+	Path string
+}
+
+func ipfsCommand(ctx *Context, args ...string) *exec.Cmd {
+	cmd := exec.Command("ipfs", args...)
+	cmd.Env = []string{"IPFS_PATH=" + ctx.Path}
+	return cmd
+}
diff --git a/util/ipfsutil/daemon.go b/util/ipfsutil/daemon.go
new file mode 100644
index 00000000..759cbed7
--- /dev/null
+++ b/util/ipfsutil/daemon.go
@@ -0,0 +1,69 @@
+package ipfsutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// StartDaemon executes and watches `ipfs daemon`.
+// The exec.Cmd associated to it is returned, +// use it to call Wait() on or to stop it via .Process.Kill() +func StartDaemon(ctx *Context) (*exec.Cmd, error) { + port := 4001 // TODO: read from ctx.Path.config + + daemon := ipfsCommand(ctx, "daemon") + stderr, err := daemon.StderrPipe() + if err != nil { + log.Warningf("Could not attach to stderr: %v", err) + } + + if err := daemon.Start(); err != nil { + return nil, err + } + + go func() { + stderrBytes, _ := ioutil.ReadAll(stderr) + if err := daemon.Wait(); err != nil { + log.Warningf("ipfs daemon exit: %v", err) + log.Warningf("Stderr: %v", string(stderrBytes)) + } + }() + + addr := fmt.Sprintf("localhost:%d", port) + for i := 0; i < 30; i++ { + conn, err := net.Dial("tcp", addr) + if err != nil { + time.Sleep(500 * time.Millisecond) + continue + } + + conn.Close() + + // This is pretty stupid. The ipfs daemon first starts the + // network interface but is not ready for usage yet. + // It appears to be ready once the api/ dir is created. + // Maybe better to check for the "Daemon is ready" phrase? + // (This is actually a TODO) + if _, err := os.Stat(filepath.Join(ctx.Path, "api")); err != nil { + time.Sleep(500 * time.Millisecond) + continue + } + + log.Infof("ipfs running on :%d", port) + return daemon, nil + } + + // Something wrong. Kill whatever we launched. 
+ if err := daemon.Process.Kill(); err != nil { + return nil, err + } + + return nil, fmt.Errorf("ipfs daemon startup took too long.") +} diff --git a/util/ipfsutil/ipfs_test.go b/util/ipfsutil/ipfs_test.go new file mode 100644 index 00000000..5793b606 --- /dev/null +++ b/util/ipfsutil/ipfs_test.go @@ -0,0 +1,110 @@ +package ipfsutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + ipfsconfig "github.com/ipfs/go-ipfs/repo/config" + "github.com/ipfs/go-ipfs/repo/fsrepo" +) + +var ( + TEST_PATH = filepath.Join(os.TempDir(), "brig_test_ipfs_repo") +) + +func initRepo(t *testing.T) *Context { + if err := os.MkdirAll(TEST_PATH, 0744); err != nil { + t.Errorf("Could not create unit test dir: %v", err) + return nil + } + + ipfsPath := filepath.Join(TEST_PATH, ".ipfs") + cfg, err := ipfsconfig.Init(os.Stdout, 1024) + if err != nil { + t.Errorf("Could not create ipfs config %v", err) + return nil + } + + if err := fsrepo.Init(ipfsPath, cfg); err != nil { + t.Errorf("Could not create ipfs repo at %s: %v", TEST_PATH, err) + return nil + } + + return &Context{Path: ipfsPath} +} + +func TestStartDaemon(t *testing.T) { + ctx := initRepo(t) + if ctx == nil { + return + } + + defer os.RemoveAll(TEST_PATH) + + cmd, err := StartDaemon(ctx) + if err != nil { + t.Errorf("Could not start ipfs daemon: %v", err) + return + } + + if err := cmd.Process.Kill(); err != nil { + t.Errorf("Could not kill ipfs daemon: %v", err) + return + } +} + +func TestAddCat(t *testing.T) { + fmt.Println("Testing AddCat") + + ctx := initRepo(t) + if ctx == nil { + return + } + + defer os.RemoveAll(TEST_PATH) + + cmd, err := StartDaemon(ctx) + if err != nil { + t.Errorf("Could not start ipfs daemon: %v", err) + return + } + + defer func() { + if err := cmd.Process.Kill(); err != nil { + t.Errorf("Could not kill ipfs daemon: %v", err) + return + } + }() + + // Dummy in-memory reader: + origData := []byte("Hello World") + buf := &bytes.Buffer{} + buf.Write(origData) + + hash, 
err := Add(ctx, buf) + if err != nil { + t.Errorf("Add of a simple file failed: %v", err) + return + } + + reader, err := Cat(ctx, hash) + if err != nil { + t.Errorf("Could not cat simple file: %v", err) + return + } + defer reader.Close() + + data, err := ioutil.ReadAll(reader) + if err != nil { + t.Errorf("Could not read back added data: %v", err) + return + } + + if !bytes.Equal(data, origData) { + t.Errorf("Data not equal: %v <- -> %v", string(data), string(origData)) + } +} diff --git a/util/log/logger.go b/util/log/logger.go new file mode 100644 index 00000000..5f34c6b7 --- /dev/null +++ b/util/log/logger.go @@ -0,0 +1,129 @@ +// Package log implements utility methods for logging in a colorful manner. +package log + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/disorganizer/brig/util/colors" +) + +// ColorfulLogFormatter is the default logger for brig. +type ColorfulLogFormatter struct{} + +var symbolTable = map[logrus.Level]string{ + logrus.DebugLevel: "⚙", + logrus.InfoLevel: "⚐", + logrus.WarnLevel: "⚠", + logrus.ErrorLevel: "⚡", + logrus.FatalLevel: "☣", + logrus.PanicLevel: "☠", +} + +var colorTable = map[logrus.Level]int{ + logrus.DebugLevel: colors.Cyan, + logrus.InfoLevel: colors.Green, + logrus.WarnLevel: colors.Yellow, + logrus.ErrorLevel: colors.Red, + logrus.FatalLevel: colors.Magenta, + logrus.PanicLevel: colors.BackgroundRed, +} + +func colorEscape(level logrus.Level) string { + return colors.ColorEscape(colorTable[level]) +} + +func formatColored(buffer *bytes.Buffer, msg string, level logrus.Level) { + buffer.WriteString(colorEscape(level)) + buffer.WriteString(msg) + buffer.WriteString(colors.ColorResetEscape) +} + +func formatTimestamp(buffer *bytes.Buffer, t time.Time) { + fmt.Fprintf(buffer, "%02d.%02d.%04d", t.Day(), t.Month(), t.Year()) + buffer.WriteByte('/') + fmt.Fprintf(buffer, "%02d:%02d:%02d", t.Hour(), t.Minute(), t.Second()) +} + +func formatFields(buffer *bytes.Buffer, entry 
*logrus.Entry) { + idx := 0 + buffer.WriteString(" [") + + for key, value := range entry.Data { + // MAke the key colored: + formatColored(buffer, key, entry.Level) + buffer.WriteByte('=') + + // A few special cases depending on the type: + switch v := value.(type) { + case *logrus.Entry: + formatColored(buffer, v.Message, logrus.ErrorLevel) + default: + buffer.WriteString(fmt.Sprintf("%v", v)) + } + + // Print no space after the last element: + if idx != len(entry.Data)-1 { + buffer.WriteByte(' ') + } + + idx++ + } + + buffer.WriteByte(']') +} + +// Format logs a single entry according to our formatting ideas. +func (*ColorfulLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + buffer := bytes.Buffer{} + + // Add the timestamp: + buffer.WriteString(colorEscape(entry.Level)) + formatTimestamp(&buffer, entry.Time) + buffer.WriteByte(' ') + + // Add the symbol: + buffer.WriteString(symbolTable[entry.Level]) + buffer.WriteString(colors.ColorResetEscape) + + // Add the actual message: + buffer.WriteByte(' ') + buffer.WriteString(entry.Message) + + // Add the fields, if any: + if len(entry.Data) > 0 { + formatFields(&buffer, entry) + } + + buffer.WriteByte('\n') + return buffer.Bytes(), nil +} + +var logLevelToFunc = map[logrus.Level]func(args ...interface{}){ + logrus.DebugLevel: logrus.Debug, + logrus.InfoLevel: logrus.Info, + logrus.WarnLevel: logrus.Warn, + logrus.ErrorLevel: logrus.Error, + logrus.FatalLevel: logrus.Fatal, +} + +// Writer is an io.Writer that writes everything to logrus. +type Writer struct { + // Level determines the severity for all messages. 
+	Level logrus.Level
+}
+
+func (l *Writer) Write(buf []byte) (int, error) {
+	fn, ok := logLevelToFunc[l.Level]
+	if !ok {
+		logrus.Fatal("LogWriter: Bad loglevel passed.")
+	} else {
+		msg := string(buf)
+		fn(strings.Trim(msg, "\n\r "))
+	}
+
+	return len(buf), nil
+}
diff --git a/util/log/logger_test.go b/util/log/logger_test.go
new file mode 100644
index 00000000..d079f7f1
--- /dev/null
+++ b/util/log/logger_test.go
@@ -0,0 +1,56 @@
+package log
+
+import (
+	"os"
+	"testing"
+
+	"github.com/Sirupsen/logrus"
+)
+
+func TestLog(t *testing.T) {
+	logrus.SetOutput(os.Stderr)
+
+	// Log everything from debug severity upwards.
+	logrus.SetLevel(logrus.DebugLevel)
+
+	// Log pretty text
+	logrus.SetFormatter(&ColorfulLogFormatter{})
+	// logrus.SetFormatter(&logrus.JSONFormatter{})
+
+	defer func() {
+		err := recover()
+		if err != nil {
+			logrus.WithFields(logrus.Fields{
+				"omg":    true,
+				"err":    err,
+				"number": 100,
+			}).Error("The ice breaks!")
+		}
+	}()
+
+	logrus.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"number": 8,
+	}).Debug("Started observing beach")
+
+	logrus.WithFields(logrus.Fields{
+		"animal": "walrus",
+		"size":   10,
+	}).Info("A group of walrus emerges from the ocean")
+
+	logrus.WithFields(logrus.Fields{
+		"omg":    true,
+		"number": 122,
+	}).Warn("The group's number increased tremendously!")
+
+	logrus.WithFields(logrus.Fields{
+		"temperature": -4,
+	}).Debug("Temperature changes")
+
+	logrus.Error("Stuff!")
+
+	logrus.WithFields(logrus.Fields{
+		"animal": "orca",
+		"size":   9009,
+	}).Panic("It's over 9000!")
+}
diff --git a/util/security/derivekey.go b/util/security/derivekey.go
new file mode 100644
index 00000000..f0f15498
--- /dev/null
+++ b/util/security/derivekey.go
@@ -0,0 +1,35 @@
+// Package security implements utility function for often used
+// security operations. At this very moment this includes:
+//
+// - Key derivation function using scrypt (DeriveAESKey)
+package security
+
+import (
+	"crypto/rand"
+
+	"golang.org/x/crypto/scrypt"
+)
+
+// Scrypt wraps scrypt.Key with the standard parameters.
+func Scrypt(pwd, salt []byte, keyLen int) []byte {
+	// Parameters to be changed in future
+	// https://godoc.org/golang.org/x/crypto/scrypt
+	key, err := scrypt.Key(pwd, salt, 16384, 8, 1, keyLen)
+	if err != nil {
+		panic("Bad scrypt parameters: " + err.Error())
+	}
+
+	return key
+}
+
+// DeriveAESKey generates an AES key of keySize bytes from the password.
+// NOTE(review): the jid parameter is currently unused — confirm intent.
+func DeriveAESKey(jid, password string, keySize int) ([]byte, []byte, error) {
+	salt := make([]byte, keySize)
+	_, err := rand.Read(salt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return Scrypt([]byte(password), salt, keySize), salt, nil
+}
diff --git a/util/security/derivekey_test.go b/util/security/derivekey_test.go
new file mode 100644
index 00000000..be985542
--- /dev/null
+++ b/util/security/derivekey_test.go
@@ -0,0 +1,11 @@
+package security
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestDerive(t *testing.T) {
+	key, salt, _ := DeriveAESKey("elch@jabber.nullcat.de", "Katznwald", 32)
+	fmt.Printf("Key: % x\nSalt: % x\n", key, salt)
+}
diff --git a/util/std.go b/util/std.go
new file mode 100644
index 00000000..b6f2c768
--- /dev/null
+++ b/util/std.go
@@ -0,0 +1,72 @@
+// Package util implements small helper function that
+// should be included in the stdlib in our opinion.
+package util
+
+import (
+	"io"
+	"os"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// Min returns the minimum of a and b.
+func Min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// Max returns the maximum of a and b.
+func Max(a, b int) int { + if a < b { + return b + } + return a +} + +// Clamp limits x to the range [lo, hi] +func Clamp(x, lo, hi int) int { + return Max(lo, Min(x, hi)) +} + +// UMin returns the unsigned minimum of a and b +func UMin(a, b uint) uint { + if a < b { + return a + } + return b +} + +// UMax returns the unsigned minimum of a and b +func UMax(a, b uint) uint { + if a < b { + return b + } + return a +} + +// UClamp limits x to the range [lo, hi] +func UClamp(x, lo, hi uint) uint { + return UMax(lo, UMin(x, hi)) +} + +// Closer closes c. If that fails, it will log the error. +// The intended usage is for convinient defer calls only! +// It gives only little knowledge about where the error is, +// but it's slightly better than a bare defer xyz.Close() +func Closer(c io.Closer) { + if err := c.Close(); err != nil { + log.Errorf("Error on close `%v`: %v", c, err) + } +} + +// Touch works like the unix touch(1) +func Touch(path string) error { + fd, err := os.Create(path) + if err != nil { + return err + } + + return fd.Close() +} diff --git a/util/testutil/testutil.go b/util/testutil/testutil.go new file mode 100644 index 00000000..8ab9c9f5 --- /dev/null +++ b/util/testutil/testutil.go @@ -0,0 +1,42 @@ +package testutil + +import "io/ioutil" + +func CreateDummyBuf(size int64) []byte { + buf := make([]byte, size) + + for i := int64(0); i < size; i++ { + // Be evil and stripe the data: + buf[i] = byte(i % 255) + } + + return buf +} + +func CreateFile(size int64) string { + fd, err := ioutil.TempFile("", "brig_test") + if err != nil { + panic("Cannot create temp file") + } + + defer fd.Close() + + blockSize := int64(1 * 1024 * 1024) + buf := CreateDummyBuf(blockSize) + + for size > 0 { + take := size + if size > int64(len(buf)) { + take = int64(len(buf)) + } + + _, err := fd.Write(buf[:take]) + if err != nil { + panic(err) + } + + size -= blockSize + } + + return fd.Name() +} diff --git a/util/trie/buildpath.go b/util/trie/buildpath.go new file mode 100644 
index 00000000..28e47436 --- /dev/null +++ b/util/trie/buildpath.go @@ -0,0 +1,12 @@ +// +build !windows + +package trie + +import ( + "os" + "path/filepath" +) + +func buildPath(s []string) string { + return string(os.PathSeparator) + filepath.Join(s...) +} diff --git a/util/trie/buildpath_windows.go b/util/trie/buildpath_windows.go new file mode 100644 index 00000000..c9c3a5ff --- /dev/null +++ b/util/trie/buildpath_windows.go @@ -0,0 +1,9 @@ +package trie + +import ( + "path/filepath" +) + +func buildPath(s []string) string { + return filepath.Join(s...) +} diff --git a/util/trie/pathricia.go b/util/trie/pathricia.go new file mode 100644 index 00000000..eb3d8c88 --- /dev/null +++ b/util/trie/pathricia.go @@ -0,0 +1,233 @@ +// Package trie implements a general purpose Path-Trie. +package trie + +import ( + "os" + "strings" +) + +// Node represents a single node in a Trie, but it can be used as a whole +// (sub-)Trie through the Trie interface. A node value of `nil` is a perfectly +// valid trie. Node is suitable for embedding it into other structs. +type Node struct { + // Pointer to parent node or nil + Parent *Node + + // Basename to child-nodes + Children map[string]*Node + + // Basename of the node's Path + Name string + + // Number of explicitly added children of this node. + // (1 for leaf nodes) + Length int64 + + // Depth of the node. The root is at depth 0. + Depth uint16 +} + +// Trie represents the required methods for accessing a directory structure. +type Trie interface { + // Root returns the uppermost node of the trie. + Root() *Node + + // Insert adds a new node in the trie at string. If the node already exists, + // nothing changes. This operation costs O(log(n)). The newly created or + // existant node is returned. + Insert(path string) *Node + + // Lookup searches for a node references by a path. + Lookup(path string) *Node + + // Remove removes the node at path and all of it's children. 
+ // The parent of the removed node is returned, which might be nil. + Remove() *Node + + // Len returns the current number of elements in the trie. + // This counts only explicitly inserted Nodes. + Len() int64 +} + +// SplitPath splits the path according to os.PathSeparator, +// but omits a leading empty name on /unix/paths +func SplitPath(path string) []string { + names := strings.Split(path, string(os.PathSeparator)) + if len(names) > 0 && names[0] == "" { + return names[1:] + } + + return names +} + +// NewTrie returns a trie with the root element pre-inserted. +// Note that `nil` is a perfectly valid, but empty trie. +func NewTrie() *Node { + return &Node{} +} + +// Root returns the root node of the trie. +func (n *Node) Root() *Node { + if n != nil && n.Parent != nil { + return n.Parent.Root() + } + return n +} + +// IsLeaf will return true if this node has no other children. +func (n *Node) IsLeaf() bool { + if n == nil { + return false + } + + return len(n.Children) == 0 +} + +// Insert adds a node into the trie at `path` +func (n *Node) Insert(path string) *Node { + curr := n + if curr == nil { + curr = NewTrie() + } + + wasAdded := false + + for _, name := range SplitPath(path) { + if curr.Children == nil { + curr.Children = make(map[string]*Node) + } + child, ok := curr.Children[name] + if !ok { + child = &Node{ + Parent: curr, + Name: name, + Depth: uint16(curr.Depth + 1), + } + curr.Children[name] = child + wasAdded = true + } + curr = child + } + + if wasAdded && curr != nil { + curr.Up(func(parent *Node) { + parent.Length++ + }) + } + return curr +} + +// Lookup searches a Node by it's absolute path. +// Returns nil if Node does not exist. 
+func (n *Node) Lookup(path string) *Node {
+	curr := n
+	if n == nil {
+		return nil
+	}
+
+	if path == "/" {
+		return n.Root()
+	}
+
+	for _, name := range SplitPath(path) {
+		child, ok := curr.Children[name]
+		if !ok {
+			return nil
+		}
+		curr = child
+	}
+	return curr
+}
+
+// Remove removes the receiver and all of its children.
+// The removed node's parent is returned.
+func (n *Node) Remove() *Node {
+	if n == nil {
+		return nil
+	}
+
+	// Adjust the length of all parents:
+	length := n.Length
+	n.Up(func(parent *Node) {
+		parent.Length -= length
+	})
+
+	// Remove the link from the parent to self:
+	if n.Parent != nil {
+		delete(n.Parent.Children, n.Name)
+	}
+
+	// Make children garbage collectable:
+	parent := n.Parent
+	n.Walk(true, func(child *Node) {
+		child.Children = nil
+		child.Parent = nil
+	})
+	return parent
+}
+
+// Walk iterates over all (including intermediate) nodes in the trie.
+// If dfs is true, nodes are visited post-order (children first),
+// otherwise pre-order. The callback is called for each visited node.
+func (n *Node) Walk(dfs bool, visit func(*Node)) {
+	if n == nil {
+		return
+	}
+
+	if !dfs {
+		visit(n)
+	}
+
+	if n.Children != nil {
+		for _, child := range n.Children {
+			child.Walk(dfs, visit)
+		}
+	}
+
+	if dfs {
+		visit(n)
+	}
+}
+
+// Up walks from the receiving node to the root node,
+// calling `visit` on each node on its way.
+func (n *Node) Up(visit func(*Node)) {
+	if n != nil {
+		visit(n)
+		n.Parent.Up(visit)
+	}
+}
+
+// Len returns the number of explicitly inserted elements in the trie.
+func (n *Node) Len() int64 {
+	if n == nil {
+		return 0
+	}
+	return n.Length
+}
+
+// Path returns a full absolute path from the receiver
+// to the root node of the trie.
+func (n *Node) Path() string {
+	if n == nil {
+		return ""
+	}
+
+	s := make([]string, n.Depth+2)
+	i := len(s) - 1
+
+	n.Up(func(parent *Node) {
+		s[i] = parent.Name
+		i--
+	})
+
+	return buildPath(s)
+}
+
+// String returns the absolute path of the node.
+func (n *Node) String() string {
+	if n == nil {
+		return ""
+	}
+	return n.Path()
+}
diff --git a/util/trie/pathricia_test.go b/util/trie/pathricia_test.go
new file mode 100644
index 00000000..57c53f01
--- /dev/null
+++ b/util/trie/pathricia_test.go
@@ -0,0 +1,134 @@
+package trie
+
+import "testing"
+
+func TestPathriciaInsertTrieLinux(t *testing.T) {
+	tests := []struct {
+		input  string
+		name   string
+		path   string
+		length int64
+	}{
+		// Insert path | expected node name | expected path | trie.Len()
+		{"", "", "/", 0},
+		{"\\", "\\", "/\\", 1},
+		{"a", "a", "/a", 2},
+		{"a/b", "b", "/a/b", 3},
+		{"home", "home", "/home", 4},
+		{"sahib", "sahib", "/sahib", 5},
+		{"home/qitta", "qitta", "/home/qitta", 6},
+		{" ", " ", "/ ", 7},
+	}
+
+	trie := NewTrie()
+	for _, test := range tests {
+		// Insert at the root node each time.
+		node := trie.Insert(test.input)
+		if node == nil {
+			t.Errorf("Node is nil: %v", test)
+			continue
+		}
+
+		nodeLen := node.Root().Len()
+		if nodeLen != test.length {
+			t.Errorf("Length differs, got: %d != expected: %d", nodeLen, test.length)
+		}
+
+		if node.Name != test.name {
+			t.Errorf("Name differs, got: %s != expected: %s", node.Name, test.name)
+		}
+
+		if node.Path() != test.path {
+			t.Errorf("Path differs, got: %s != expected: %s", node.Path(), test.path)
+		}
+	}
+}
+
+func TestPathriciaInsertRelativeLinux(t *testing.T) {
+	tests := []struct {
+		input  string
+		name   string
+		path   string
+		length int64
+	}{
+		// Insert path | expected node name | expected path | trie.Len()
+		{"", "", "/", 0},
+		{"/", "", "/", 1},
+		{"a", "a", "/a", 2},
+		{"b", "b", "/a/b", 3},
+		{"c", "c", "/a/b/c", 4},
+		{"c/de/fe", "fe", "/a/b/c/c/de/fe", 5},
+		{"c/de/fe/333", "333", "/a/b/c/c/de/fe/c/de/fe/333", 6},
+	}
+
+	trie := NewTrie()
+	node := trie.Root()
+	for _, test := range tests {
+		// Insert always at the previously returned node.
+		node = node.Insert(test.input)
+		if node == nil {
+			t.Errorf("Node is nil: %v", test)
+			continue
+		}
+
+		// Check the explicitly added paths.
+		nodeLen := trie.Length
+		if nodeLen != test.length {
+			t.Errorf("Length differs, got: %d != expected: %d\n", nodeLen, test.length)
+		}
+
+		if node.Name != test.name {
+			t.Errorf("Name differs, got: %s != expected: %s\n", node.Name, test.name)
+		}
+
+		if node.Path() != test.path {
+			t.Errorf("Path differs, got: %s != expected: %s\n", node.Path(), test.path)
+		}
+	}
+
+}
+
+func TestPathriciaRemoveLinux(t *testing.T) {
+	paths := []string{
+		"home/qitta",
+		"/sahib",
+		"/eule",
+		"home/eule",
+		"katze/eule",
+		"elch/eule",
+		"elch/eule/meow",
+	}
+
+	trie := NewTrie()
+	for _, path := range paths {
+		trie.Insert(path)
+	}
+
+	tests := []struct {
+		path   string
+		length int64
+		name   string
+	}{
+		{"/home", 5, ""},
+		{"/katze/Eule", 5, ""},
+		{"/katze/eule", 4, "katze"},
+		{"/elch/eule/meow", 3, "eule"},
+		{"/", 0, ""},
+		{"/", 0, ""},
+	}
+
+	for _, test := range tests {
+		node := trie.Lookup(test.path).Remove()
+		if node == nil {
+			continue
+		}
+
+		if node.Name != test.name {
+			t.Errorf("\nRemoving: [%s]\nName differs, got: %s != expected: %s\n", test.path, node.Name, test.name)
+		}
+
+		if trie.Length != test.length {
+			t.Errorf("Length differs, got: %d != expected: %d\n", trie.Length, test.length)
+		}
+	}
+}
diff --git a/util/tunnel/tunnel.go b/util/tunnel/tunnel.go
new file mode 100644
index 00000000..f2dfd229
--- /dev/null
+++ b/util/tunnel/tunnel.go
@@ -0,0 +1,121 @@
+// Package tunnel implements an io.ReadWriter that encrypts its data.
+// Technically it performs an Elliptic Curve Diffie-Hellman key exchange
+// before the first read or write (or triggered manually using Exchange()).
+//
+// All communication over the tunnel is encrypted with AES using CFB mode.
+package tunnel
+
+import (
+	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/elliptic"
+	"crypto/rand"
+	"fmt"
+	"io"
+
+	"github.com/disorganizer/brig/util/security"
+	"github.com/tang0th/go-ecdh"
+)
+
+type ecdhTunnel struct {
+	// Underlying ReadWriter that carries the encrypted data.
+	ReadWriter io.ReadWriter
+
+	// Elliptic Curve Diffie-Hellman state and keys:
+	ecdh    ecdh.ECDH
+	privKey crypto.PrivateKey
+	pubKey  crypto.PublicKey
+
+	// CFB streaming ciphers for Read()/Write(); nil until Exchange() ran.
+	streamW *cipher.StreamWriter
+	streamR *cipher.StreamReader
+}
+
+// NewEllipticTunnel creates an io.ReadWriter that transparently encrypts all data.
+func NewEllipticTunnel(rw io.ReadWriter) (io.ReadWriter, error) {
+	// TODO: Find safe elliptic curve
+	return newEllipticTunnelWithCurve(rw, elliptic.P521())
+}
+
+// newEllipticTunnelWithCurve wraps rw, generating a keypair on the given curve.
+func newEllipticTunnelWithCurve(rw io.ReadWriter, curve elliptic.Curve) (io.ReadWriter, error) {
+	tnl := &ecdhTunnel{
+		ReadWriter: rw,
+		ecdh:       ecdh.NewEllipticECDH(curve),
+	}
+
+	var err error
+	tnl.privKey, tnl.pubKey, err = tnl.ecdh.GenerateKey(rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	return tnl, nil
+}
+
+// Exchange triggers the Diffie-Hellman key exchange manually.
+func (tnl *ecdhTunnel) Exchange() error {
+	if tnl.streamW != nil || tnl.streamR != nil {
+		return nil
+	}
+
+	pubKeyBuf := tnl.ecdh.Marshal(tnl.pubKey)
+	if _, err := tnl.ReadWriter.Write(pubKeyBuf); err != nil {
+		return err
+	}
+
+	partnerBuf := make([]byte, len(pubKeyBuf))
+	if _, err := tnl.ReadWriter.Read(partnerBuf); err != nil {
+		return err
+	}
+
+	partnerKey, ok := tnl.ecdh.Unmarshal(partnerBuf)
+	if !ok {
+		return fmt.Errorf("partner key unmarshal failed")
+	}
+
+	secret, err := tnl.ecdh.GenerateSharedSecret(tnl.privKey, partnerKey)
+	if err != nil {
+		return err
+	}
+
+	// Transform the secret to a usable 32-byte key and a CFB IV:
+	key := security.Scrypt(secret, secret[:16], 32)
+	inv := security.Scrypt(secret, secret[16:], aes.BlockSize)
+
+	blockCipher, err := aes.NewCipher(key)
+	if err != nil {
+		return err
+	}
+
+	tnl.streamW = &cipher.StreamWriter{
+		S: cipher.NewCFBEncrypter(blockCipher, inv),
+		W: tnl.ReadWriter,
+	}
+	tnl.streamR = &cipher.StreamReader{
+		S: cipher.NewCFBDecrypter(blockCipher, inv),
+		R: tnl.ReadWriter,
+	}
+	return nil
+}
+
+// Read decrypts underlying data using CFB and will trigger a key exchange
+// if this was not done yet for this session.
+func (tnl *ecdhTunnel) Read(buf []byte) (int, error) {
+	if err := tnl.Exchange(); err != nil {
+		return 0, err
+	}
+
+	return tnl.streamR.Read(buf)
+}
+
+// Write encrypts incoming data using CFB and will trigger a key exchange
+// if this was not done yet for this session.
+func (tnl *ecdhTunnel) Write(buf []byte) (int, error) {
+	if err := tnl.Exchange(); err != nil {
+		return 0, err
+	}
+
+	n, e := tnl.streamW.Write(buf)
+	return n, e
+}
diff --git a/util/tunnel/tunnel_test.go b/util/tunnel/tunnel_test.go
new file mode 100644
index 00000000..98455565
--- /dev/null
+++ b/util/tunnel/tunnel_test.go
@@ -0,0 +1,29 @@
+package tunnel
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"testing"
+)
+
+func TestTunnel(t *testing.T) {
+	m := &bytes.Buffer{}
+
+	ta, err := NewEllipticTunnel(m)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(ta.Write([]byte("Hello")))
+	fmt.Println(m)
+	fmt.Println(ta.Write([]byte("World")))
+	fmt.Println(m)
+
+	o, _ := ioutil.ReadAll(ta)
+
+	fmt.Println(string(o))
+	if string(o) != "HelloWorld" {
+		panic(o)
+	}
+}
diff --git a/version.go b/version.go
new file mode 100644
index 00000000..955c4776
--- /dev/null
+++ b/version.go
@@ -0,0 +1,24 @@
+package brig
+
+import "fmt"
+
+const (
+	// Major will be incremented on big releases.
+	Major = 0
+	// Minor will be incremented on small releases.
+	Minor = 0
+	// Patch should be incremented on every released change.
+	Patch = 0
+	// PreRelease is an empty string for final releases, {alpha,beta} for pre-releases.
+	PreRelease = "beta"
+)
+
+// Version returns a tuple of (major, minor, patch).
+func Version() (int, int, int) {
+	return Major, Minor, Patch
+}
+
+// VersionString returns a "vMaj.Min.Patch-PreRelease" string.
+func VersionString() string {
+	return fmt.Sprintf("v%d.%d.%d-%s", Major, Minor, Patch, PreRelease)
+}