diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/binhex.py b/.gitmodules
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/binhex.py
rename to .gitmodules
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..287a87a4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 EcmaXp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/LICENSE-OpenComputers b/LICENSE-OpenComputers
new file mode 100644
index 00000000..0f19248a
--- /dev/null
+++ b/LICENSE-OpenComputers
@@ -0,0 +1,46 @@
+Copyright (c) 2013-2015 Florian "Sangar" Nücke
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+-------------------------------------------------------------------------------
+
+All images / textures and localization strings (resources) are put in the
+public domain, unless explicitly excluded below. More specifically, see CC0 1.0
+Universal:
+
+        http://creativecommons.org/publicdomain/zero/1.0/
+
+Contributions:
+  PixelToast - Capacitor textures.
+  asie - Disk drive inject/eject and floppy disk access sound samples.
+
+Thanks a lot!
+
+-------------------------------------------------------------------------------
+
+The font used for screens and for monospace text in manual is unscii, made by
+viznut, and was further expanded by asie. For more information, please see:
+
+        https://github.com/asiekierka/unscii-asie
+
+-------------------------------------------------------------------------------
+
+Assets from other sources:
+  HDD access samples based on this sample from freesound.org:
+    https://www.freesound.org/people/artykris/sounds/117401/
\ No newline at end of file
diff --git a/LICENSE-micropython b/LICENSE-micropython
new file mode 100644
index 00000000..e3474e33
--- /dev/null
+++ b/LICENSE-micropython
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013, 2014 Damien P. George
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/LICENSE-micropython-lib b/LICENSE-micropython-lib
new file mode 100644
index 00000000..87095e29
--- /dev/null
+++ b/LICENSE-micropython-lib
@@ -0,0 +1,287 @@
+micropython-lib consists of multiple modules from different sources and
+authors. Each module comes under its own licensing terms. Short name of
+a license can be found in a file within a module directory (usually
+metadata.txt or setup.py). Complete text of each license used is provided
+below. Files not belonging to a particular module are provided under MIT
+license, unless explicitly stated otherwise.
+
+=============== MIT License ===============
+
+The MIT License (MIT)
+
+Copyright (c) 2013, 2014 micropython-lib contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+=============== Python License ===============
+
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC.  Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com).  In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property.  Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition).  Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+    Release         Derived     Year        Owner       GPL-
+                    from                                compatible? (1)
+
+    0.9.0 thru 1.2              1991-1995   CWI         yes
+    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
+    1.6             1.5.2       2000        CNRI        no
+    2.0             1.6         2000        BeOpen.com  no
+    1.6.1           1.6         2001        CNRI        yes (2)
+    2.1             2.0+1.6.1   2001        PSF         no
+    2.0.1           2.0+1.6.1   2001        PSF         yes
+    2.1.1           2.1+2.0.1   2001        PSF         yes
+    2.1.2           2.1.1       2002        PSF         yes
+    2.1.3           2.1.2       2002        PSF         yes
+    2.2 and above   2.1.1       2001-now    PSF         yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+    the GPL.  All Python licenses, unlike the GPL, let you distribute
+    a modified version without making your changes open source.  The
+    GPL-compatible licenses make it possible to combine Python with
+    other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+    because its license has a choice of law clause.  According to
+    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+    is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013 Python Software Foundation; All Rights Reserved" are retained
+in Python alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee.  This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions.  Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee.  This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party.  As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee.  Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement.  This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013.  This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee.  This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+        ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands.  All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/LICENSE-mpack b/LICENSE-mpack
new file mode 100644
index 00000000..24654b2c
--- /dev/null
+++ b/LICENSE-mpack
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2016 Nicholas Fraser
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/LICENSE-msgpack-java b/LICENSE-msgpack-java
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/LICENSE-msgpack-java
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/LICENSE-musl b/LICENSE-musl
new file mode 100644
index 00000000..8c3a6d19
--- /dev/null
+++ b/LICENSE-musl
@@ -0,0 +1,180 @@
+musl as a whole is licensed under the following standard MIT license:
+
+----------------------------------------------------------------------
+Copyright © 2005-2014 Rich Felker, et al.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+----------------------------------------------------------------------
+
+Authors/contributors include:
+
+A. Wilcox
+Alex Dowad
+Alexander Monakov
+Andrew Kelley
+Anthony G. Basile
+Arvid Picciani
+Bartosz Brachaczek
+Bobby Bingham
+Boris Brezillon
+Brent Cook
+Chris Spiegel
+Clément Vasseur
+Daniel Micay
+Daniel Sabogal
+Daurnimator
+David Edelsohn
+Denys Vlasenko
+Dmitry Ivanov
+Dmitry V. Levin
+Emil Renner Berthing
+Felix Fietkau
+Felix Janda
+Gianluca Anzolin
+Hauke Mehrtens
+He X
+Hiltjo Posthuma
+Isaac Dunham
+Jaydeep Patil
+Jens Gustedt
+Jeremy Huntwork
+Jo-Philipp Wich
+Joakim Sindholt
+John Spencer
+Josiah Worcester
+Julien Ramseier
+Justin Cormack
+Khem Raj
+Kylie McClain
+Leah Neukirchen
+Luca Barbato
+Luka Perkov
+M Farkas-Dyck (Strake)
+Mahesh Bodapati
+Masanori Ogino
+Michael Forney
+Mikhail Kremnyov
+Natanael Copa
+Nicholas J. Kain
+orc
+Pascal Cuoq
+Petr Hosek
+Petr Skocik
+Pierre Carrier
+Reini Urban
+Rich Felker
+Richard Pennington
+Samuel Holland
+Shiz
+sin
+Solar Designer
+Stefan Kristiansson
+Szabolcs Nagy
+Timo Teräs
+Trutz Behn
+Valentin Ochs
+William Haddon
+William Pitcock
+
+Portions of this software are derived from third-party works licensed
+under terms compatible with the above MIT license:
+
+The TRE regular expression implementation (src/regex/reg* and
+src/regex/tre*) is Copyright © 2001-2008 Ville Laurikari and licensed
+under a 2-clause BSD license (license text in the source files). The
+included version has been heavily modified by Rich Felker in 2012, in
+the interests of size, simplicity, and namespace cleanliness.
+
+Much of the math library code (src/math/* and src/complex/*) is
+Copyright © 1993,2004 Sun Microsystems or
+Copyright © 2003-2011 David Schultz or
+Copyright © 2003-2009 Steven G. Kargl or
+Copyright © 2003-2009 Bruce D. Evans or
+Copyright © 2008 Stephen L. Moshier
+and labelled as such in comments in the individual source files. All
+have been licensed under extremely permissive terms.
+
+The ARM memcpy code (src/string/arm/memcpy_el.S) is Copyright © 2008
+The Android Open Source Project and is licensed under a two-clause BSD
+license. It was taken from Bionic libc, used on Android.
+
+The implementation of DES for crypt (src/crypt/crypt_des.c) is
+Copyright © 1994 David Burren. It is licensed under a BSD license.
+
+The implementation of blowfish crypt (src/crypt/crypt_blowfish.c) was
+originally written by Solar Designer and placed into the public
+domain. The code also comes with a fallback permissive license for use
+in jurisdictions that may not recognize the public domain.
+
+The smoothsort implementation (src/stdlib/qsort.c) is Copyright © 2011
+Valentin Ochs and is licensed under an MIT-style license.
+
+The BSD PRNG implementation (src/prng/random.c) and XSI search API
+(src/search/*.c) functions are Copyright © 2011 Szabolcs Nagy and
+licensed under following terms: "Permission to use, copy, modify,
+and/or distribute this code for any purpose with or without fee is
+hereby granted. There is no warranty."
+
+The x86_64 port was written by Nicholas J. Kain and is licensed under
+the standard MIT terms.
+
+The mips and microblaze ports were originally written by Richard
+Pennington for use in the ellcc project. The original code was adapted
+by Rich Felker for build system and code conventions during upstream
+integration. It is licensed under the standard MIT terms.
+
+The mips64 port was contributed by Imagination Technologies and is
+licensed under the standard MIT terms.
+
+The powerpc port was also originally written by Richard Pennington,
+and later supplemented and integrated by John Spencer. It is licensed
+under the standard MIT terms.
+
+All other files which have no copyright comments are original works
+produced specifically for use as part of this library, written either
+by Rich Felker, the main author of the library, or by one or more
+contibutors listed above. Details on authorship of individual files
+can be found in the git version control history of the project. The
+omission of copyright and license comments in each file is in the
+interest of source tree size.
+
+In addition, permission is hereby granted for all public header files
+(include/* and arch/*/bits/*) and crt files intended to be linked into
+applications (crt/*, ldso/dlstart.c, and arch/*/crt_arch.h) to omit
+the copyright notice and permission notice otherwise required by the
+license, and to use these files without any requirement of
+attribution. These files include substantial contributions from:
+
+Bobby Bingham
+John Spencer
+Nicholas J. Kain
+Rich Felker
+Richard Pennington
+Stefan Kristiansson
+Szabolcs Nagy
+
+all of whom have explicitly granted such permission.
+
+This file previously contained text expressing a belief that most of
+the files covered by the above exception were sufficiently trivial not
+to be subject to copyright, resulting in confusion over whether it
+negated the permissions granted in the license. In the spirit of
+permissive licensing, and of not having licensing issues being an
+obstacle to adoption, that text has been removed.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..24a65c55
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# OpenPython
+
+![Python Interpreter in OpenPython v1.0](https://user-images.githubusercontent.com/21021916/47162981-0163aa80-d330-11e8-9e54-7b470a5e67b0.png)
+
+**OpenPython makes micropython available on OpenComputers.**
+
+micropython is Python for embedded devices and is different from CPython.
+Currently, micropython is partially compatible with Python version 3.4 as of October 2018.
+
+Features include:
+- Fully persistable.
+- Runs in a sandboxed environment and does not require a separate native DLL.
+- Supports virtual file system.
+- It can call other components and supports Lua architecture's UserData.
+
+Limitations include:
+- The firmware is 256 KB and the memory limit has the same limit as the Lua architecture, but the stack is allocated in a separate memory.
+- Most modules are difficult to use because only part of the standard library ("batteries") is included.
+- The operating system is not yet fully implemented, so for now only the Python interpreter runs.
+- Ctrl + C is not supported, so if you accidentally run infinite repeated code, the only way to stop it is to turn the computer off and then on.
+
+To use the Python architecture, you need the following items:
+- EEPROM (OpenPython): I'm sorry, but you have to bring items from the Creative tab.
+- Floppy Disk (OpenPython OS): This item can be obtained by crafting a floppy disk with a Scrench, or you can find it on the Creative tab.
+- OpenPython CPU: You can choose OpenPython architecture by holding Shift + Right Click on the Lua architecture CPU.
+
+It is still unstable, but we plan to make further improvements in the future.
+
+I need help from people who are familiar with OpenComputers and Python.
+
+Thank you for playing.
+
+Links
+- [minecraft.curseforge.com/projects/openpython](https://minecraft.curseforge.com/projects/openpython)
+- [OpenPython in oc.cil.li](https://oc.cil.li/index.php?/topic/1744-mc1122oc17-openpython%C2%A0micropython-available-on-opencomputers/)
diff --git a/build.gradle b/build.gradle
index a3408132..b328db7f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1,5 +1,5 @@
 buildscript {
-    ext.kotlin_version = '1.2.70'
+    ext.kotlin_version = '1.3.10'
     repositories {
         jcenter()
         maven {
diff --git a/gradle.properties b/gradle.properties
index c9a8a817..48502f6d 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,14 +1,16 @@
 # forge_gradle_version = 2.3-SNAPSHOT
 java_version = 1.8
-forgelin_version = 1.7.3
-# kotlin_version = 1.2.70
+forgelin_version = 1.8.2
+kotlin_version = 1.3.10
 
 minecraft_version=1.12.2
 minecraft_mappings=snapshot_20180704
 forge_version=14.23.4.2727
 
 mod_name=OpenPython
-mod_version=1.0.1
+mod_version=1.1.0
 mod_group=kr.pe.ecmaxp.openpython
 
 opencomputers_version = 1.7
+
+org.gradle.jvmargs=-Xmx2048m
diff --git a/micropython b/micropython
deleted file mode 160000
index 8921adde..00000000
--- a/micropython
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 8921adde620b3d93442269bf103f4b705a5a3129
diff --git a/micropython-lib b/micropython-lib
deleted file mode 160000
index f20d89c6..00000000
--- a/micropython-lib
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f20d89c6aad9443a696561ca2a01f7ef0c8fb302
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/OpenPython.kt b/src/main/java/kr/pe/ecmaxp/openpython/OpenPython.kt
index 563dffa2..5572baab 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/OpenPython.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/OpenPython.kt
@@ -1,8 +1,8 @@
 package kr.pe.ecmaxp.openpython
 
 import kr.pe.ecmaxp.openpython.arch.OpenComputersLikeSaveHandler
-import kr.pe.ecmaxp.openpython.arch.OpenPythonFirmware
-import kr.pe.ecmaxp.openpython.arch.versions.OpenPythonArchitecture_v1_0
+import kr.pe.ecmaxp.openpython.arch.versions.v1.OpenPythonArchitectureV1_0
+import kr.pe.ecmaxp.openpython.arch.versions.v1.OpenPythonArchitectureV1_1
 import li.cil.oc.api.FileSystem
 import li.cil.oc.api.Items
 import li.cil.oc.api.Machine
@@ -23,12 +23,14 @@ import net.minecraftforge.fml.common.event.FMLPreInitializationEvent
 object OpenPython {
     const val MODID = "openpython"
     const val NAME = "OpenPython"
-    const val VERSION = "1.0.1"
+    const val VERSION = "1.1.0"
+    val DEBUG = true
 
     @Mod.EventHandler
     fun preInit(event: FMLPreInitializationEvent) {
         MinecraftForge.EVENT_BUS.register(OpenComputersLikeSaveHandler)
-        Machine.add(OpenPythonArchitecture_v1_0::class.java)
+        Machine.add(OpenPythonArchitectureV1_0::class.java)
+        Machine.add(OpenPythonArchitectureV1_1::class.java)
     }
 
     @Mod.EventHandler
@@ -36,13 +38,27 @@ object OpenPython {
         Items.registerFloppy(
                 "openpython",
                 EnumDyeColor.BLUE,
-                { FileSystem.fromClass(this.javaClass, OpenPython.MODID, "opos") },
-                true
-        ).setStackDisplayName("OpenPython OS (Operating System for micropython)")
+                { FileSystem.fromClass(this.javaClass, OpenPython.MODID, "opos/v1.0") },
+                false
+        ).setStackDisplayName("[Deprecated] OpenPython OS v1.0 for OpenPython v1.0")
+
+        Items.registerFloppy(
+                "openpy v1.1",
+                EnumDyeColor.BLUE,
+                { FileSystem.fromClass(this.javaClass, OpenPython.MODID, "opos/v1.1") },
+                false
+        ).setStackDisplayName("OpenPython OS v1.1 for OpenPython v1.1")
+
+        Items.registerEEPROM(
+                "EEPROM for OpenPython v1.0",
+                OpenPythonArchitectureV1_0.LATEST_FIRMWARE.loadEEPROM(),
+                byteArrayOf(),
+                false
+        )
 
         Items.registerEEPROM(
-                "EEPROM (OpenPython BIOS)",
-                OpenPythonFirmware.v1_0_1.loadEEPROM(),
+                "[Deprecated] EEPROM for OpenPython v1.1",
+                OpenPythonArchitectureV1_1.LATEST_FIRMWARE.loadEEPROM(),
                 byteArrayOf(),
                 false
         )
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonArchitectureLogic.kt b/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonArchitectureLogic.kt
new file mode 100644
index 00000000..50fe7939
--- /dev/null
+++ b/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonArchitectureLogic.kt
@@ -0,0 +1,9 @@
+package kr.pe.ecmaxp.openpython
+
+import kr.pe.ecmaxp.openpython.arch.OpenPythonFirmware
+import li.cil.oc.api.machine.Machine
+
+interface OpenPythonArchitectureLogic {
+    val LATEST_FIRMWARE: OpenPythonFirmware
+    fun spawn(machine: Machine, memorySize: Int): OpenPythonVirtualMachine
+}
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonVirtualMachine.kt b/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonVirtualMachine.kt
new file mode 100644
index 00000000..86855b94
--- /dev/null
+++ b/src/main/java/kr/pe/ecmaxp/openpython/OpenPythonVirtualMachine.kt
@@ -0,0 +1,17 @@
+package kr.pe.ecmaxp.openpython
+
+import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessagePacker
+import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessageUnpacker
+import li.cil.oc.api.Persistable
+import li.cil.oc.api.machine.ExecutionResult
+import li.cil.oc.api.machine.Value
+
+interface OpenPythonVirtualMachine : Persistable {
+    val memorySize: Int
+
+    fun close()
+    fun step(synchronized: Boolean): ExecutionResult
+
+    fun unpackExtension(unpacker: MessageUnpacker): Any?
+    fun packValue(packer: MessagePacker, value: Value)
+}
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonArchitecture.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonArchitecture.kt
index 2bcd8e64..4b1f885f 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonArchitecture.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonArchitecture.kt
@@ -1,5 +1,7 @@
 package kr.pe.ecmaxp.openpython.arch
 
+import kr.pe.ecmaxp.openpython.OpenPythonArchitectureLogic
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
 import kr.pe.ecmaxp.openpython.arch.consts.KB
 import li.cil.oc.api.Driver
 import li.cil.oc.api.driver.item.Memory
@@ -10,7 +12,7 @@ import net.minecraft.item.ItemStack
 import net.minecraft.nbt.NBTTagCompound
 
 
-open class OpenPythonArchitecture(val machine: Machine) : Architecture {
+open class OpenPythonArchitecture(val machine: Machine, val logic: OpenPythonArchitectureLogic) : Architecture {
     var totalMemory = 0
     var vm: OpenPythonVirtualMachine? = null
     var lastSynchronizedResult: ExecutionResult? = null
@@ -23,9 +25,8 @@ open class OpenPythonArchitecture(val machine: Machine) : Architecture {
         close()
 
         try {
-            val firmware = OpenPythonFirmware.v1_0_1
             recomputeMemory(machine.host().internalComponents())
-            vm = OpenPythonVirtualMachine(machine, totalMemory, firmware)
+            vm = logic.spawn(machine, totalMemory)
         } catch (e: Exception) {
             e.printStackTrace()
         }
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonFirmware.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonFirmware.kt
index 4d16557c..a3f902e6 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonFirmware.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonFirmware.kt
@@ -10,10 +10,6 @@ class OpenPythonFirmware(val name: String) {
     val protocol = 1
     private val path: String = "/assets/${OpenPython.MODID}/firmwares/$name"
 
-    companion object {
-        val v1_0_1 = OpenPythonFirmware("v1.0.1")
-    }
-
     init {
         if (name.indexOf('/') >= 0)
             throw Exception("Invalid Filename")
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/Msgpack.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/Msgpack.kt
index 5cef0147..fd9c1dcc 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/Msgpack.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/Msgpack.kt
@@ -1,6 +1,6 @@
 package kr.pe.ecmaxp.openpython.arch.msgpack
 
-import kr.pe.ecmaxp.openpython.arch.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
 
 class Msgpack(val vm: OpenPythonVirtualMachine? = null) {
     fun loads(buffer: ByteArray): Any? {
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackPacker.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackPacker.kt
index 759986de..50dececa 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackPacker.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackPacker.kt
@@ -1,6 +1,6 @@
 package kr.pe.ecmaxp.openpython.arch.msgpack
 
-import kr.pe.ecmaxp.openpython.arch.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
 import li.cil.oc.api.machine.Signal
 import li.cil.oc.api.machine.Value
 import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessagePack
@@ -70,11 +70,7 @@ class MsgpackPacker(val vm: OpenPythonVirtualMachine? = null) {
                     packString(o.toString())
                 }
                 is Value -> {
-                    val packer = MsgpackPacker(vm)
-                    packer.pack(vm!!.state.valueMap.register(o).id)
-                    val buffer = packer.toByteArray()
-                    packExtensionTypeHeader(1, buffer.size)
-                    writePayload(buffer)
+                    vm!!.packValue(this, o)
                 }
                 else -> {
                     throw Exception("mismatch type ${o.javaClass} => $o")
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackUnpacker.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackUnpacker.kt
index f24d5fab..ab8825ad 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackUnpacker.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/msgpack/MsgpackUnpacker.kt
@@ -1,6 +1,6 @@
 package kr.pe.ecmaxp.openpython.arch.msgpack
 
-import kr.pe.ecmaxp.openpython.arch.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
 import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessagePack
 import kr.pe.ecmaxp.openpython.repack.org.msgpack.value.ValueType
 
@@ -41,16 +41,7 @@ class MsgpackUnpacker(buffer: ByteArray, val vm: OpenPythonVirtualMachine? = nul
                     map
                 }
                 ValueType.EXTENSION -> {
-                    val ext = unpackExtensionTypeHeader()
-                    val payload = readPayload(ext.length)
-                    when (ext.type.toInt() and 0xFF) {
-                        1 -> {
-                            val unpacker = MsgpackUnpacker(payload, vm)
-                            val pointer = unpacker.unpack() as Int
-                            vm!!.state.valueMap[pointer]
-                        }
-                        else -> TODO()
-                    }
+                    vm!!.unpackExtension(this)
                 }
                 else -> throw Exception()
             }
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/call/ArgumentsImpl.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/call/ArgumentsImpl.kt
index f0b85ef2..ae1d533b 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/call/ArgumentsImpl.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/call/ArgumentsImpl.kt
@@ -41,14 +41,14 @@ class ArgumentsImpl(val args: MutableList<Any?>) : Arguments {
         return if (value != null && value is T) value else default
     }
 
-    override fun optAny(index: Int, default: Any?): Any? = optValue(index, "hello", default)
-    override fun optBoolean(index: Int, default: Boolean): Boolean = optValue(index, "hello", default)
-    override fun optInteger(index: Int, default: Int): Int = optValue(index, "hello", default)
-    override fun optDouble(index: Int, default: Double): Double = optValue(index, "hello", default)
-    override fun optString(index: Int, default: String): String = optValue(index, "hello", default)
-    override fun optByteArray(index: Int, default: ByteArray): ByteArray = optValue(index, "hello", default)
-    override fun optTable(index: Int, default: MutableMap<Any?, Any?>): MutableMap<Any?, Any?> = optValue(index, "hello", default)
-    override fun optItemStack(index: Int, default: ItemStack): ItemStack = optValue(index, "hello", default)
+    override fun optAny(index: Int, default: Any?): Any? = optValue(index, "value", default)
+    override fun optBoolean(index: Int, default: Boolean): Boolean = optValue(index, "boolean", default)
+    override fun optInteger(index: Int, default: Int): Int = optValue(index, "number", default)
+    override fun optDouble(index: Int, default: Double): Double = optValue(index, "number", default)
+    override fun optString(index: Int, default: String): String = optValue(index, "string", default)
+    override fun optByteArray(index: Int, default: ByteArray): ByteArray = optValue(index, "string", default)
+    override fun optTable(index: Int, default: MutableMap<Any?, Any?>): MutableMap<Any?, Any?> = optValue(index, "table", default)
+    override fun optItemStack(index: Int, default: ItemStack): ItemStack = optValue(index, "itemstack", default)
 
 
     private fun safeValue(index: Int): Any? = if (0 < index && index < args.size) null else args[index]
@@ -86,7 +86,6 @@ class ArgumentsImpl(val args: MutableList<Any?>) : Arguments {
         val value = safeValue(index)
         return when (value) {
             is String -> true
-            is ByteArray -> true
             else -> false
         }
     }
@@ -94,7 +93,6 @@ class ArgumentsImpl(val args: MutableList<Any?>) : Arguments {
     override fun isByteArray(index: Int): Boolean {
         val value = safeValue(index)
         return when (value) {
-            is String -> true
             is ByteArray -> true
             else -> false
         }
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/Interrupt.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/Interrupt.kt
new file mode 100644
index 00000000..c4684eae
--- /dev/null
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/Interrupt.kt
@@ -0,0 +1,19 @@
+package kr.pe.ecmaxp.openpython.arch.types.interrupt
+
+import kr.pe.ecmaxp.openpython.arch.types.call.InvokeResult
+
+interface Interrupt {
+    fun readBuffer(): ByteArray
+    fun readBuffer(address: Int, size: Int): ByteArray
+
+    fun readString(): String?
+    fun readString(address: Int, maxSize: Int): String
+
+    fun readObject(): Any?
+
+    fun responseNone(): Int
+    fun responseResult(ret: InvokeResult): Int
+    fun responseValue(value: Any?): Int
+    fun responseError(value: Throwable): Int
+    fun responseBuffer(buffer: ByteArray): Int
+}
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/InterruptHandler.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/InterruptHandler.kt
new file mode 100644
index 00000000..1130179b
--- /dev/null
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/types/interrupt/InterruptHandler.kt
@@ -0,0 +1,5 @@
+package kr.pe.ecmaxp.openpython.arch.types.interrupt
+
+interface InterruptHandler {
+    operator fun invoke(interrupt: Interrupt, synchronized: Boolean)
+}
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/OpenPythonArchitecture_v1_0.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/OpenPythonArchitecture_v1_0.kt
deleted file mode 100644
index 01c2b476..00000000
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/OpenPythonArchitecture_v1_0.kt
+++ /dev/null
@@ -1,10 +0,0 @@
-package kr.pe.ecmaxp.openpython.arch.versions
-
-import kr.pe.ecmaxp.openpython.arch.OpenPythonArchitecture
-import li.cil.oc.api.machine.Architecture
-import li.cil.oc.api.machine.Machine
-
-
-@Suppress("unused", "ClassName")
-@Architecture.Name("OpenPython v1.0")
-class OpenPythonArchitecture_v1_0(machine: Machine) : OpenPythonArchitecture(machine)
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonArchitectureV1.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonArchitectureV1.kt
new file mode 100644
index 00000000..851154fc
--- /dev/null
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonArchitectureV1.kt
@@ -0,0 +1,36 @@
+package kr.pe.ecmaxp.openpython.arch.versions.v1
+
+import kr.pe.ecmaxp.openpython.OpenPythonArchitectureLogic
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.arch.OpenPythonArchitecture
+import kr.pe.ecmaxp.openpython.arch.OpenPythonFirmware
+import li.cil.oc.api.machine.Architecture
+import li.cil.oc.api.machine.Machine
+
+
+@Suppress("unused", "ClassName")
+@Architecture.Name("OpenPython v1.0")
+class OpenPythonArchitectureV1_0(machine: Machine) : OpenPythonArchitecture(machine, this) {
+    companion object : OpenPythonArchitectureLogic {
+        val FIRMWARE_v1_0_1: OpenPythonFirmware = OpenPythonFirmware("v1.0.1")
+        override val LATEST_FIRMWARE: OpenPythonFirmware = FIRMWARE_v1_0_1
+
+        override fun spawn(machine: Machine, memorySize: Int): OpenPythonVirtualMachine {
+            return OpenPythonVirtualMachineV1(machine, memorySize, LATEST_FIRMWARE)
+        }
+    }
+}
+
+
+@Suppress("unused", "ClassName")
+@Architecture.Name("OpenPython v1.1")
+class OpenPythonArchitectureV1_1(machine: Machine) : OpenPythonArchitecture(machine, this) {
+    companion object : OpenPythonArchitectureLogic {
+        val FIRMWARE_v1_1_0: OpenPythonFirmware = OpenPythonFirmware("v1.1.0")
+        override val LATEST_FIRMWARE: OpenPythonFirmware = FIRMWARE_v1_1_0
+
+        override fun spawn(machine: Machine, memorySize: Int): OpenPythonVirtualMachine {
+            return OpenPythonVirtualMachineV1(machine, memorySize, LATEST_FIRMWARE)
+        }
+    }
+}
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonInterruptHandler.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptHandlerV1.kt
similarity index 87%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonInterruptHandler.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptHandlerV1.kt
index 54715f67..f715ee86 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonInterruptHandler.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptHandlerV1.kt
@@ -1,8 +1,10 @@
-package kr.pe.ecmaxp.openpython.arch
+package kr.pe.ecmaxp.openpython.arch.versions.v1
 
+import kr.pe.ecmaxp.openpython.OpenPython
+import kr.pe.ecmaxp.openpython.arch.types.interrupt.InterruptHandler
 import kr.pe.ecmaxp.openpython.arch.consts.*
 import kr.pe.ecmaxp.openpython.arch.state.FileHandle
-import kr.pe.ecmaxp.openpython.arch.types.Interrupt
+import kr.pe.ecmaxp.openpython.arch.types.interrupt.Interrupt
 import kr.pe.ecmaxp.openpython.arch.types.call.*
 import kr.pe.ecmaxp.thumbsf.consts.R7
 import kr.pe.ecmaxp.thumbsf.signal.ControlPauseSignal
@@ -18,8 +20,13 @@ import java.io.FileNotFoundException
 import java.nio.charset.StandardCharsets
 import li.cil.oc.api.Machine as MachineAPI
 
-class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
-    operator fun invoke(intr: Interrupt, synchronized: Boolean) {
+class OpenPythonInterruptHandlerV1(val vm: OpenPythonVirtualMachineV1) : InterruptHandler {
+    val machine: Machine get() = vm.machine
+    val state: OpenPythonVirtualMachineStateV1 get() = vm.state
+
+    override operator fun invoke(interrupt: Interrupt, synchronized: Boolean) {
+        val intr = interrupt as OpenPythonInterruptV1
+
         try {
             val code: Int = when (intr.imm and (0xFFFF shl 16)) {
                 SYS_CONTROL -> handleControl(intr, synchronized)
@@ -37,7 +44,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
             intr.cpu.regs[0] = code
         } catch (e: UnknownInterrupt) {
             e.printStackTrace()
-            throw ControlStopSignal(ExecutionResult.Error("Unknown Interrupt?"))
+            throw ControlStopSignal(ExecutionResult.Error("Unknown OpenPythonInterruptV1"))
         } catch (e: LimitReachedException) {
             throw ControlPauseSignal(ExecutionResult.SynchronizedCall())
         }
@@ -49,10 +56,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
 
     inner class UnknownInterrupt : Exception()
 
-    val machine: Machine get() = vm.machine
-    val state: OpenPythonVirtualMachineState get() = vm.state
-
-    private fun handleControl(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleControl(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_CONTROL_SHUTDOWN -> throw ControlStopSignal(ExecutionResult.Shutdown(false))
             SYS_CONTROL_REBOOT -> throw ControlStopSignal(ExecutionResult.Shutdown(true))
@@ -81,15 +85,18 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleDebug(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleDebug(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         val buf = intr.readBuffer()
         val str = String(buf, StandardCharsets.UTF_8)
 
-        print(str)
+        @Suppress("ConstantConditionIf")
+        if (OpenPython.DEBUG)
+            print(str)
+
         return 0
     }
 
-    private fun handleSignal(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleSignal(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_SIGNAL_POP -> {
                 val signal: Signal? = machine.popSignal()
@@ -111,14 +118,14 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleComponents(intr: Interrupt, synchronized: Boolean): Int {
+    private fun handleComponents(intr: OpenPythonInterruptV1, synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_COMPONENT_INVOKE -> {
                 val obj = intr.readObject()
                 val call = ComponentInvoke.fromArray(obj as Array<*>)
                         ?: return intr.responseError(Exception("Invalid invoke"))
 
-                if (!synchronized) {
+                if (!synchronized) { // in thread pool
                     val node = machine.node().network().node(call.component) as? Component
                             ?: return intr.responseError(Exception("Invalid Component"))
 
@@ -211,7 +218,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleValue(intr: Interrupt, synchronized: Boolean): Int {
+    private fun handleValue(intr: OpenPythonInterruptV1, synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_VALUE_INVOKE -> {
                 val obj = intr.readObject()
@@ -293,7 +300,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleComputer(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleComputer(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         when (intr.imm) {
             SYS_COMPUTER_LAST_ERROR -> return intr.responseValue(machine.lastError())
             SYS_COMPUTER_BEEP_1 -> {
@@ -393,7 +400,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleInfo(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleInfo(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_INFO_VERSION -> 0x01000000 // 1.0.0.0
             SYS_INFO_RAM_SIZE -> vm.memorySize
@@ -401,7 +408,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleTimer(intr: Interrupt, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
+    private fun handleTimer(intr: OpenPythonInterruptV1, @Suppress("UNUSED_PARAMETER") synchronized: Boolean): Int {
         return when (intr.imm) {
             SYS_TIMER_TICKS_MS -> System.currentTimeMillis().toInt()
             SYS_TIMER_TICKS_US -> System.nanoTime().toInt()
@@ -414,7 +421,7 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
         }
     }
 
-    private fun handleVirtualFileSystem(intr: Interrupt, synchronized: Boolean): Int {
+    private fun handleVirtualFileSystem(intr: OpenPythonInterruptV1, synchronized: Boolean): Int {
         if (!synchronized)
             throw ControlPauseSignal(ExecutionResult.SynchronizedCall())
 
@@ -463,31 +470,41 @@ class OpenPythonInterruptHandler(val vm: OpenPythonVirtualMachine) {
                     }
                 }
                 SYS_VFS_READ -> {
-                    val ret = fh("read", intr.r1)(machine)
-                    when {
-                        ret.error != null -> {
-                            ret.error.printStackTrace()
-                            return 1
-                        }
-                        ret.args != null -> {
-                            if (ret.args.size != 1) {
+                    var size = Math.min(intr.r1, 4096)
+                    var pos = 0
+                    val buffer = ByteArray(size)
+
+                    while (pos < size) {
+                        val ret = fh("read", size - pos)(machine)
+                        when {
+                            ret.error != null -> {
+                                ret.error.printStackTrace()
                                 return MP_EPERM
                             }
+                            ret.args != null -> {
+                                if (ret.args.size != 1) {
+                                    return MP_EPERM
+                                }
 
-                            val arg = ret.args[0]
-                            return when (arg) {
-                                is ByteArray -> {
-                                    intr.memory.writeBuffer(intr.r2, arg)
-                                    intr.memory.writeInt(intr.r3, arg.size)
-                                    0
+                                val arg = ret.args[0]
+                                when (arg) {
+                                    is ByteArray -> {
+                                        if (arg.size == 0) {
+                                            size = pos
+                                        } else {
+                                            System.arraycopy(arg, 0, buffer, pos, Math.min(arg.size, size - pos))
+                                            pos += arg.size
+                                        }
+                                    }
+                                    null -> size = pos
                                 }
-                                null -> // EOF
-                                    0
-                                else -> MP_EPERM
                             }
                         }
-                        else -> return MP_EPERM
                     }
+
+                    intr.memory.writeBuffer(intr.r2, buffer)
+                    intr.memory.writeInt(intr.r3, pos)
+                    return MP_OK
                 }
                 SYS_VFS_WRITE -> {
                     val buf = intr.readBuffer(intr.r1, intr.r2)
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/Interrupt.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptV1.kt
similarity index 60%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/types/Interrupt.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptV1.kt
index 56cccdf7..2678f4c7 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/types/Interrupt.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonInterruptV1.kt
@@ -1,14 +1,14 @@
-package kr.pe.ecmaxp.openpython.arch.types
+package kr.pe.ecmaxp.openpython.arch.versions.v1
 
-import kr.pe.ecmaxp.openpython.arch.OpenPythonMemoryRegion
-import kr.pe.ecmaxp.openpython.arch.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
 import kr.pe.ecmaxp.openpython.arch.msgpack.Msgpack
 import kr.pe.ecmaxp.openpython.arch.types.call.InvokeResult
+import kr.pe.ecmaxp.openpython.arch.types.interrupt.Interrupt
 import kr.pe.ecmaxp.thumbsf.CPU
 import kr.pe.ecmaxp.thumbsf.Memory
 import kr.pe.ecmaxp.thumbsf.consts.*
 
-class Interrupt(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null) {
+class OpenPythonInterruptV1(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null) : Interrupt {
     val imm: Int
     val r0: Int
     val r1: Int
@@ -28,19 +28,18 @@ class Interrupt(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null
 
     val memory: Memory get() = cpu.memory
 
-    fun readBuffer(address: Int, size: Int): ByteArray = memory.readBuffer(address, size)
-    fun readBuffer(): ByteArray = readBuffer(r0, r1)
+    override fun readBuffer(): ByteArray = readBuffer(r0, r1)
+    override fun readBuffer(address: Int, size: Int): ByteArray = memory.readBuffer(address, size)
 
-    fun readString(address: Int, maxSize: Int): String = memory.readString(address, maxSize)
-    fun readString(): String? = readString(r0, r1)
+    override fun readString(): String? = readString(r0, r1)
+    override fun readString(address: Int, maxSize: Int): String = memory.readString(address, maxSize)
 
-    fun readObject(): Any? = Msgpack(vm).loads(readBuffer())
+    override fun readObject(): Any? = Msgpack(vm).loads(readBuffer())
 
+    override fun responseNone(): Int = 0
 
-    fun responseNone(): Int = 0
-
-    fun responseError(value: Throwable): Int {
-        val bufAddress = OpenPythonMemoryRegion.SYSCALL.address
+    override fun responseError(value: Throwable): Int {
+        val bufAddress = OpenPythonMemoryRegionV1.SYSCALL.address
         val buffer = Msgpack(vm).dumps(value)
         memory.writeInt(bufAddress, 0) // + 0 | 1 = OK (msgpack)
         memory.writeInt(bufAddress + 4, bufAddress + 12) // + 4
@@ -49,11 +48,11 @@ class Interrupt(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null
         return bufAddress
     }
 
-    fun responseValue(value: Any?): Int {
+    override fun responseValue(value: Any?): Int {
         if (value == null)
             return responseNone()
 
-        val bufAddress = OpenPythonMemoryRegion.SYSCALL.address
+        val bufAddress = OpenPythonMemoryRegionV1.SYSCALL.address
         val buffer = Msgpack(vm).dumps(value)
         memory.writeInt(bufAddress, 1) // + 0 = OK (msgpack)
         memory.writeInt(bufAddress + 4, bufAddress + 12) // + 4
@@ -62,8 +61,8 @@ class Interrupt(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null
         return bufAddress
     }
 
-    fun responseBuffer(buffer: ByteArray): Int {
-        val bufAddress = OpenPythonMemoryRegion.SYSCALL.address
+    override fun responseBuffer(buffer: ByteArray): Int {
+        val bufAddress = OpenPythonMemoryRegionV1.SYSCALL.address
         memory.writeInt(bufAddress, bufAddress + 8) // + 0 | 0 = ERROR
         memory.writeInt(bufAddress + 4, buffer.size) // + 4
         memory.writeBuffer(bufAddress + 8, buffer) // + 8
@@ -71,7 +70,7 @@ class Interrupt(val cpu: CPU, imm: Int, val vm: OpenPythonVirtualMachine? = null
         return bufAddress
     }
 
-    fun responseResult(ret: InvokeResult): Int {
+    override fun responseResult(ret: InvokeResult): Int {
         return if (ret.error == null) responseValue(ret.args) else responseError(ret.error)
     }
 }
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonMemoryRegion.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonMemoryRegionV1.kt
similarity index 70%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonMemoryRegion.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonMemoryRegionV1.kt
index d0646254..9a6e0bdf 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonMemoryRegion.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonMemoryRegionV1.kt
@@ -1,11 +1,11 @@
-package kr.pe.ecmaxp.openpython.arch
+package kr.pe.ecmaxp.openpython.arch.versions.v1
 
 import kr.pe.ecmaxp.openpython.arch.consts.KB
 import kr.pe.ecmaxp.thumbsf.MemoryFlag
 import kr.pe.ecmaxp.thumbsf.MemoryFlag.RW
 import kr.pe.ecmaxp.thumbsf.MemoryFlag.RX
 
-enum class OpenPythonMemoryRegion(val address: Int, val size: Int, val flag: MemoryFlag) {
+enum class OpenPythonMemoryRegionV1(val address: Int, val size: Int, val flag: MemoryFlag) {
     FLASH(0x08000000, 256 * KB, RX),
     SRAM(0x20000000, 64 * KB, RW),
     RAM(0x60000000, 256 * KB, RW), // dynamic size
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/consts/OpenPythonSystemCallTable.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonSystemCallTableV1.kt
similarity index 98%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/consts/OpenPythonSystemCallTable.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonSystemCallTableV1.kt
index a4cd7072..aa818021 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/consts/OpenPythonSystemCallTable.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonSystemCallTableV1.kt
@@ -1,4 +1,4 @@
-package kr.pe.ecmaxp.openpython.arch.consts
+package kr.pe.ecmaxp.openpython.arch.versions.v1
 
 const val SYS_DEBUG = 0x010000
 
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachineState.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineStateV1.kt
similarity index 92%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachineState.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineStateV1.kt
index d61501d6..dfa48272 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachineState.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineStateV1.kt
@@ -1,4 +1,4 @@
-package kr.pe.ecmaxp.openpython.arch
+package kr.pe.ecmaxp.openpython.arch.versions.v1
 
 import kr.pe.ecmaxp.openpython.arch.state.FileHandle
 import kr.pe.ecmaxp.openpython.arch.state.ValueContainerMap
@@ -7,7 +7,7 @@ import net.minecraft.nbt.NBTTagCompound
 import net.minecraft.nbt.NBTTagList
 import java.util.*
 
-class OpenPythonVirtualMachineState : Persistable {
+class OpenPythonVirtualMachineStateV1 : Persistable {
     var fdCount = 3
     var fdMap: HashMap<Int, FileHandle> = HashMap()
     var valueMap: ValueContainerMap = ValueContainerMap()
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachine.kt b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineV1.kt
similarity index 63%
rename from src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachine.kt
rename to src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineV1.kt
index 0de4709d..faddba56 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/arch/OpenPythonVirtualMachine.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/arch/versions/v1/OpenPythonVirtualMachineV1.kt
@@ -1,7 +1,15 @@
-package kr.pe.ecmaxp.openpython.arch
-
-import kr.pe.ecmaxp.openpython.arch.OpenPythonMemoryRegion.*
-import kr.pe.ecmaxp.openpython.arch.types.Interrupt
+package kr.pe.ecmaxp.openpython.arch.versions.v1
+
+import kr.pe.ecmaxp.openpython.OpenPythonVirtualMachine
+import kr.pe.ecmaxp.openpython.arch.OpenComputersLikeSaveHandler
+import kr.pe.ecmaxp.openpython.arch.OpenPythonFirmware
+import kr.pe.ecmaxp.openpython.arch.msgpack.MsgpackPacker
+import kr.pe.ecmaxp.openpython.arch.msgpack.MsgpackUnpacker
+import kr.pe.ecmaxp.openpython.arch.types.interrupt.Interrupt
+import kr.pe.ecmaxp.openpython.arch.types.interrupt.InterruptHandler
+import kr.pe.ecmaxp.openpython.arch.versions.v1.OpenPythonMemoryRegionV1.*
+import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessagePacker
+import kr.pe.ecmaxp.openpython.repack.org.msgpack.core.MessageUnpacker
 import kr.pe.ecmaxp.thumbsf.CPU
 import kr.pe.ecmaxp.thumbsf.consts.I0
 import kr.pe.ecmaxp.thumbsf.consts.PC
@@ -9,6 +17,7 @@ import kr.pe.ecmaxp.thumbsf.exc.InvalidMemoryException
 import kr.pe.ecmaxp.thumbsf.signal.ControlSignal
 import li.cil.oc.api.machine.ExecutionResult
 import li.cil.oc.api.machine.Machine
+import li.cil.oc.api.machine.Value
 import net.minecraft.nbt.NBTTagCompound
 import net.minecraft.nbt.NBTTagList
 import java.io.ByteArrayOutputStream
@@ -16,10 +25,32 @@ import java.util.zip.GZIPInputStream
 import java.util.zip.GZIPOutputStream
 
 
-class OpenPythonVirtualMachine internal constructor(val machine: Machine, val memorySize: Int, var firmware: OpenPythonFirmware) {
+class OpenPythonVirtualMachineV1 internal constructor(val machine: Machine, override val memorySize: Int, val firmware: OpenPythonFirmware) : OpenPythonVirtualMachine {
+    override fun packValue(packer: MessagePacker, value: Value) {
+        val newPacker = MsgpackPacker(this)
+        newPacker.pack(state.valueMap.register(value).id)
+        val buffer = newPacker.toByteArray()
+        packer.packExtensionTypeHeader(1, buffer.size)
+        packer.writePayload(buffer)
+    }
+
+    override fun unpackExtension(unpacker: MessageUnpacker): Any? {
+        val ext = unpacker.unpackExtensionTypeHeader()
+        val payload = unpacker.readPayload(ext.length)
+
+        return when (ext.type.toInt() and 0xFF) {
+            1 -> {
+                val newUnpacker = MsgpackUnpacker(payload, this)
+                val pointer = newUnpacker.unpack() as Int
+                state.valueMap[pointer]
+            }
+            else -> TODO()
+        }
+    }
+
     val cpu: CPU = CPU()
-    var state: OpenPythonVirtualMachineState = OpenPythonVirtualMachineState()
-    var interruptHandler: OpenPythonInterruptHandler = OpenPythonInterruptHandler(this)
+    var state: OpenPythonVirtualMachineStateV1 = OpenPythonVirtualMachineStateV1()
+    var interruptHandler: InterruptHandler = OpenPythonInterruptHandlerV1(this)
 
     init {
         val memory = cpu.memory.apply {
@@ -32,14 +63,14 @@ class OpenPythonVirtualMachine internal constructor(val machine: Machine, val me
         cpu.regs[PC] = memory.readInt(FLASH.address + 4) and I0.inv()
     }
 
-    fun close() {
+    override fun close() {
         // TODO: free memory
     }
 
-    fun step(synchronized: Boolean): ExecutionResult {
+    override fun step(synchronized: Boolean): ExecutionResult {
         return try {
             cpu.run(if (synchronized) 1 else 10000000) {
-                val interrupt = Interrupt(cpu, it, this)
+                val interrupt = OpenPythonInterruptV1(cpu, it, this)
                 interruptHandler(interrupt, synchronized)
             }
 
@@ -58,27 +89,19 @@ class OpenPythonVirtualMachine internal constructor(val machine: Machine, val me
         }
     }
 
-    fun load(tag: NBTTagCompound) {
-        val rootTag = OpenComputersLikeSaveHandler.loadNbt(machine.host(), tag, machine.node().address()) ?: run {
-            if (machine.isRunning) machine.crash("Missing data")
-            return
-        }
+    override fun load(tag: NBTTagCompound) {
+        val rootTag = OpenComputersLikeSaveHandler.loadNbt(machine.host(), tag, machine.node().address())
+                ?: run {
+                    if (machine.isRunning) machine.crash("Missing data")
+                    return
+                }
 
 
-        // firmware
-        val firmwareTag = rootTag.getCompoundTag("firmware")
+        // LATEST_FIRMWARE
+        val firmwareTag = rootTag.getCompoundTag("LATEST_FIRMWARE")
         val firmwareName = firmwareTag.getString("name")
         if (firmware.name != firmwareName) {
-            try {
-                firmware = OpenPythonFirmware(firmwareName)
-            } catch (e: Exception) {
-                if (firmwareName.startsWith("v1.0.")) {
-                    e.printStackTrace()
-                    firmware = OpenPythonFirmware.v1_0_1
-                } else {
-                    throw e;
-                }
-            }
+            machine.crash("Invalid firmware")
         }
 
 
@@ -110,14 +133,12 @@ class OpenPythonVirtualMachine internal constructor(val machine: Machine, val me
         state.load(stateTag)
     }
 
-    fun save(tag: NBTTagCompound) {
-
-        // firmware
+    override fun save(tag: NBTTagCompound) {
+        // LATEST_FIRMWARE
         val firmwareTag = NBTTagCompound()
         firmwareTag.setString("name", firmware.name)
         firmwareTag.setInteger("protocol", firmware.protocol)
 
-
         // cpu
         val cpuTag = NBTTagCompound()
 
@@ -148,7 +169,7 @@ class OpenPythonVirtualMachine internal constructor(val machine: Machine, val me
 
         val rootTag = NBTTagCompound()
         // rootTag.setInteger("memorySize", memorySize) // TODO: no idea
-        rootTag.setTag("firmware", firmwareTag)
+        rootTag.setTag("LATEST_FIRMWARE", firmwareTag)
         rootTag.setTag("cpu", cpuTag)
         rootTag.setTag("state", stateTag)
 
diff --git a/src/main/java/kr/pe/ecmaxp/openpython/console/OpenPieBenchmark.kt b/src/main/java/kr/pe/ecmaxp/openpython/console/OpenPieBenchmark.kt
index bcaeae12..06754845 100644
--- a/src/main/java/kr/pe/ecmaxp/openpython/console/OpenPieBenchmark.kt
+++ b/src/main/java/kr/pe/ecmaxp/openpython/console/OpenPieBenchmark.kt
@@ -1,10 +1,10 @@
 package kr.pe.ecmaxp.openpython.console
 
-import kr.pe.ecmaxp.openpython.arch.OpenPythonFirmware
-import kr.pe.ecmaxp.openpython.arch.OpenPythonMemoryRegion.*
 import kr.pe.ecmaxp.openpython.arch.consts.KB
-import kr.pe.ecmaxp.openpython.arch.consts.SYS_DEBUG
-import kr.pe.ecmaxp.openpython.arch.consts.SYS_INFO_RAM_SIZE
+import kr.pe.ecmaxp.openpython.arch.versions.v1.OpenPythonArchitectureV1_1
+import kr.pe.ecmaxp.openpython.arch.versions.v1.OpenPythonMemoryRegionV1.*
+import kr.pe.ecmaxp.openpython.arch.versions.v1.SYS_DEBUG
+import kr.pe.ecmaxp.openpython.arch.versions.v1.SYS_INFO_RAM_SIZE
 import kr.pe.ecmaxp.thumbsf.CPU
 import kr.pe.ecmaxp.thumbsf.MemoryFlag
 import kr.pe.ecmaxp.thumbsf.consts.PC
@@ -17,7 +17,7 @@ object OpenPythonBenchmark {
     @JvmStatic
     fun main(args: Array<String>) {
         val cpu = CPU()
-        val firmware = OpenPythonFirmware.v1_0_1
+        val firmware = OpenPythonArchitectureV1_1.LATEST_FIRMWARE
         cpu.memory.apply {
             flash(FLASH.address, FLASH.size, firmware.loadFirmware())
             map(SRAM.address, SRAM.size, MemoryFlag.RW) // ram
diff --git a/src/main/resources/assets/openpython/opos/bin/python.py b/src/main/resources/assets/openpython/opos/v1.0/bin/python.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/bin/python.py
rename to src/main/resources/assets/openpython/opos/v1.0/bin/python.py
diff --git a/src/main/resources/assets/openpython/opos/boot/01_basic.py b/src/main/resources/assets/openpython/opos/v1.0/boot/01_basic.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/boot/01_basic.py
rename to src/main/resources/assets/openpython/opos/v1.0/boot/01_basic.py
diff --git a/src/main/resources/assets/openpython/opos/boot/02_screen.py b/src/main/resources/assets/openpython/opos/v1.0/boot/02_screen.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/boot/02_screen.py
rename to src/main/resources/assets/openpython/opos/v1.0/boot/02_screen.py
diff --git a/src/main/resources/assets/openpython/opos/boot/03_builtin.py b/src/main/resources/assets/openpython/opos/v1.0/boot/03_builtin.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/boot/03_builtin.py
rename to src/main/resources/assets/openpython/opos/v1.0/boot/03_builtin.py
diff --git a/src/main/resources/assets/openpython/opos/boot/98_filesystem.py b/src/main/resources/assets/openpython/opos/v1.0/boot/98_filesystem.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/boot/98_filesystem.py
rename to src/main/resources/assets/openpython/opos/v1.0/boot/98_filesystem.py
diff --git a/src/main/resources/assets/openpython/opos/init.lua b/src/main/resources/assets/openpython/opos/v1.0/init.lua
similarity index 100%
rename from src/main/resources/assets/openpython/opos/init.lua
rename to src/main/resources/assets/openpython/opos/v1.0/init.lua
diff --git a/src/main/resources/assets/openpython/opos/init.py b/src/main/resources/assets/openpython/opos/v1.0/init.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/init.py
rename to src/main/resources/assets/openpython/opos/v1.0/init.py
diff --git a/src/main/resources/assets/openpython/opos/lib/component.py b/src/main/resources/assets/openpython/opos/v1.0/lib/component.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/component.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/component.py
diff --git a/src/main/resources/assets/openpython/opos/lib/computer.py b/src/main/resources/assets/openpython/opos/v1.0/lib/computer.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/computer.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/computer.py
diff --git a/src/main/resources/assets/openpython/opos/lib/errno.py b/src/main/resources/assets/openpython/opos/v1.0/lib/errno.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/errno.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/errno.py
diff --git a/src/main/resources/assets/openpython/opos/lib/event.py b/src/main/resources/assets/openpython/opos/v1.0/lib/event.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/event.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/event.py
diff --git a/src/main/resources/assets/openpython/opos/lib/filesystem.py b/src/main/resources/assets/openpython/opos/v1.0/lib/filesystem.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/filesystem.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/filesystem.py
diff --git a/src/main/resources/assets/openpython/opos/lib/heapq.py b/src/main/resources/assets/openpython/opos/v1.0/lib/heapq.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/heapq.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/heapq.py
diff --git a/src/main/resources/assets/openpython/opos/lib/imp.py b/src/main/resources/assets/openpython/opos/v1.0/lib/imp.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/imp.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/imp.py
diff --git a/src/main/resources/assets/openpython/opos/lib/json.py b/src/main/resources/assets/openpython/opos/v1.0/lib/json.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/json.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/json.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/__future__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/__future__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/__future__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/__future__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/_libc.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/_libc.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/_libc.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/_libc.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/_markupbase.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/_markupbase.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/_markupbase.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/_markupbase.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/abc.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/abc.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/abc.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/abc.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/argparse.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/argparse.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/argparse.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/argparse.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/asyncio_slow.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/asyncio_slow.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/asyncio_slow.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/asyncio_slow.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/base64.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/base64.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/base64.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/base64.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/binascii.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/binascii.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/binascii.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/binascii.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/binhex.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/binhex.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/binhex.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/binhex.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/bisect.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/bisect.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/bisect.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/bisect.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/calendar.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/calendar.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/calendar.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/calendar.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/cgi.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/cgi.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/cgi.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/cgi.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/cmd.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/cmd.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/cmd.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/cmd.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/code.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/code.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/code.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/code.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/codecs.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/codecs.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/codecs.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/codecs.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/codeop.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/codeop.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/codeop.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/codeop.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/collections/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/collections/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/collections/defaultdict.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/defaultdict.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/collections/defaultdict.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/defaultdict.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/collections/deque.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/deque.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/collections/deque.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/collections/deque.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/concurrent/futures/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/concurrent/futures/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/concurrent/futures/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/concurrent/futures/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/contextlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/contextlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/contextlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/contextlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/copy.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/copy.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/copy.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/copy.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/csv.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/csv.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/csv.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/csv.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/curses/ascii.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/curses/ascii.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/curses/ascii.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/curses/ascii.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/datetime.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/datetime.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/datetime.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/datetime.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/dbm.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/dbm.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/dbm.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/dbm.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/decimal.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/decimal.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/decimal.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/decimal.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/difflib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/difflib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/difflib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/difflib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/_encoded_words.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_encoded_words.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/_encoded_words.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_encoded_words.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/_parseaddr.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_parseaddr.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/_parseaddr.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_parseaddr.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/_policybase.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_policybase.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/_policybase.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/_policybase.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/base64mime.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/base64mime.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/base64mime.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/base64mime.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/charset.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/charset.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/charset.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/charset.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/encoders.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/encoders.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/encoders.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/encoders.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/errors.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/errors.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/errors.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/errors.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/feedparser.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/feedparser.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/feedparser.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/feedparser.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/header.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/header.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/header.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/header.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/iterators.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/iterators.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/iterators.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/iterators.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/message.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/message.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/message.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/message.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/parser.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/parser.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/parser.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/parser.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/quoprimime.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/quoprimime.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/quoprimime.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/quoprimime.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/email/utils.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/utils.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/email/utils.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/email/utils.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/errno.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/errno.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/errno.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/errno.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/fcntl.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fcntl.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/fcntl.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fcntl.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ffilib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ffilib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ffilib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ffilib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/fnmatch.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fnmatch.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/fnmatch.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fnmatch.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/formatter.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/formatter.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/formatter.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/formatter.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/fractions.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fractions.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/fractions.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/fractions.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ftplib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ftplib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ftplib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ftplib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/functools.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/functools.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/functools.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/functools.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/getopt.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/getopt.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/getopt.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/getopt.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/getpass.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/getpass.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/getpass.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/getpass.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/gettext.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/gettext.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/gettext.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/gettext.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/glob.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/glob.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/glob.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/glob.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/gzip.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/gzip.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/gzip.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/gzip.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hashlib/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hashlib/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha224.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha224.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha224.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha224.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha256.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha256.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha256.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha256.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha384.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha384.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha384.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha384.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha512.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha512.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hashlib/_sha512.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hashlib/_sha512.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/heapq.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/heapq.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/heapq.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/heapq.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/hmac.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hmac.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/hmac.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/hmac.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/html/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/html/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/html/entities.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/entities.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/html/entities.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/entities.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/html/parser.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/parser.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/html/parser.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/html/parser.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/http/client.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/http/client.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/http/client.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/http/client.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/imaplib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/imaplib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/imaplib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/imaplib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/imp.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/imp.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/imp.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/imp.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/importlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/importlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/importlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/importlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/inspect.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/inspect.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/inspect.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/inspect.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/io.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/io.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/io.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/io.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ipaddress.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ipaddress.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ipaddress.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ipaddress.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/itertools.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/itertools.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/itertools.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/itertools.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/json/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/json/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/json/decoder.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/decoder.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/json/decoder.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/decoder.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/json/encoder.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/encoder.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/json/encoder.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/encoder.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/json/scanner.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/scanner.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/json/scanner.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/scanner.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/json/tool.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/tool.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/json/tool.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/json/tool.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/keyword.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/keyword.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/keyword.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/keyword.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/linecache.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/linecache.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/linecache.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/linecache.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/locale.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/locale.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/locale.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/locale.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/logging.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/logging.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/logging.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/logging.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/machine/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/machine/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/machine/pin.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/pin.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/machine/pin.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/pin.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/machine/timer.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/timer.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/machine/timer.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/machine/timer.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/mailcap.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mailbox.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/mailcap.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mailbox.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/mimetypes.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mailcap.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/mimetypes.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mailcap.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/nntplib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mimetypes.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/nntplib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/mimetypes.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/multiprocessing.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/multiprocessing.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/multiprocessing.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/multiprocessing.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/numbers.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/nntplib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/numbers.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/nntplib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/optparse.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/numbers.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/optparse.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/numbers.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/operator.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/operator.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/operator.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/operator.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pathlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/optparse.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pathlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/optparse.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/os/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/os/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/os/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/os/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/os/path.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/os/path.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/os/path.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/os/path.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pdb.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pathlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pdb.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pathlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pickletools.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pdb.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pickletools.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pdb.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pickle.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pickle.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pickle.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pickle.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/platform.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pickletools.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/platform.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pickletools.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pkg_resources.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pkg_resources.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pkg_resources.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pkg_resources.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pkgutil.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pkgutil.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pkgutil.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pkgutil.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/poplib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/platform.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/poplib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/platform.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/posixpath.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/poplib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/posixpath.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/poplib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/profile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/posixpath.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/profile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/posixpath.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pprint.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pprint.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pprint.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pprint.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pty.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/profile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pty.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/profile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/queue.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pty.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/queue.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pty.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pwd.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pwd.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pwd.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pwd.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pyb.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pyb.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pyb.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pyb.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pystone.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pystone.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pystone.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pystone.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/pystone_lowmem.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pystone_lowmem.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/pystone_lowmem.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/pystone_lowmem.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/readline.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/queue.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/readline.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/queue.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/quopri.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/quopri.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/quopri.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/quopri.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/random.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/random.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/random.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/random.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/re.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/re.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/re.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/re.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/reprlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/readline.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/reprlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/readline.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/runpy.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/reprlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/runpy.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/reprlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/sched.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/runpy.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/sched.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/runpy.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/selectors.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sched.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/selectors.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sched.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/select.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/select.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/select.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/select.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/shelve.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/selectors.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/shelve.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/selectors.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/shlex.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shelve.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/shlex.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shelve.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/smtplib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shlex.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/smtplib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shlex.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/shutil.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shutil.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/shutil.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/shutil.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/signal.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/signal.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/signal.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/signal.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/socketserver.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/smtplib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/socketserver.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/smtplib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/socket.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/socket.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/socket.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/socket.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/statistics.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/socketserver.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/statistics.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/socketserver.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/sqlite3.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sqlite3.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/sqlite3.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sqlite3.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ssl.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ssl.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ssl.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ssl.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/stat.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/stat.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/stat.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/stat.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/stringprep.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/statistics.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/stringprep.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/statistics.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/string.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/string.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/string.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/string.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/subprocess.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/stringprep.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/subprocess.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/stringprep.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/struct.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/struct.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/struct.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/struct.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/sys.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/subprocess.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/sys.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/subprocess.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/tarfile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sys.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/tarfile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/sys.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/telnetlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tarfile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/telnetlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tarfile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/tempfile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/telnetlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/tempfile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/telnetlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/trace.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tempfile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/trace.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tempfile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/test/pystone.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/test/pystone.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/test/pystone.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/test/pystone.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/test/support.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/test/support.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/test/support.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/test/support.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/tests.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tests.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/tests.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tests.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/tests/test.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tests/test.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/tests/test.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tests/test.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/textwrap.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/textwrap.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/textwrap.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/textwrap.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/threading.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/threading.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/threading.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/threading.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/time.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/time.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/time.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/time.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/timeit.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/timeit.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/timeit.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/timeit.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/typing.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/trace.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/typing.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/trace.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/traceback.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/traceback.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/traceback.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/traceback.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/tty.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tty.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/tty.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/tty.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/types.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/types.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/types.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/types.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/urllib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/typing.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/urllib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/typing.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uaiohttpclient.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uaiohttpclient.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uaiohttpclient.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uaiohttpclient.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/core.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/core.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/core.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/core.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/queues.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/queues.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/queues.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/queues.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/synchro.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/synchro.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/synchro.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/synchro.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/udp.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/udp.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/udp.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/udp.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/websocket/server.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/websocket/server.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uasyncio/websocket/server.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uasyncio/websocket/server.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ucontextlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ucontextlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ucontextlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ucontextlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/ucurses/__init__.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ucurses/__init__.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/ucurses/__init__.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/ucurses/__init__.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/udnspkt.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/udnspkt.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/udnspkt.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/udnspkt.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/umqtt/robust.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/umqtt/robust.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/umqtt/robust.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/umqtt/robust.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/umqtt/simple.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/umqtt/simple.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/umqtt/simple.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/umqtt/simple.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/unicodedata.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/unicodedata.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/unicodedata.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/unicodedata.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/unittest.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/unittest.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/unittest.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/unittest.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/upip.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upip.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/upip.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upip.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/upip_utarfile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upip_utarfile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/upip_utarfile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upip_utarfile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/upysh.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upysh.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/upysh.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/upysh.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/urequests.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urequests.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/urequests.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urequests.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uuid.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uuid.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/urllib/parse.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib/parse.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/urllib/parse.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib/parse.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/urllib/urequest.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib/urequest.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/urllib/urequest.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/urllib/urequest.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/utarfile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/utarfile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/utarfile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/utarfile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/uu.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uu.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/uu.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uu.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/venv.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uuid.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/venv.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/uuid.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/zipfile.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/venv.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/zipfile.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/venv.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/warnings.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/warnings.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/warnings.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/warnings.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/weakref.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/weakref.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/weakref.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/weakref.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/xmltok.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/xmltok.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/xmltok.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/xmltok.py
diff --git a/src/main/resources/assets/openpython/opos/lib/urllib/request.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/zipfile.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/urllib/request.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/zipfile.py
diff --git a/src/main/resources/assets/openpython/opos/lib/micropython/zlib.py b/src/main/resources/assets/openpython/opos/v1.0/lib/micropython/zlib.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/micropython/zlib.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/micropython/zlib.py
diff --git a/src/main/resources/assets/openpython/opos/lib/monitor.py b/src/main/resources/assets/openpython/opos/v1.0/lib/monitor.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/monitor.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/monitor.py
diff --git a/src/main/resources/assets/openpython/opos/lib/msgpack.py b/src/main/resources/assets/openpython/opos/v1.0/lib/msgpack.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/msgpack.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/msgpack.py
diff --git a/src/main/resources/assets/openpython/opos/lib/ocpath.py b/src/main/resources/assets/openpython/opos/v1.0/lib/ocpath.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/ocpath.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/ocpath.py
diff --git a/src/main/resources/assets/openpython/opos/lib/os.py b/src/main/resources/assets/openpython/opos/v1.0/lib/os.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/os.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/os.py
diff --git a/src/main/resources/assets/openpython/opos/lib/random.py b/src/main/resources/assets/openpython/opos/v1.0/lib/random.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/random.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/random.py
diff --git a/src/main/resources/assets/openpython/opos/lib/re.py b/src/main/resources/assets/openpython/opos/v1.0/lib/re.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/re.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/re.py
diff --git a/src/main/resources/assets/openpython/opos/lib/shell.py b/src/main/resources/assets/openpython/opos/v1.0/lib/shell.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/shell.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/shell.py
diff --git a/src/main/resources/assets/openpython/opos/lib/time.py b/src/main/resources/assets/openpython/opos/v1.0/lib/time.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/time.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/time.py
diff --git a/src/main/resources/assets/openpython/opos/lib/urllib/parse.py b/src/main/resources/assets/openpython/opos/v1.0/lib/urllib/parse.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/urllib/parse.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/urllib/parse.py
diff --git a/src/main/resources/assets/openpython/opos/usr/bin/env.py b/src/main/resources/assets/openpython/opos/v1.0/lib/urllib/request.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/usr/bin/env.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/urllib/request.py
diff --git a/src/main/resources/assets/openpython/opos/lib/value.py b/src/main/resources/assets/openpython/opos/v1.0/lib/value.py
similarity index 100%
rename from src/main/resources/assets/openpython/opos/lib/value.py
rename to src/main/resources/assets/openpython/opos/v1.0/lib/value.py
diff --git a/src/main/resources/assets/openpython/opos/v1.0/usr/bin/env.py b/src/main/resources/assets/openpython/opos/v1.0/usr/bin/env.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/.prop b/src/main/resources/assets/openpython/opos/v1.1/.prop
new file mode 100644
index 00000000..9ba21168
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/.prop
@@ -0,0 +1 @@
+{label="OpenPython OS", reboot=true, setlabel=true, setboot=true}
\ No newline at end of file
diff --git a/src/main/resources/assets/openpython/opos/v1.1/bin/dir.py b/src/main/resources/assets/openpython/opos/v1.1/bin/dir.py
new file mode 100644
index 00000000..a48f6fc3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/bin/dir.py
@@ -0,0 +1,9 @@
+import os
+
+
+def main():
+    print(os.listdir('.'))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/bin/python.py b/src/main/resources/assets/openpython/opos/v1.1/bin/python.py
new file mode 100644
index 00000000..e7688772
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/bin/python.py
@@ -0,0 +1,49 @@
+import sys
+
+from ucode import MICROPY_GIT_TAG, MICROPY_BUILD_DATE, MICROPY_HW_BOARD_NAME, MICROPY_HW_MCU_NAME
+from ucode import repl_input, repl_compile, repl_call
+
+
+def main():
+    context = {"__name__": "<shell>"}
+
+    print("MicroPython",
+          MICROPY_GIT_TAG,
+          "on", MICROPY_BUILD_DATE + ";",
+          MICROPY_HW_BOARD_NAME,
+          "with", MICROPY_HW_MCU_NAME)
+
+    try:
+        # noinspection PyStatementEffect
+        help
+    except NameError:
+        pass
+    else:
+        print("Type \"help()\" for more information.")
+
+    while True:
+        try:
+            code = repl_input()
+        except Exception as e:
+            print(type(e).__name__, e)
+            continue
+
+        if code == 'exit':
+            break
+
+        try:
+            func = repl_compile(code, context)
+        except BaseException as e:
+            sys.print_exception(e)
+            continue
+
+        try:
+            repl_call(func, context)
+        except SystemExit as e:
+            return e.args[0] if e.args else 0
+        except BaseException as e:
+            sys.print_exception(e)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/bin/shell.py b/src/main/resources/assets/openpython/opos/v1.1/bin/shell.py
new file mode 100644
index 00000000..a8964e6a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/bin/shell.py
@@ -0,0 +1,27 @@
+import os
+import shell
+
+
+def main():
+    while True:
+        command, *args = input(os.getcwd() + "> ").split()
+        if command == "exit":
+            break
+        elif command == "cd":
+            try:
+                os.chdir(args[0] if args else '.')
+            except OSError as e:
+                print("fail", e)
+        else:
+            programs = os.listdir('/bin')
+            filename = command + '.py'
+            if filename in programs:
+                progpath = os.path.join('/bin', filename)
+                exitcode = shell.spawn(progpath, *args)
+                print("exitcode:", exitcode)
+            else:
+                print("Program not found:", filename)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/01_sys.py b/src/main/resources/assets/openpython/opos/v1.1/boot/01_sys.py
new file mode 100644
index 00000000..8657cfdb
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/01_sys.py
@@ -0,0 +1,12 @@
+from uimp import new_module
+
+import sys
+
+sys_module = new_module('sys')
+
+for name in dir(sys):
+    obj = getattr(sys, name)
+    setattr(sys_module, name, obj)
+
+sys.modules['sys'] = sys_module
+sys.modules['usys'] = sys
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/02_event.py b/src/main/resources/assets/openpython/opos/v1.1/boot/02_event.py
new file mode 100644
index 00000000..a2a864ee
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/02_event.py
@@ -0,0 +1,3 @@
+import event
+
+event.setup()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/03_component.py b/src/main/resources/assets/openpython/opos/v1.1/boot/03_component.py
new file mode 100644
index 00000000..f7873d83
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/03_component.py
@@ -0,0 +1,3 @@
+import component
+
+component.setup()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/04_builtin.py b/src/main/resources/assets/openpython/opos/v1.1/boot/04_builtin.py
new file mode 100644
index 00000000..e30fe61f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/04_builtin.py
@@ -0,0 +1,55 @@
+import builtins
+
+import event
+import machine
+import sys
+
+last_input = []
+buf = []
+
+
+def check_key_down(name, *_):
+    return name == "key_down"
+
+
+@machine.hook_stdin
+def input_handler():
+    while True:
+        signal = event.pull_filtered(1, check_key_down)
+        if signal is not None:
+            _name, _address, char, code, _user = signal
+            if char:
+                return int(char)
+
+
+# noinspection PyShadowingBuiltins
+def input(prompt=None):
+    if prompt is not None:
+        print(prompt, end="")
+
+    read = sys.stdin.read
+    write = sys.stdout.write
+    buf = []
+    ignore = 0
+    while True:
+        ch = read(1)
+        if ignore > 0:
+            ignore -= 1
+            continue
+
+        if ch == '\n':
+            write(ch)
+            break
+        elif ch == '\b':
+            if buf:
+                buf.pop()
+                write(ch)
+            continue
+        else:
+            write(ch)
+            buf.append(ch)
+
+    return ''.join(buf)
+
+
+builtins.input = input
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/04_value.py b/src/main/resources/assets/openpython/opos/v1.1/boot/04_value.py
new file mode 100644
index 00000000..90383030
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/04_value.py
@@ -0,0 +1,3 @@
+import value
+
+value.setup()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/05_screen.py b/src/main/resources/assets/openpython/opos/v1.1/boot/05_screen.py
new file mode 100644
index 00000000..9a9fab41
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/05_screen.py
@@ -0,0 +1,43 @@
+import machine
+
+import component
+from monitor import Monitor, _set_monitor
+
+gpu = component.get_primary("gpu")
+
+screen = None
+for screen in component.list("screen"):
+    if screen.getKeyboards():
+        component.set_primary("screen", screen.address)
+        break
+
+if gpu and screen:
+    monitor = Monitor(gpu)
+    gpu.bind(screen.address)
+    w, h = gpu.maxResolution()
+    monitor.w, monitor.h = w, h
+    gpu.setResolution(w, h)
+    gpu.setBackground(0x000000)
+    gpu.setForeground(0xFFFFFF)
+    gpu.fill(1, 1, w, h, " ")
+
+
+    @machine.hook_stdout
+    def print_handler(string):
+        try:
+            for char in string:
+                monitor.put(char)
+        except BaseException as e:
+            machine.debug("print_handler exc =? %s: %s" % (type(e).__name__, e))
+
+
+    _set_monitor(monitor)
+
+    import tty
+
+    tty.bind(gpu)
+
+
+    @machine.hook_stdout
+    def print_handler(buf):
+        tty.stream.write(buf)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/98_filesystem.py b/src/main/resources/assets/openpython/opos/v1.1/boot/98_filesystem.py
new file mode 100644
index 00000000..93af55d6
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/98_filesystem.py
@@ -0,0 +1,8 @@
+import uos
+
+import computer
+from filesystem import FileSystem
+
+uos.umount('/')
+uos.mount(FileSystem(__path__, root=True), '/')
+uos.mount(FileSystem(computer.tmp_address()), '/tmp')
diff --git a/src/main/resources/assets/openpython/opos/v1.1/boot/99_process.py b/src/main/resources/assets/openpython/opos/v1.1/boot/99_process.py
new file mode 100644
index 00000000..0db9348a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/boot/99_process.py
@@ -0,0 +1,3 @@
+import process
+
+process.setup()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/init.lua b/src/main/resources/assets/openpython/opos/v1.1/init.lua
new file mode 100644
index 00000000..4568aa4d
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/init.lua
@@ -0,0 +1 @@
+error("This Operating System requires a CPU running the OpenPython architecture.")
diff --git a/src/main/resources/assets/openpython/opos/v1.1/init.py b/src/main/resources/assets/openpython/opos/v1.1/init.py
new file mode 100644
index 00000000..fd8f2659
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/init.py
@@ -0,0 +1,69 @@
+"""OpenPython Operating System"""
+
+import sys
+
+import machine
+import uos
+from ucomponent import invoke
+from uio import FileIO
+
+
+class FileSystem:
+    def __init__(self, address):
+        self.address = address
+
+    def mount(self, readonly, mkfs):
+        pass
+
+    def umount(self):
+        pass
+
+    def getcwd(self):
+        return '/'
+
+    def ilistdir(self, dir):
+        for name in invoke(self.address, 'list', dir):
+            if invoke(self.address, 'isDirectory', dir + "/" + name):
+                yield (name, 0x4000, 0, -1)
+            else:
+                yield (name, 0x8000, 0, 0)
+
+    def stat(self, path):
+        if not invoke(self.address, 'exists', path):
+            raise OSError(1)
+
+        return 0x4000 if invoke(self.address, 'isDirectory', path) else 0x8000, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+    def open(self, file, mode):
+        return FileIO(self.address, file, mode)
+
+
+@machine.hook_stdin
+def input_handler():
+    return 0
+
+
+@machine.hook_stdout
+def print_handler(string):
+    machine.debug(string)
+
+
+def init():
+    uos.mount(FileSystem(__path__), '/')
+    sys.path.append('/lib')
+    sys.path.append('/lib/internal')
+    sys.path.append('/lib/openos')
+    sys.path.append('/usr/lib')
+    sys.path.append('/lib/micropython')
+
+    for filename in sorted(uos.listdir("/boot")):
+        context = {'__name__': '__main__', '__path__': __path__}
+        # noinspection PyUnresolvedReferences
+        execfile("/boot/" + filename, context)
+
+    from shell import spawn
+    spawn("/bin/shell.py")
+
+
+if __name__ == "__main__":
+    init()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/filesystem.py b/src/main/resources/assets/openpython/opos/v1.1/lib/filesystem.py
new file mode 100644
index 00000000..1492bec5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/filesystem.py
@@ -0,0 +1,135 @@
+from uio import FileIO
+
+import errno
+import ocpath
+from component import Component
+
+forced_cd = False
+
+
+class FileSystem:
+    def __init__(self, address, root=False):
+        self.fs = Component(address)
+        self.address = address
+        self.readonly = self.fs.isReadOnly()
+        self.cwd = "/" if root else ''
+        self.root = root
+
+    # noinspection PyUnusedLocal
+    def mount(self, readonly, mkfs):
+        self.readonly = self.fs.isReadOnly() or readonly
+
+    def umount(self):
+        pass
+
+    def ilistdir(self, dir):
+        for name in self.fs.list(dir):
+            if self.fs.isDirectory(dir + "/" + name):
+                yield (name, 0x4000, 0, -1)
+            else:
+                size = self.fs.size(name)
+                yield (name, 0x8000, 0, size)
+
+    def guard_readonly(self):
+        if self.readonly:
+            raise OSError(errno.EPERM)
+
+    def chdir(self, dir):
+        path = ocpath.join(self.cwd, dir)
+        path = ocpath.normpath(path)
+        path = path.rstrip(ocpath.sep)
+
+        if not path:
+            path = "/" if self.root else ""
+        elif '..' == path or ('..' + ocpath.sep) in path:
+            path = "/" if self.root else ""
+
+        if not forced_cd and not self.fs.isDirectory(path):
+            raise OSError(errno.ENOTDIR)
+
+        self.cwd = path
+
+    def getcwd(self):
+        return self.cwd
+
+    def mkdir(self, path):
+        self.guard_readonly()
+        result = self.fs.makeDirectory(path)
+        if not result:
+            exists = self.fs.exists(path)
+            if self.fs.isDirectory(path):
+                raise OSError(errno.EISDIR)
+            elif exists:  # file
+                raise OSError(errno.EEXIST)
+
+            raise OSError(errno.ENOENT)
+
+    def remove(self, path):
+        self.guard_readonly()
+        self.fs.remove(path)
+
+    def rename(self, old_path, new_path):
+        self.guard_readonly()
+        result = self.fs.rename(old_path, new_path)
+        if not result:
+            raise OSError(errno.ENOENT)
+
+    def rmdir(self, path):
+        self.guard_readonly()
+        if not self.fs.isDirectory(path):
+            if self.fs.exists(path):
+                # is file
+                raise OSError(errno.EEXIST)
+
+            raise OSError(errno.ENOENT)
+
+        result = self.fs.remove(path)
+        if not result:
+            raise OSError(errno.ENOENT)
+
+    def stat(self, path):
+        if not self.fs.exists(path):
+            raise OSError(errno.ENOENT)
+
+        is_dir = self.fs.isDirectory(path)
+        size = self.fs.size(path) if not is_dir else 0
+        mtime = self.fs.lastModified(path)
+
+        return (
+            0x4000 if is_dir else 0x8000,  # st_mode
+            hash(path),  # st_ino
+            hash(self.address),  # dev
+            1,  # nlink
+            0,  # uid: root
+            0,  # gid: root
+            size,  # size
+            mtime,  # atime
+            mtime,  # mtime
+            mtime,  # ctime
+        )
+
+    # noinspection PyUnusedLocal
+    def statvfs(self, path):
+        used = self.fs.spaceUsed()
+        total = self.fs.spaceTotal()
+        free = total - used
+
+        return (
+            1,  # f_bsize
+            1,  # f_frsize
+            used,  # f_blocks
+            free,  # f_bfree
+            free,  # f_bavail
+            used,  # f_files
+            free,  # f_ffree
+            free,  # f_favail
+            0,  # f_flag
+            256,  # f_namemax
+        )
+
+    def open(self, file, mode):
+        # TODO: normalize mode
+        return FileIO(self.fs.address, file, mode)
+
+    def __repr__(self):
+        return "<FileSystem: {!r}>".format(self.address)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/genericpath.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/genericpath.py
new file mode 100644
index 00000000..58674427
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/genericpath.py
@@ -0,0 +1,145 @@
+"""
+Path operations common to more than one OS
+Do not use directly.  The OS specific modules import the appropriate
+functions from this module themselves.
+"""
+import uos as os
+import stat
+
+__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
+           'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
+           'samestat']
+
+
+ST_MODE, ST_INO, ST_DEV, ST_NLINK, ST_UID, ST_GID, ST_SIZE, ST_ATIME, ST_MTIME, ST_CTIME = range(10)
+
+
+# Does a path exist?
+# This is false for dangling symbolic links on systems that support them.
+def exists(path):
+    """Test whether a path exists.  Returns False for broken symbolic links"""
+    try:
+        os.stat(path)
+    except OSError:
+        return False
+    return True
+
+
+# This follows symbolic links, so both islink() and isdir() can be true
+# for the same path on systems that support symlinks
+def isfile(path):
+    """Test whether a path is a regular file"""
+    try:
+        st = os.stat(path)
+    except OSError:
+        return False
+    return stat.S_ISREG(st[ST_MODE])
+
+
+# Is a path a directory?
+# This follows symbolic links, so both islink() and isdir()
+# can be true for the same path on systems that support symlinks
+def isdir(s):
+    """Return true if the pathname refers to an existing directory."""
+    try:
+        st = os.stat(s)
+    except OSError:
+        return False
+    return stat.S_ISDIR(st[ST_MODE])
+
+
+def getsize(filename):
+    """Return the size of a file, reported by os.stat()."""
+    return os.stat(filename)[ST_SIZE]
+
+
+def getmtime(filename):
+    """Return the last modification time of a file, reported by os.stat()."""
+    return os.stat(filename)[ST_MTIME]
+
+
+def getatime(filename):
+    """Return the last access time of a file, reported by os.stat()."""
+    return os.stat(filename)[ST_ATIME]
+
+
+def getctime(filename):
+    """Return the metadata change time of a file, reported by os.stat()."""
+    return os.stat(filename)[ST_CTIME]
+
+
+# Return the longest prefix of all list elements.
+def commonprefix(m):
+    "Given a list of pathnames, returns the longest common leading component"
+    if not m: return ''
+    s1 = min(m)
+    s2 = max(m)
+    for i, c in enumerate(s1):
+        if c != s2[i]:
+            return s1[:i]
+    return s1
+
+# Are two stat buffers (obtained from stat, fstat or lstat)
+# describing the same file?
+def samestat(s1, s2):
+    """Test whether two stat buffers reference the same file"""
+    return (s1[ST_INO] == s2[ST_INO] and
+            s1[ST_DEV] == s2[ST_DEV])
+
+
+# Are two filenames really pointing to the same file?
+def samefile(f1, f2):
+    """Test whether two pathnames reference the same actual file"""
+    s1 = os.stat(f1)
+    s2 = os.stat(f2)
+    return samestat(s1, s2)
+
+
+# Are two open files really referencing the same file?
+# (Not necessarily the same file descriptor!)
+def sameopenfile(fp1, fp2):
+    """Test whether two open file objects reference the same file"""
+    raise NotImplementedError
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+# Generic implementation of splitext, to be parametrized with
+# the separators
+def _splitext(p, sep, altsep, extsep):
+    """Split the extension from a pathname.
+    Extension is everything from the last dot to the end, ignoring
+    leading dots.  Returns "(root, ext)"; ext may be empty."""
+    # NOTE: This code must work for text and bytes strings.
+
+    sepIndex = p.rfind(sep)
+    if altsep:
+        altsepIndex = p.rfind(altsep)
+        sepIndex = max(sepIndex, altsepIndex)
+
+    dotIndex = p.rfind(extsep)
+    if dotIndex > sepIndex:
+        # skip all leading dots
+        filenameIndex = sepIndex + 1
+        while filenameIndex < dotIndex:
+            if p[filenameIndex:filenameIndex+1] != extsep:
+                return p[:dotIndex], p[dotIndex:]
+            filenameIndex += 1
+
+    return p, p[:0]
+
+def _check_arg_types(funcname, *args):
+    hasstr = hasbytes = False
+    for s in args:
+        if isinstance(s, str):
+            hasstr = True
+        elif isinstance(s, bytes):
+            hasbytes = True
+        else:
+            raise TypeError('%s() argument must be str or bytes, not %r' %
+                            (funcname, s.__class__.__name__)) from None
+    if hasstr and hasbytes:
+        raise TypeError("Can't mix strings and bytes in path components") from None
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/heapq.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/heapq.py
new file mode 100644
index 00000000..599c3b1c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/heapq.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from uheapq import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/imp.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/imp.py
new file mode 100644
index 00000000..a606be1a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/imp.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from uimp import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/json.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/json.py
new file mode 100644
index 00000000..107af7b8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/json.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from ujson import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/msgpack.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/msgpack.py
new file mode 100644
index 00000000..cd8c1339
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/msgpack.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from umsgpack import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/random.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/random.py
new file mode 100644
index 00000000..868735f9
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/random.py
@@ -0,0 +1,15 @@
+from urandom import *
+
+from time import ticks_us as _ticks_us
+
+seed(_ticks_us())
+del _ticks_us
+
+
+# micropython-lib #
+
+def shuffle(seq):
+    l = len(seq)
+    for i in range(l):
+        j = randrange(l)
+        seq[i], seq[j] = seq[j], seq[i]
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/re.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/re.py
new file mode 100644
index 00000000..152f91fa
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/re.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from ure import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/internal/time.py b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/time.py
new file mode 100644
index 00000000..0cc286c7
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/internal/time.py
@@ -0,0 +1,2 @@
+# noinspection PyUnresolvedReferences
+from utime import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/__future__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/__future__.py
new file mode 100644
index 00000000..45b935ed
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/__future__.py
@@ -0,0 +1,7 @@
+nested_scopes = True
+generators = True
+division = True
+absolute_import = True
+with_statement = True
+print_function = True
+unicode_literals = True
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_libc.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_libc.py
new file mode 100644
index 00000000..a930cbf7
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_libc.py
@@ -0,0 +1,34 @@
+import ffi
+import sys
+
+
+_h = None
+
+names = ('libc.so', 'libc.so.0', 'libc.so.6', 'libc.dylib')
+
+def get():
+    global _h
+    if _h:
+        return _h
+    err = None
+    for n in names:
+        try:
+            _h = ffi.open(n)
+            return _h
+        except OSError as e:
+            err = e
+    raise err
+
+
+def set_names(n):  # override the candidate library names that get() will try
+    global names
+    names = n
+
+# Find out bitness of the platform, even if long ints are not supported
+# TODO: All bitness differences should be removed from micropython-lib, and
+# this snippet too.
+bitness = 1
+v = sys.maxsize
+while v:  # count bits of maxsize by shifting; works even without long-int support
+    bitness += 1
+    v >>= 1
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_markupbase.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_markupbase.py
new file mode 100644
index 00000000..2af5f1c2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/_markupbase.py
@@ -0,0 +1,395 @@
+"""Shared support for scanning document type declarations in HTML and XHTML.
+
+This module is used as a foundation for the html.parser module.  It has no
+documented public API and should not be used directly.
+
+"""
+
+import re
+
+_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
+_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
+_commentclose = re.compile(r'--\s*>')
+_markedsectionclose = re.compile(r']\s*]\s*>')
+
+# An analysis of the MS-Word extensions is available at
+# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
+
+_msmarkedsectionclose = re.compile(r']\s*>')
+
+del re
+
+
+class ParserBase:
+    """Parser base class which provides some common support methods used
+    by the SGML/HTML and XHTML parsers."""
+
+    def __init__(self):
+        if self.__class__ is ParserBase:
+            raise RuntimeError(
+                "_markupbase.ParserBase must be subclassed")
+
+    def error(self, message):
+        raise NotImplementedError(
+            "subclasses of ParserBase must override error()")
+
+    def reset(self):
+        self.lineno = 1
+        self.offset = 0
+
+    def getpos(self):
+        """Return current line number and offset."""
+        return self.lineno, self.offset
+
+    # Internal -- update line number and offset.  This should be
+    # called for each piece of data exactly once, in order -- in other
+    # words the concatenation of all the input strings to this
+    # function should be exactly the entire input.
+    def updatepos(self, i, j):
+        if i >= j:
+            return j
+        rawdata = self.rawdata
+        nlines = rawdata.count("\n", i, j)
+        if nlines:
+            self.lineno = self.lineno + nlines
+            pos = rawdata.rindex("\n", i, j) # Should not fail
+            self.offset = j-(pos+1)
+        else:
+            self.offset = self.offset + j-i
+        return j
+
+    _decl_otherchars = ''
+
+    # Internal -- parse declaration (for use by subclasses).
+    def parse_declaration(self, i):
+        # This is some sort of declaration; in "HTML as
+        # deployed," this should only be the document type
+        # declaration ("<!DOCTYPE html...>").
+        # ISO 8879:1986, however, has more complex
+        # declaration syntax for elements in <!...>, including:
+        # --comment--
+        # [marked section]
+        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+        # ATTLIST, NOTATION, SHORTREF, USEMAP,
+        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
+        rawdata = self.rawdata
+        j = i + 2
+        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
+        if rawdata[j:j+1] == ">":
+            # the empty comment <!>
+            return j + 1
+        if rawdata[j:j+1] in ("-", ""):
+            # Start of comment followed by buffer boundary,
+            # or just a buffer boundary.
+            return -1
+        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
+        n = len(rawdata)
+        if rawdata[j:j+2] == '--': #comment
+            # Locate --.*-- as the body of the comment
+            return self.parse_comment(i)
+        elif rawdata[j] == '[': #marked section
+            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
+            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
+            # Note that this is extended by Microsoft Office "Save as Web" function
+            # to include [if...] and [endif].
+            return self.parse_marked_section(i)
+        else: #all other declaration elements
+            decltype, j = self._scan_name(j, i)
+        if j < 0:
+            return j
+        if decltype == "doctype":
+            self._decl_otherchars = ''
+        while j < n:
+            c = rawdata[j]
+            if c == ">":
+                # end of declaration syntax
+                data = rawdata[i+2:j]
+                if decltype == "doctype":
+                    self.handle_decl(data)
+                else:
+                    # According to the HTML5 specs sections "8.2.4.44 Bogus
+                    # comment state" and "8.2.4.45 Markup declaration open
+                    # state", a comment token should be emitted.
+                    # Calling unknown_decl provides more flexibility though.
+                    self.unknown_decl(data)
+                return j + 1
+            if c in "\"'":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1 # incomplete
+                j = m.end()
+            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+                name, j = self._scan_name(j, i)
+            elif c in self._decl_otherchars:
+                j = j + 1
+            elif c == "[":
+                # this could be handled in a separate doctype parser
+                if decltype == "doctype":
+                    j = self._parse_doctype_subset(j + 1, i)
+                elif decltype in {"attlist", "linktype", "link", "element"}:
+                    # must tolerate []'d groups in a content model in an element declaration
+                    # also in data attribute specifications of attlist declaration
+                    # also link type declaration subsets in linktype declarations
+                    # also link attribute specification lists in link declarations
+                    self.error("unsupported '[' char in %s declaration" % decltype)
+                else:
+                    self.error("unexpected '[' char in declaration")
+            else:
+                self.error(
+                    "unexpected %r char in declaration" % rawdata[j])
+            if j < 0:
+                return j
+        return -1 # incomplete
+
+    # Internal -- parse a marked section
+    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
+    def parse_marked_section(self, i, report=1):
+        rawdata= self.rawdata
+        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
+        sectName, j = self._scan_name( i+3, i )
+        if j < 0:
+            return j
+        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
+            # look for standard ]]> ending
+            match= _markedsectionclose.search(rawdata, i+3)
+        elif sectName in {"if", "else", "endif"}:
+            # look for MS Office ]> ending
+            match= _msmarkedsectionclose.search(rawdata, i+3)
+        else:
+            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.unknown_decl(rawdata[i+3: j])
+        return match.end(0)
+
+    # Internal -- parse comment, return length or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            self.error('unexpected call to parse_comment()')
+        match = _commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.handle_comment(rawdata[i+4: j])
+        return match.end(0)
+
+    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
+    # returning the index just past any whitespace following the trailing ']'.
+    def _parse_doctype_subset(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        j = i
+        while j < n:
+            c = rawdata[j]
+            if c == "<":
+                s = rawdata[j:j+2]
+                if s == "<":
+                    # end of buffer; incomplete
+                    return -1
+                if s != "<!":
+                    self.updatepos(declstartpos, j + 1)
+                    self.error("unexpected char in internal subset (in %r)" % s)
+                if (j + 2) == n:
+                    # end of buffer; incomplete
+                    return -1
+                if (j + 4) > n:
+                    # end of buffer; incomplete
+                    return -1
+                if rawdata[j:j+4] == "<!--":
+                    j = self.parse_comment(j, report=0)
+                    if j < 0:
+                        return j
+                    continue
+                name, j = self._scan_name(j + 2, declstartpos)
+                if j == -1:
+                    return -1
+                if name not in {"attlist", "element", "entity", "notation"}:
+                    self.updatepos(declstartpos, j + 2)
+                    self.error(
+                        "unknown declaration %r in internal subset" % name)
+                # handle the individual names
+                meth = getattr(self, "_parse_doctype_" + name)
+                j = meth(j, declstartpos)
+                if j < 0:
+                    return j
+            elif c == "%":
+                # parameter entity reference
+                if (j + 1) == n:
+                    # end of buffer; incomplete
+                    return -1
+                s, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                if rawdata[j] == ";":
+                    j = j + 1
+            elif c == "]":
+                j = j + 1
+                while j < n and rawdata[j].isspace():
+                    j = j + 1
+                if j < n:
+                    if rawdata[j] == ">":
+                        return j
+                    self.updatepos(declstartpos, j)
+                    self.error("unexpected char after internal subset")
+                else:
+                    return -1
+            elif c.isspace():
+                j = j + 1
+            else:
+                self.updatepos(declstartpos, j)
+                self.error("unexpected char %r in internal subset" % c)
+        # end of buffer reached
+        return -1
+
+    # Internal -- scan past <!ELEMENT declarations
+    def _parse_doctype_element(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j == -1:
+            return -1
+        # style content model; just skip until '>'
+        rawdata = self.rawdata
+        if '>' in rawdata[j:]:
+            return rawdata.find(">", j) + 1
+        return -1
+
+    # Internal -- scan past <!ATTLIST declarations
+    def _parse_doctype_attlist(self, i, declstartpos):
+        rawdata = self.rawdata
+        name, j = self._scan_name(i, declstartpos)
+        c = rawdata[j:j+1]
+        if c == "":
+            return -1
+        if c == ">":
+            return j + 1
+        while 1:
+            # scan a series of attribute descriptions; simplified:
+            #   name type [value] [#constraint]
+            name, j = self._scan_name(j, declstartpos)
+            if j < 0:
+                return j
+            c = rawdata[j:j+1]
+            if c == "":
+                return -1
+            if c == "(":
+                # an enumerated type; look for ')'
+                if ")" in rawdata[j:]:
+                    j = rawdata.find(")", j) + 1
+                else:
+                    return -1
+                while rawdata[j:j+1].isspace():
+                    j = j + 1
+                if not rawdata[j:]:
+                    # end of buffer, incomplete
+                    return -1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+            c = rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == "#":
+                if rawdata[j:] == "#":
+                    # end of buffer
+                    return -1
+                name, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == '>':
+                # all done
+                return j + 1
+
+    # Internal -- scan past <!NOTATION declarations
+    def _parse_doctype_notation(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j < 0:
+            return j
+        rawdata = self.rawdata
+        while 1:
+            c = rawdata[j:j+1]
+            if not c:
+                # end of buffer; incomplete
+                return -1
+            if c == '>':
+                return j + 1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1
+                j = m.end()
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan past <!ENTITY declarations
+    def _parse_doctype_entity(self, i, declstartpos):
+        rawdata = self.rawdata
+        if rawdata[i:i+1] == "%":
+            j = i + 1
+            while 1:
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+                if c.isspace():
+                    j = j + 1
+                else:
+                    break
+        else:
+            j = i
+        name, j = self._scan_name(j, declstartpos)
+        if j < 0:
+            return j
+        while 1:
+            c = self.rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1    # incomplete
+            elif c == ">":
+                return j + 1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan a name token and the new position and the token, or
+    # return -1 if we've reached the end of the buffer.
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = _declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.updatepos(declstartpos, i)
+            self.error("expected name token at %r"
+                       % rawdata[declstartpos:declstartpos+20])
+
+    # To be overridden -- handlers for unknown objects
+    def unknown_decl(self, data):
+        pass
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/abc.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/abc.py
new file mode 100644
index 00000000..941be4f5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/abc.py
@@ -0,0 +1,2 @@
+def abstractmethod(f):  # no-op stand-in: returns f unchanged (no ABC machinery on MicroPython)
+    return f
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/argparse.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/argparse.py
new file mode 100644
index 00000000..7f57356d
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/argparse.py
@@ -0,0 +1,216 @@
+"""
+Minimal and functional version of CPython's argparse module.
+"""
+
+import sys
+from ucollections import namedtuple
+
+
+class _ArgError(BaseException):
+    pass
+
+
+class _Arg:
+    def __init__(self, names, dest, action, nargs, const, default, help):
+        self.names = names
+        self.dest = dest
+        self.action = action
+        self.nargs = nargs
+        self.const = const
+        self.default = default
+        self.help = help
+
+    def parse(self, optname, args):
+        # parse args for this arg
+        if self.action == "store":
+            if self.nargs is None:
+                if args:
+                    return args.pop(0)
+                else:
+                    raise _ArgError("expecting value for %s" % optname)
+            elif self.nargs == "?":
+                if args:
+                    return args.pop(0)
+                else:
+                    return self.default
+            else:
+                if self.nargs == "*":
+                    n = -1
+                elif self.nargs == "+":
+                    if not args:
+                        raise _ArgError("expecting value for %s" % optname)
+                    n = -1
+                else:
+                    n = int(self.nargs)
+                ret = []
+                stop_at_opt = True
+                while args and n != 0:
+                    if stop_at_opt and args[0].startswith("-") and args[0] != "-":
+                        if args[0] == "--":
+                            stop_at_opt = False
+                            args.pop(0)
+                        else:
+                            break
+                    else:
+                        ret.append(args.pop(0))
+                        n -= 1
+                if n > 0:
+                    raise _ArgError("expecting value for %s" % optname)
+                return ret
+        elif self.action == "store_const":
+            return self.const
+        else:
+            assert False
+
+
+def _dest_from_optnames(opt_names):  # derive the attribute name, preferring the first --long option
+    dest = opt_names[0]
+    for name in opt_names:
+        if name.startswith("--"):
+            dest = name
+            break
+    return dest.lstrip("-").replace("-", "_")  # e.g. "--dry-run" -> "dry_run"
+
+
+class ArgumentParser:
+    def __init__(self, *, description=""):
+        self.description = description
+        self.opt = []
+        self.pos = []
+
+    def add_argument(self, *args, **kwargs):
+        action = kwargs.get("action", "store")
+        if action == "store_true":
+            action = "store_const"
+            const = True
+            default = kwargs.get("default", False)
+        elif action == "store_false":
+            action = "store_const"
+            const = False
+            default = kwargs.get("default", True)
+        else:
+            const = kwargs.get("const", None)
+            default = kwargs.get("default", None)
+        if args and args[0].startswith("-"):
+            list = self.opt
+            dest = kwargs.get("dest")
+            if dest is None:
+                dest = _dest_from_optnames(args)
+        else:
+            list = self.pos
+            dest = kwargs.get("dest")
+            if dest is None:
+                dest = args[0]
+            if not args:
+                args = [dest]
+        list.append(
+            _Arg(args, dest, action, kwargs.get("nargs", None),
+                 const, default, kwargs.get("help", "")))
+
+    def usage(self, full):
+        # print short usage
+        print("usage: %s [-h]" % sys.argv[0], end="")
+
+        def render_arg(arg):
+            if arg.action == "store":
+                if arg.nargs is None:
+                    return " %s" % arg.dest
+                if isinstance(arg.nargs, int):
+                    return " %s(x%d)" % (arg.dest, arg.nargs)
+                else:
+                    return " %s%s" % (arg.dest, arg.nargs)
+            else:
+                return ""
+        for opt in self.opt:
+            print(" [%s%s]" % (', '.join(opt.names), render_arg(opt)), end="")
+        for pos in self.pos:
+            print(render_arg(pos), end="")
+        print()
+
+        if not full:
+            return
+
+        # print full information
+        print()
+        if self.description:
+            print(self.description)
+        if self.pos:
+            print("\npositional args:")
+            for pos in self.pos:
+                print("  %-16s%s" % (pos.names[0], pos.help))
+        print("\noptional args:")
+        print("  -h, --help      show this message and exit")
+        for opt in self.opt:
+            print("  %-16s%s" % (', '.join(opt.names) + render_arg(opt), opt.help))
+
+    def parse_args(self, args=None):
+        return self._parse_args_impl(args, False)
+
+    def parse_known_args(self, args=None):
+        return self._parse_args_impl(args, True)
+
+    def _parse_args_impl(self, args, return_unknown):
+        if args is None:
+            args = sys.argv[1:]
+        else:
+            args = args[:]
+        try:
+            return self._parse_args(args, return_unknown)
+        except _ArgError as e:
+            self.usage(False)
+            print("error:", e)
+            sys.exit(2)
+
+    def _parse_args(self, args, return_unknown):
+        # add optional args with defaults
+        arg_dest = []
+        arg_vals = []
+        for opt in self.opt:
+            arg_dest.append(opt.dest)
+            arg_vals.append(opt.default)
+
+        # deal with unknown arguments, if needed
+        unknown = []
+        def consume_unknown():
+            while args and not args[0].startswith("-"):
+                unknown.append(args.pop(0))
+
+        # parse all args
+        parsed_pos = False
+        while args or not parsed_pos:
+            if args and args[0].startswith("-") and args[0] != "-" and args[0] != "--":
+                # optional arg
+                a = args.pop(0)
+                if a in ("-h", "--help"):
+                    self.usage(True)
+                    sys.exit(0)
+                found = False
+                for i, opt in enumerate(self.opt):
+                    if a in opt.names:
+                        arg_vals[i] = opt.parse(a, args)
+                        found = True
+                        break
+                if not found:
+                    if return_unknown:
+                        unknown.append(a)
+                        consume_unknown()
+                    else:
+                        raise _ArgError("unknown option %s" % a)
+            else:
+                # positional arg
+                if parsed_pos:
+                    if return_unknown:
+                        unknown = unknown + args
+                        break
+                    else:
+                        raise _ArgError("extra args: %s" % " ".join(args))
+                for pos in self.pos:
+                    arg_dest.append(pos.dest)
+                    arg_vals.append(pos.parse(pos.names[0], args))
+                parsed_pos = True
+                if return_unknown:
+                    consume_unknown()
+
+        # build and return named tuple with arg values
+        values = namedtuple("args", arg_dest)(*arg_vals)
+        return (values, unknown) if return_unknown else values
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/asyncio_slow.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/asyncio_slow.py
new file mode 100644
index 00000000..89245ce0
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/asyncio_slow.py
@@ -0,0 +1,151 @@
+import time
+import logging
+
+
+log = logging.getLogger("asyncio")
+
+
+# Workaround for not being able to subclass builtin types
+class LoopStop(Exception):
+    pass
+
+class InvalidStateError(Exception):
+    pass
+
+# Object not matching any other object
+_sentinel = []
+
+
+class EventLoop:
+
+    def __init__(self):
+        self.q = []  # FIFO of (callable, args) pairs awaiting execution
+
+    def call_soon(self, c, *args):
+        self.q.append((c, args))
+
+    def call_later(self, delay, c, *args):
+        def _delayed(c, args, delay):  # coroutine that waits, then reschedules c
+            yield from sleep(delay)
+            self.call_soon(c, *args)
+        Task(_delayed(c, args, delay))  # Task schedules itself on construction
+
+    def run_forever(self):
+        while self.q:
+            c = self.q.pop(0)
+            try:
+                c[0](*c[1])
+            except LoopStop:  # raised by stop()'s sentinel callback
+                return
+        # I mean, forever
+        while True:
+            time.sleep(1)
+
+    def stop(self):
+        def _cb():  # sentinel: unwinds run_forever via LoopStop
+            raise LoopStop
+        self.call_soon(_cb)
+
+    def run_until_complete(self, coro):
+        t = ensure_future(coro)
+        t.add_done_callback(lambda a: self.stop())  # stop the loop once coro finishes
+        self.run_forever()
+
+    def close(self):
+        pass
+
+
+_def_event_loop = EventLoop()
+
+
+class Future:
+
+    def __init__(self, loop=_def_event_loop):
+        self.loop = loop
+        self.res = _sentinel  # _sentinel marks "no result yet"
+        self.cbs = []  # callbacks to run once a result is set
+
+    def result(self):
+        if self.res is _sentinel:  # not resolved yet
+            raise InvalidStateError
+        return self.res
+
+    def add_done_callback(self, fn):
+        if self.res is _sentinel:  # still pending: defer until set_result
+            self.cbs.append(fn)
+        else:
+            self.loop.call_soon(fn, self)  # already done: schedule immediately
+
+    def set_result(self, val):
+        self.res = val
+        for f in self.cbs:  # fire callbacks synchronously, in registration order
+            f(self)
+
+
+class Task(Future):
+
+    def __init__(self, coro, loop=_def_event_loop):
+        super().__init__()
+        self.loop = loop
+        self.c = coro  # the generator/coroutine being driven
+        # upstream asyncio forces task to be scheduled on instantiation
+        self.loop.call_soon(self)
+
+    def __call__(self):
+        try:
+            next(self.c)  # advance the coroutine one step
+            self.loop.call_soon(self)  # reschedule for the next step
+        except StopIteration as e:  # coroutine ran to completion
+            log.debug("Coro finished: %s", self.c)
+            self.set_result(None)
+
+
+def get_event_loop():
+    return _def_event_loop
+
+
+# Decorator
+def coroutine(f):
+    return f
+
+
+def ensure_future(coro):
+    if isinstance(coro, Future):
+        return coro
+    return Task(coro)
+
+
+class _Wait(Future):
+
+    def __init__(self, n):
+        Future.__init__(self)
+        self.n = n
+
+    def _done(self):
+        self.n -= 1
+        log.debug("Wait: remaining tasks: %d", self.n)
+        if not self.n:
+            self.set_result(None)
+
+    def __call__(self):
+        pass
+
+
+def wait(coro_list, loop=_def_event_loop):  # future that resolves when every coroutine completes
+
+    w = _Wait(len(coro_list))
+
+    for c in coro_list:
+        t = ensure_future(c)
+        t.add_done_callback(lambda val: w._done())  # each completion decrements the counter
+
+    return w
+
+
+def sleep(secs):  # coroutine: yield repeatedly until 'secs' seconds have elapsed
+    t = time.time()
+    log.debug("Started sleep at: %s, targetting: %s", t, t + secs)
+    while time.time() < t + secs:
+        time.sleep(0.01)  # short blocking nap keeps the poll loop cheap
+        yield
+    log.debug("Finished sleeping %ss", secs)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/base64.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/base64.py
new file mode 100644
index 00000000..be01ba97
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/base64.py
@@ -0,0 +1,414 @@
+#! /usr/bin/env python3
+
+"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
+
+# Modified 04-Oct-1995 by Jack Jansen to use binascii module
+# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
+# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
+
+import re
+import struct
+import binascii
+
+
+__all__ = [
+    # Legacy interface exports traditional RFC 1521 Base64 encodings
+    'encode', 'decode', 'encodebytes', 'decodebytes',
+    # Generalized interface for other encodings
+    'b64encode', 'b64decode', 'b32encode', 'b32decode',
+    'b16encode', 'b16decode',
+    # Standard Base64 encoding
+    'standard_b64encode', 'standard_b64decode',
+    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
+    # starting at:
+    #
+    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
+    'urlsafe_b64encode', 'urlsafe_b64decode',
+    ]
+
+
+bytes_types = (bytes, bytearray)  # Types acceptable as binary data
+
def _bytes_from_decode_data(s):
    """Coerce *s* (str or bytes-like) to bytes for the b*decode helpers.

    A str is encoded as ASCII; ValueError is raised if that fails.
    bytes/bytearray pass through unchanged; any other type raises
    TypeError.
    """
    if isinstance(s, str):
        try:
            return s.encode('ascii')
        # This MicroPython port may not expose UnicodeEncodeError, so catch
        # broadly -- but not with a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            raise ValueError('string argument should contain only ASCII characters')
    elif isinstance(s, bytes_types):
        return s
    else:
        raise TypeError("argument should be bytes or ASCII string, not %s" % s.__class__.__name__)
+
+
+
+# Base64 encoding/decoding uses binascii
+
def b64encode(s, altchars=None):
    """Encode the bytes-like object *s* using Base64.

    Optional *altchars* must be a 2-byte string giving replacements for
    the '+' and '/' characters, e.g. to produce URL- or filesystem-safe
    output.  Returns the encoded bytes.
    """
    if not isinstance(s, (bytes, bytearray)):
        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
    # b2a_base64 appends a newline; drop it.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    if not isinstance(altchars, (bytes, bytearray)):
        raise TypeError("expected bytes, not %s"
                        % altchars.__class__.__name__)
    assert len(altchars) == 2, repr(altchars)
    return encoded.translate(bytes.maketrans(b'+/', altchars))
+
+
def b64decode(s, altchars=None, validate=False):
    """Decode Base64-encoded data *s* (bytes or ASCII str).

    *altchars*, if given, is the 2-character alternative alphabet used
    in place of '+' and '/'.  binascii.Error is raised on incorrect
    padding.  Unless *validate* is true, characters outside the
    alphabet are discarded before the padding check; with *validate*
    true they raise binascii.Error instead.
    """
    s = _bytes_from_decode_data(s)
    if altchars is not None:
        altchars = _bytes_from_decode_data(altchars)
        assert len(altchars) == 2, repr(altchars)
        s = s.translate(bytes.maketrans(altchars, b'+/'))
    if validate:
        if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
            raise binascii.Error('Non-base64 digit found')
    return binascii.a2b_base64(s)
+
+
def standard_b64encode(s):
    """Encode bytes *s* with the standard Base64 alphabet; returns bytes."""
    return b64encode(s)
+
def standard_b64decode(s):
    """Decode standard-alphabet Base64 data *s*; returns bytes.

    binascii.Error is raised on bad padding or non-alphabet input.
    """
    return b64decode(s)
+
+
+#_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
+#_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
+
def urlsafe_b64encode(s):
    """Encode bytes *s* with the URL- and filesystem-safe Base64 alphabet.

    Identical to standard Base64 except '-' replaces '+' and '_'
    replaces '/'.  Returns the encoded bytes.
    """
    # Previously raised NotImplementedError; b64encode's altchars path
    # already relies on bytes.maketrans in this port, so the translation
    # is available and we can simply delegate.
    return b64encode(s, b'-_')
+
def urlsafe_b64decode(s):
    """Decode Base64 data *s* encoded with the URL-safe alphabet.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    binascii.Error is raised on bad padding or non-alphabet input.
    """
    # Previously raised NotImplementedError; b64decode's altchars path
    # performs exactly the '-_' -> '+/' translation the commented-out
    # upstream code did, so delegate to it.
    return b64decode(s, b'-_')
+
+
+
+# Base32 encoding/decoding must be done in Python
# RFC 3548 Base32 alphabet: 5-bit value -> single alphabet byte.
_b32alphabet = {
    0: b'A',  9: b'J', 18: b'S', 27: b'3',
    1: b'B', 10: b'K', 19: b'T', 28: b'4',
    2: b'C', 11: b'L', 20: b'U', 29: b'5',
    3: b'D', 12: b'M', 21: b'V', 30: b'6',
    4: b'E', 13: b'N', 22: b'W', 31: b'7',
    5: b'F', 14: b'O', 23: b'X',
    6: b'G', 15: b'P', 24: b'Y',
    7: b'H', 16: b'Q', 25: b'Z',
    8: b'I', 17: b'R', 26: b'2',
    }

# Forward table for encoding: index = 5-bit value, entry = alphabet byte (int).
_b32tab = [v[0] for k, v in sorted(_b32alphabet.items())]
# Reverse table for decoding: alphabet byte (int) -> 5-bit value.
_b32rev = dict([(v[0], k) for k, v in _b32alphabet.items()])
+
+
def b32encode(s):
    """Encode a byte string using Base32.

    s is the byte string to encode.  The encoded byte string is returned.
    The input is processed in 5-byte (40-bit) quanta, each producing 8
    output characters; a partial final quantum is zero-padded and the
    surplus output characters are replaced with '=' per RFC 3548.
    """
    if not isinstance(s, bytes_types):
        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s = s + bytes(5 - leftover)  # Don't use += !
        quanta += 1
    encoded = bytearray()
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        encoded += bytes([_b32tab[c1 >> 11],         # bits 1 - 5
                          _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                          _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                          _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                          _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                          _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                          _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                          _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                          ])
    # Adjust for any leftover partial quanta
    # (the trailing characters that encode only the zero padding are
    # replaced by '='; counts follow RFC 3548 section 5)
    if leftover == 1:
        encoded = encoded[:-6] + b'======'
    elif leftover == 2:
        encoded = encoded[:-4] + b'===='
    elif leftover == 3:
        encoded = encoded[:-3] + b'==='
    elif leftover == 4:
        encoded = encoded[:-1] + b'='
    return bytes(encoded)
+
+
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded byte string.

    s is the byte string to decode.  Optional casefold is a flag
    specifying whether a lowercase alphabet is acceptable as input.
    For security purposes, the default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the
    letter O (oh), and for optional mapping of the digit 1 (one) to
    either the letter I (eye) or letter L (el).  The optional argument
    map01 when not None, specifies which letter the digit 1 should be
    mapped to (when map01 is not None, the digit 0 is always mapped to
    the letter O).  For security purposes the default is None, so that
    0 and 1 are not allowed in the input.

    The decoded byte string is returned.  binascii.Error is raised if
    the input is incorrectly padded or if there are non-alphabet
    characters present in the input.
    """
    s = _bytes_from_decode_data(s)
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise binascii.Error('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01 is not None:
        map01 = _bytes_from_decode_data(map01)
        assert len(map01) == 1, repr(map01)
        s = s.translate(bytes.maketrans(b'01', b'O' + map01))
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = s.find(b'=')
    if padchars > 0:
        padchars = len(s) - padchars
        s = s[:-padchars]
    else:
        padchars = 0

    # Now decode the full quanta
    # (each 8 characters accumulate into a 40-bit integer, emitted as 5
    # bytes via its 10-digit hex representation)
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise binascii.Error('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify(bytes('%010x' % acc, "ascii")))
            acc = 0
            shift = 35
    # Process the last, partial quanta
    # (padchars tells how many of the 5 decoded bytes are padding artifacts;
    # the mapping 1->1, 3->2, 4->3, 6->4 follows RFC 3548.  With no padding
    # the accumulator is empty and `last` is discarded entirely.)
    last = binascii.unhexlify(bytes('%010x' % acc, "ascii"))
    if padchars == 0:
        last = b''                      # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise binascii.Error('Incorrect padding')
    parts.append(last)
    return b''.join(parts)
+
+
+
+# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
+# lowercase.  The RFC also recommends against accepting input case
+# insensitively.
def b16encode(s):
    """Encode the bytes-like object *s* as Base16 (uppercase hex).

    Returns the encoded bytes.
    """
    if not isinstance(s, (bytes, bytearray)):
        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
    return binascii.hexlify(s).upper()
+
+
def b16decode(s, casefold=False):
    """Decode Base16-encoded data *s* (bytes or ASCII str).

    With *casefold* true, lowercase hex digits are accepted (the input
    is uppercased before decoding); otherwise, per RFC 3548's
    recommendation, they raise binascii.Error like any other
    non-alphabet character.
    """
    s = _bytes_from_decode_data(s)
    if casefold:
        s = s.upper()
    if re.search(b'[^0-9A-F]', s):
        raise binascii.Error('Non-base16 digit found')
    return binascii.unhexlify(s)
+
+
+
+# Legacy interface.  This code could be cleaned up since I don't believe
+# binascii has any line length limitations.  It just doesn't seem worth it
+# though.  The files should be opened in binary mode.
+
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3  # 57 raw bytes encode to one full 76-char line
+
def encode(input, output):
    """Encode binary file *input* to newline-terminated base64 lines on *output*."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top up short reads so every line but the last encodes a full
        # MAXBINSIZE-byte quantum.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk += more
        output.write(binascii.b2a_base64(chunk))
+
+
def decode(input, output):
    """Decode base64 lines from binary file *input*, writing raw bytes to *output*."""
    while True:
        line = input.readline()
        if not line:
            break
        output.write(binascii.a2b_base64(line))
+
+
def encodebytes(s):
    """Encode bytes *s* as base-64, one newline-terminated line per
    MAXBINSIZE (57) input bytes."""
    if not isinstance(s, (bytes, bytearray)):
        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
    pieces = [binascii.b2a_base64(s[pos:pos + MAXBINSIZE])
              for pos in range(0, len(s), MAXBINSIZE)]
    return b"".join(pieces)
+
def encodestring(s):
    """Legacy alias of encodebytes()."""
    from warnings import warn
    warn("encodestring() is a deprecated alias, use encodebytes()",
         DeprecationWarning, 2)
    return encodebytes(s)
+
+
def decodebytes(s):
    """Decode multi-line base-64 encoded bytes *s*; return the raw bytes."""
    if not isinstance(s, (bytes, bytearray)):
        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
    return binascii.a2b_base64(s)
+
def decodestring(s):
    """Legacy alias of decodebytes()."""
    from warnings import warn
    warn("decodestring() is a deprecated alias, use decodebytes()",
         DeprecationWarning, 2)
    return decodebytes(s)
+
+
+# Usable as a script...
def main():
    """Command-line driver: encode (default) or decode a file or stdin.

    Options: -e encode, -d/-u decode, -t run the self-test and exit.
    """
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error as msg:
        sys.stdout = sys.stderr
        print(msg)
        print("""usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
        sys.exit(2)
    action = encode
    for opt, _ in opts:
        if opt == '-e':
            action = encode
        elif opt in ('-d', '-u'):
            action = decode
        elif opt == '-t':
            test()
            return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            action(f, sys.stdout.buffer)
    else:
        action(sys.stdin.buffer, sys.stdout.buffer)
+
+
def test():
    """Round-trip b'Aladdin:open sesame' through encode/decode and verify."""
    original = b"Aladdin:open sesame"
    print(repr(original))
    encoded = encodebytes(original)
    print(repr(encoded))
    decoded = decodebytes(encoded)
    print(repr(decoded))
    assert original == decoded
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/binascii.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/binascii.py
new file mode 100644
index 00000000..dd6744c2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/binascii.py
@@ -0,0 +1,113 @@
+from ubinascii import *
+
if "unhexlify" not in globals():
    # Pure-Python fallback for ubinascii builds that lack unhexlify.
    def unhexlify(data):
        """Convert a hex string to the bytes it represents."""
        if len(data) % 2:
            raise ValueError("Odd-length string")

        return bytes([int(data[pos:pos + 2], 16)
                      for pos in range(0, len(data), 2)])

# Traditional CPython binascii names for the hex codec.
b2a_hex = hexlify
a2b_hex = unhexlify
+
+# ____________________________________________________________
+
PAD = '='

# Reverse base64 lookup, indexed by byte value.  -1 marks characters outside
# the alphabet (including PAD itself -- padding is handled separately by the
# decoder).
table_a2b_base64 = [
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,62, -1,-1,-1,63,
    52,53,54,55, 56,57,58,59, 60,61,-1,-1, -1,-1,-1,-1, # Note PAD->-1 here
    -1, 0, 1, 2,  3, 4, 5, 6,  7, 8, 9,10, 11,12,13,14,
    15,16,17,18, 19,20,21,22, 23,24,25,-1, -1,-1,-1,-1,
    -1,26,27,28, 29,30,31,32, 33,34,35,36, 37,38,39,40,
    41,42,43,44, 45,46,47,48, 49,50,51,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
]

def _transform(n):
    # Invalid entries become '\xff' (a value a2b_base64 checks for); valid
    # 6-bit values keep their code point.
    return '\xff' if n == -1 else chr(n)

# Collapse the int list into a 256-char string for O(1) indexed lookup.
table_a2b_base64 = ''.join(map(_transform, table_a2b_base64))
assert len(table_a2b_base64) == 256
+
def a2b_base64(ascii):
    "Decode a line of base64 data."

    # NOTE(review): input is iterated as ints and passed through chr(), so
    # this expects a bytes-like object, not str -- confirm at call sites.
    res = []
    quad_pos = 0          # position (0-3) within the current 4-char quantum
    leftchar = 0          # bit accumulator
    leftbits = 0          # number of valid bits in the accumulator
    last_char_was_a_pad = False

    for c in ascii:
        c = chr(c)
        if c == PAD:
            if quad_pos > 2 or (quad_pos == 2 and last_char_was_a_pad):
                break      # stop on 'xxx=' or on 'xx=='
            last_char_was_a_pad = True
        else:
            n = ord(table_a2b_base64[ord(c)])
            if n == 0xff:
                continue    # ignore strange characters
            #
            # Shift it in on the low end, and see if there's
            # a byte ready for output.
            quad_pos = (quad_pos + 1) & 3
            leftchar = (leftchar << 6) | n
            leftbits += 6
            #
            if leftbits >= 8:
                leftbits -= 8
                res.append((leftchar >> leftbits).to_bytes(1, 'big'))
                leftchar &= ((1 << leftbits) - 1)
            #
            last_char_was_a_pad = False
    else:
        # for/else: reached only when the loop was NOT ended by the padding
        # `break` above; leftover bits then mean the input was truncated.
        if leftbits != 0:
            raise Exception("Incorrect padding")

    return b''.join(res)
+
+# ____________________________________________________________
+
# Forward base64 alphabet: index = 6-bit value, entry = output character.
table_b2a_base64 = (
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")

def b2a_base64(bin):
    """Base64-code line of data.

    Encodes the bytes-like object *bin*, pads the result with '=' to a
    multiple of four characters, and returns it as bytes with a
    trailing newline.
    """
    # (An unused `newlength` pre-computation was removed here; the result
    # is assembled in `res` and its length never needed up front.)
    res = []

    leftchar = 0  # bit accumulator
    leftbits = 0  # number of valid bits in the accumulator
    for c in bin:
        # Shift into our buffer, and output any 6bits ready
        leftchar = (leftchar << 8) | c
        leftbits += 8
        res.append(table_b2a_base64[(leftchar >> (leftbits-6)) & 0x3f])
        leftbits -= 6
        if leftbits >= 6:
            res.append(table_b2a_base64[(leftchar >> (leftbits-6)) & 0x3f])
            leftbits -= 6
    # Flush the 2 or 4 leftover bits (if any) and pad to a 4-char quantum.
    if leftbits == 2:
        res.append(table_b2a_base64[(leftchar & 3) << 4])
        res.append(PAD)
        res.append(PAD)
    elif leftbits == 4:
        res.append(table_b2a_base64[(leftchar & 0xf) << 2])
        res.append(PAD)
    res.append('\n')
    return ''.join(res).encode('ascii')
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/binhex.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/binhex.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/bisect.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/bisect.py
new file mode 100644
index 00000000..4a4d0525
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/bisect.py
@@ -0,0 +1,92 @@
+"""Bisection algorithms."""
+
def insort_right(a, x, lo=0, hi=None):
    """Insert *x* into sorted list *a*, after any entries equal to *x*.

    Optional *lo* (default 0) and *hi* (default len(a)) bound the slice
    searched.  Raises ValueError if *lo* is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    hi = len(a) if hi is None else hi
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    a.insert(lo, x)

insort = insort_right   # backward compatibility
+
def bisect_right(a, x, lo=0, hi=None):
    """Return the insertion index for *x* in sorted list *a*, to the right
    of any existing entries equal to *x*.

    All e in a[:i] satisfy e <= x and all e in a[i:] satisfy e > x.
    Optional *lo*/*hi* bound the slice searched; ValueError is raised if
    *lo* is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    hi = len(a) if hi is None else hi
    while lo < hi:
        middle = (lo + hi) // 2
        if x < a[middle]:
            hi = middle
        else:
            lo = middle + 1
    return lo

bisect = bisect_right   # backward compatibility
+
def insort_left(a, x, lo=0, hi=None):
    """Insert *x* into sorted list *a*, before any entries equal to *x*.

    Optional *lo* (default 0) and *hi* (default len(a)) bound the slice
    searched.  Raises ValueError if *lo* is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    hi = len(a) if hi is None else hi
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    a.insert(lo, x)
+
+
def bisect_left(a, x, lo=0, hi=None):
    """Return the insertion index for *x* in sorted list *a*, to the left
    of any existing entries equal to *x*.

    All e in a[:i] satisfy e < x and all e in a[i:] satisfy e >= x.
    Optional *lo*/*hi* bound the slice searched; ValueError is raised if
    *lo* is negative.
    """
    if lo < 0:
        raise ValueError('lo must be non-negative')
    hi = len(a) if hi is None else hi
    while lo < hi:
        middle = (lo + hi) // 2
        if a[middle] < x:
            lo = middle + 1
        else:
            hi = middle
    return lo
+
+# Overwrite above definitions with a fast C implementation
+try:
+    from _bisect import *
+except ImportError:
+    pass
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/calendar.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/calendar.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cgi.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cgi.py
new file mode 100644
index 00000000..dbe44cc8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cgi.py
@@ -0,0 +1,1046 @@
+#! /usr/local/bin/python
+
+# NOTE: the above "/usr/local/bin/python" is NOT a mistake.  It is
+# intentionally NOT "/usr/bin/env python".  On many systems
+# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
+# scripts, and /usr/local/bin is the default directory where Python is
+# installed, so /usr/bin/env would be unable to find python.  Granted,
+# binary installations by Linux vendors often install Python in
+# /usr/bin.  So let those vendors patch cgi.py to match their choice
+# of installation.
+
+"""Support module for CGI (Common Gateway Interface) scripts.
+
+This module defines a number of utilities for use by CGI scripts
+written in Python.
+"""
+
+# History
+# -------
+#
+# Michael McLay started this module.  Steve Majewski changed the
+# interface to SvFormContentDict and FormContentDict.  The multipart
+# parsing was inspired by code submitted by Andreas Paepcke.  Guido van
+# Rossum rewrote, reformatted and documented the module and is currently
+# responsible for its maintenance.
+#
+
+__version__ = "2.6"
+
+
+# Imports
+# =======
+
+from io import StringIO, BytesIO, TextIOWrapper
+import sys
+import os
+import urllib.parse
+from email.parser import FeedParser
+from warnings import warn
+import html
+import locale
+import tempfile
+
+__all__ = ["MiniFieldStorage", "FieldStorage",
+           "parse", "parse_qs", "parse_qsl", "parse_multipart",
+           "parse_header", "print_exception", "print_environ",
+           "print_form", "print_directory", "print_arguments",
+           "print_environ_usage", "escape"]
+
+# Logging support
+# ===============
+
# Module-global logging state, managed by initlog()/closelog() below.
logfile = ""            # Filename to log to, if not empty
logfp = None            # File object to log to, if not None
+
def initlog(*allargs):
    """Write a log message, if there is a log file.

    Always call log(), not this function directly: `log` is rebound to
    initlog initially, to dolog once a log file is open, and to nolog
    when logging is disabled.

    The first argument is a %-style format string; remaining arguments
    are its operands, so log("%s: %s", "a", "b") writes "a: b" followed
    by a newline.

    If the global logfp is not None it is the file object written to.
    Otherwise, if logfile names a path, that (world-writable!) file is
    opened for appending; failure to open silently disables logging,
    since there is no safe place to report the error.
    """
    global log, logfile, logfp
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except IOError:
            pass
    log = dolog if logfp else nolog
    log(*allargs)
+
def dolog(fmt, *args):
    """Write a %-formatted message to the open log file.  See initlog()."""
    logfp.write(fmt % args + "\n")
+
def nolog(*allargs):
    """No-op stand-in bound to log() while logging is disabled."""
    return None
+
def closelog():
    """Close the log file, if open, and reset logging to its initial state."""
    global log, logfile, logfp
    logfile = ''
    if logfp:
        logfp.close()
        logfp = None
    log = initlog

log = initlog           # The current logging function
+
+
+# Parsing functions
+# =================
+
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
# (module-global: parse() and parse_multipart() raise ValueError when the
# declared content length exceeds this limit)
maxlen = 0
+
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Parse a query in the environment or from a file (default stdin)

        Arguments, all optional:

        fp              : file pointer; default: sys.stdin.buffer

        environ         : environment dictionary; default: os.environ

        keep_blank_values: flag indicating whether blank values in
            percent-encoded forms should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
    """
    if fp is None:
        fp = sys.stdin

    # field keys and values (except for files) are returned as strings
    # an encoding is required to decode the bytes read from self.fp
    if hasattr(fp,'encoding'):
        encoding = fp.encoding
    else:
        encoding = 'latin-1'

    # fp.read() must return bytes
    if isinstance(fp, TextIOWrapper):
        fp = fp.buffer

    if not 'REQUEST_METHOD' in environ:
        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
    if environ['REQUEST_METHOD'] == 'POST':
        # POST: the form data is in the request body; multipart bodies are
        # delegated wholesale to parse_multipart().
        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
        if ctype == 'multipart/form-data':
            return parse_multipart(fp, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            clength = int(environ['CONTENT_LENGTH'])
            if maxlen and clength > maxlen:
                raise ValueError('Maximum content length exceeded')
            qs = fp.read(clength).decode(encoding)
        else:
            qs = ''                     # Unknown content-type
        # Any URL query string (or, for stand-alone testing, a first
        # command-line argument) is appended after the body's fields.
        if 'QUERY_STRING' in environ:
            if qs: qs = qs + '&'
            qs = qs + environ['QUERY_STRING']
        elif sys.argv[1:]:
            if qs: qs = qs + '&'
            qs = qs + sys.argv[1]
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    elif 'QUERY_STRING' in environ:
        qs = environ['QUERY_STRING']
    else:
        # GET with no query string: fall back to argv for stand-alone use.
        if sys.argv[1:]:
            qs = sys.argv[1]
        else:
            qs = ""
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding)
+
+
+# parse query string function called from urlparse,
+# this is done in order to maintain backward compatiblity.
+
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Deprecated wrapper: parse query string *qs* via urllib.parse.parse_qs."""
    warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
         DeprecationWarning, 2)
    return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
+
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Deprecated wrapper: parse query string *qs* via urllib.parse.parse_qsl."""
    warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
         DeprecationWarning, 2)
    return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
+
def parse_multipart(fp, pdict):
    """Parse multipart input.

    Arguments:
    fp   : input file
    pdict: dictionary containing other parameters of content-type header

    Returns a dictionary just like parse_qs(): keys are the field names, each
    value is a list of values for that field.  This is easy to use but not
    much good if you are expecting megabytes to be uploaded -- in that case,
    use the FieldStorage class instead which is much more flexible.  Note
    that content-type is the raw, unparsed contents of the content-type
    header.

    XXX This does not parse nested multipart parts -- use FieldStorage for
    that.

    XXX This should really be subsumed by FieldStorage altogether -- no
    point in having two implementations of the same parsing algorithm.
    Also, FieldStorage protects itself better against certain DoS attacks
    by limiting the size of the data read in one chunk.  The API here
    does not support that kind of protection.  This also affects parse()
    since it can call parse_multipart().

    """
    import http.client

    boundary = b""
    if 'boundary' in pdict:
        boundary = pdict['boundary']
    # NOTE(review): valid_boundary is defined elsewhere in this module.
    if not valid_boundary(boundary):
        raise ValueError('Invalid boundary in multipart form: %r'
                            % (boundary,))

    nextpart = b"--" + boundary
    lastpart = b"--" + boundary + b"--"
    partdict = {}
    terminator = b""

    while terminator != lastpart:
        # NOTE(review): `bytes` shadows the builtin within this loop; it is
        # the declared content-length of the current part, -1 if unknown.
        bytes = -1
        data = None
        if terminator:
            # At start of next part.  Read headers first.
            # (On the first iteration terminator is empty, so this is
            # skipped and the preamble before the first boundary is
            # discarded via the `data is None: continue` below.)
            headers = http.client.parse_headers(fp)
            clength = headers.get('content-length')
            if clength:
                try:
                    bytes = int(clength)
                except ValueError:
                    pass
            if bytes > 0:
                if maxlen and bytes > maxlen:
                    raise ValueError('Maximum content length exceeded')
                data = fp.read(bytes)
            else:
                data = b""
        # Read lines until end of part.
        lines = []
        while 1:
            line = fp.readline()
            if not line:
                terminator = lastpart # End outer loop
                break
            if line.startswith(b"--"):
                terminator = line.rstrip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part.
        if data is None:
            continue
        if bytes < 0:
            # No content-length header: the part's payload is the collected
            # lines, minus the line terminator before the boundary.
            if lines:
                # Strip final line terminator
                line = lines[-1]
                if line[-2:] == b"\r\n":
                    line = line[:-2]
                elif line[-1:] == b"\n":
                    line = line[:-1]
                lines[-1] = line
                data = b"".join(lines)
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if 'name' in params:
            name = params['name']
        else:
            continue
        # Repeated field names accumulate into a list, like parse_qs().
        if name in partdict:
            partdict[name].append(data)
        else:
            partdict[name] = [data]

    return partdict
+
+
def _parseparam(s):
    """Yield the ';'-separated fields of *s*, honouring quoted strings.

    A ';' that falls inside a double-quoted value does not split the
    field; each yielded field is stripped of surrounding whitespace.
    """
    rest = s
    while rest[:1] == ';':
        rest = rest[1:]
        # Find the next ';' that is not inside an open quoted string: an
        # odd number of unescaped '"' before it means we are mid-quote.
        cut = rest.find(';')
        while cut > 0 and (rest.count('"', 0, cut) - rest.count('\\"', 0, cut)) % 2:
            cut = rest.find(';', cut + 1)
        if cut < 0:
            cut = len(rest)
        yield rest[:cut].strip()
        rest = rest[cut:]
+
def parse_header(line):
    """Parse a Content-type like header.

    Return the main content-type and a dictionary of options.
    """
    def split_params(s):
        # ';'-separated fields of s; a ';' inside a double-quoted value
        # (odd count of unescaped '"' before it) does not split a field.
        while s[:1] == ';':
            s = s[1:]
            end = s.find(';')
            while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
                end = s.find(';', end + 1)
            if end < 0:
                end = len(s)
            yield s[:end].strip()
            s = s[end:]

    parts = split_params(';' + line)
    key = next(parts)
    pdict = {}
    for item in parts:
        eq = item.find('=')
        if eq < 0:
            continue
        name = item[:eq].strip().lower()
        value = item[eq + 1:].strip()
        # Strip surrounding quotes and undo backslash escaping.
        if len(value) >= 2 and value[0] == value[-1] == '"':
            value = value[1:-1]
            value = value.replace('\\\\', '\\').replace('\\"', '"')
        pdict[name] = value
    return key, pdict
+
+
+# Classes for field storage
+# =========================
+
class MiniFieldStorage:

    """Like FieldStorage, for use when no file uploads are possible."""

    # Dummy attributes, present so instances expose the same read-only
    # interface as a full FieldStorage object.
    filename = None
    list = None
    type = None
    file = None
    type_options = {}
    disposition = None
    disposition_options = {}
    headers = {}

    def __init__(self, name, value):
        """Store the field name and its (already decoded) value."""
        self.name = name
        self.value = value

    def __repr__(self):
        """Return printable representation."""
        return "MiniFieldStorage({!r}, {!r})".format(self.name, self.value)
+
+
+class FieldStorage:
+
+    """Store a sequence of fields, reading multipart/form-data.
+
+    This class provides naming, typing, files stored on disk, and
+    more.  At the top level, it is accessible like a dictionary, whose
+    keys are the field names.  (Note: None can occur as a field name.)
+    The items are either a Python list (if there's multiple values) or
+    another FieldStorage or MiniFieldStorage object.  If it's a single
+    object, it has the following attributes:
+
+    name: the field name, if specified; otherwise None
+
+    filename: the filename, if specified; otherwise None; this is the
+        client side filename, *not* the file name on which it is
+        stored (that's a temporary file you don't deal with)
+
+    value: the value as a *string*; for file uploads, this
+        transparently reads the file every time you request the value
+        and returns *bytes*
+
+    file: the file(-like) object from which you can read the data *as
+        bytes* ; None if the data is stored a simple string
+
+    type: the content-type, or None if not specified
+
+    type_options: dictionary of options specified on the content-type
+        line
+
+    disposition: content-disposition, or None if not specified
+
+    disposition_options: dictionary of corresponding options
+
+    headers: a dictionary(-like) object (sometimes email.message.Message or a
+        subclass thereof) containing *all* headers
+
+    The class is subclassable, mostly for the purpose of overriding
+    the make_file() method, which is called internally to come up with
+    a file open for reading and writing.  This makes it possible to
+    override the default choice of storing all files in a temporary
+    directory and unlinking them as soon as they have been opened.
+
+    """
    def __init__(self, fp=None, headers=None, outerboundary=b'',
                 environ=os.environ, keep_blank_values=0, strict_parsing=0,
                 limit=None, encoding='utf-8', errors='replace'):
        """Constructor.  Read multipart/* until last part.

        Arguments, all optional:

        fp              : file pointer; default: sys.stdin.buffer
            (not used when the request method is GET)
            Can be :
            1. a TextIOWrapper object
            2. an object whose read() and readline() methods return bytes

        headers         : header dictionary-like object; default:
            taken from environ as per CGI spec

        outerboundary   : terminating multipart boundary
            (for internal use only)

        environ         : environment dictionary; default: os.environ

        keep_blank_values: flag indicating whether blank values in
            percent-encoded forms should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

        limit : used internally to read parts of multipart/form-data forms,
            to exit from the reading loop when reached. It is the difference
            between the form content-length and the number of bytes already
            read

        encoding, errors : the encoding and error handler used to decode the
            binary stream to strings. Must be the same as the charset defined
            for the page sending the form (content-type : meta http-equiv or
            header)

        """
        # Determine the request method; default is GET when unspecified.
        method = 'GET'
        self.keep_blank_values = keep_blank_values
        self.strict_parsing = strict_parsing
        if 'REQUEST_METHOD' in environ:
            method = environ['REQUEST_METHOD'].upper()
        self.qs_on_post = None
        # For GET/HEAD the form data is the query string (argv[1] is the
        # interactive-test fallback); it is re-encoded to bytes so that
        # self.fp uniformly yields bytes regardless of the source.
        if method == 'GET' or method == 'HEAD':
            if 'QUERY_STRING' in environ:
                qs = environ['QUERY_STRING']
            elif sys.argv[1:]:
                qs = sys.argv[1]
            else:
                qs = ""
            qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
            fp = BytesIO(qs)
            if headers is None:
                headers = {'content-type':
                           "application/x-www-form-urlencoded"}
        # No headers supplied: synthesize them from the CGI environment.
        if headers is None:
            headers = {}
            if method == 'POST':
                # Set default content-type for POST to what's traditional
                headers['content-type'] = "application/x-www-form-urlencoded"
            if 'CONTENT_TYPE' in environ:
                headers['content-type'] = environ['CONTENT_TYPE']
            if 'QUERY_STRING' in environ:
                self.qs_on_post = environ['QUERY_STRING']
            if 'CONTENT_LENGTH' in environ:
                headers['content-length'] = environ['CONTENT_LENGTH']
        if fp is None:
            self.fp = sys.stdin.buffer
        # self.fp.read() must return bytes
        elif isinstance(fp, TextIOWrapper):
            self.fp = fp.buffer
        else:
            self.fp = fp

        self.encoding = encoding
        self.errors = errors

        self.headers = headers
        if not isinstance(outerboundary, bytes):
            raise TypeError('outerboundary must be bytes, not %s'
                            % type(outerboundary).__name__)
        self.outerboundary = outerboundary

        self.bytes_read = 0
        self.limit = limit

        # Process content-disposition header
        cdisp, pdict = "", {}
        if 'content-disposition' in self.headers:
            cdisp, pdict = parse_header(self.headers['content-disposition'])
        self.disposition = cdisp
        self.disposition_options = pdict
        self.name = None
        if 'name' in pdict:
            self.name = pdict['name']
        self.filename = None
        if 'filename' in pdict:
            self.filename = pdict['filename']
        # A part that carries a filename is treated as a binary file upload.
        self._binary_file = self.filename is not None

        # Process content-type header
        #
        # Honor any existing content-type header.  But if there is no
        # content-type header, use some sensible defaults.  Assume
        # outerboundary is "" at the outer level, but something non-false
        # inside a multi-part.  The default for an inner part is text/plain,
        # but for an outer part it should be urlencoded.  This should catch
        # bogus clients which erroneously forget to include a content-type
        # header.
        #
        # See below for what we do if there does exist a content-type header,
        # but it happens to be something we don't understand.
        if 'content-type' in self.headers:
            ctype, pdict = parse_header(self.headers['content-type'])
        elif self.outerboundary or method != 'POST':
            ctype, pdict = "text/plain", {}
        else:
            ctype, pdict = 'application/x-www-form-urlencoded', {}
        self.type = ctype
        self.type_options = pdict
        if 'boundary' in pdict:
            self.innerboundary = pdict['boundary'].encode(self.encoding)
        else:
            self.innerboundary = b""

        # Parse content-length; an unparsable value leaves clen == -1
        # (meaning "unknown"), while an oversized one is rejected outright.
        clen = -1
        if 'content-length' in self.headers:
            try:
                clen = int(self.headers['content-length'])
            except ValueError:
                pass
            if maxlen and clen > maxlen:
                raise ValueError('Maximum content length exceeded')
        self.length = clen
        if self.limit is None and clen:
            self.limit = clen

        # Dispatch to the reader that matches the content type.
        self.list = self.file = None
        self.done = 0
        if ctype == 'application/x-www-form-urlencoded':
            self.read_urlencoded()
        elif ctype[:10] == 'multipart/':
            self.read_multi(environ, keep_blank_values, strict_parsing)
        else:
            self.read_single()
+
+    def __repr__(self):
+        """Return a printable representation."""
+        return "FieldStorage(%r, %r, %r)" % (
+                self.name, self.filename, self.value)
+
+    def __iter__(self):
+        return iter(self.keys())
+
+    def __getattr__(self, name):
+        if name != 'value':
+            raise AttributeError(name)
+        if self.file:
+            self.file.seek(0)
+            value = self.file.read()
+            self.file.seek(0)
+        elif self.list is not None:
+            value = self.list
+        else:
+            value = None
+        return value
+
+    def __getitem__(self, key):
+        """Dictionary style indexing."""
+        if self.list is None:
+            raise TypeError("not indexable")
+        found = []
+        for item in self.list:
+            if item.name == key: found.append(item)
+        if not found:
+            raise KeyError(key)
+        if len(found) == 1:
+            return found[0]
+        else:
+            return found
+
+    def getvalue(self, key, default=None):
+        """Dictionary style get() method, including 'value' lookup."""
+        if key in self:
+            value = self[key]
+            if isinstance(value, list):
+                return [x.value for x in value]
+            else:
+                return value.value
+        else:
+            return default
+
+    def getfirst(self, key, default=None):
+        """ Return the first value received."""
+        if key in self:
+            value = self[key]
+            if isinstance(value, list):
+                return value[0].value
+            else:
+                return value.value
+        else:
+            return default
+
+    def getlist(self, key):
+        """ Return list of received values."""
+        if key in self:
+            value = self[key]
+            if isinstance(value, list):
+                return [x.value for x in value]
+            else:
+                return [value.value]
+        else:
+            return []
+
+    def keys(self):
+        """Dictionary style keys() method."""
+        if self.list is None:
+            raise TypeError("not indexable")
+        return list(set(item.name for item in self.list))
+
+    def __contains__(self, key):
+        """Dictionary style __contains__ method."""
+        if self.list is None:
+            raise TypeError("not indexable")
+        return any(item.name == key for item in self.list)
+
+    def __len__(self):
+        """Dictionary style len(x) support."""
+        return len(self.keys())
+
+    def __nonzero__(self):
+        return bool(self.list)
+
+    def read_urlencoded(self):
+        """Internal: read data in query string format."""
+        qs = self.fp.read(self.length)
+        if not isinstance(qs, bytes):
+            raise ValueError("%s should return bytes, got %s" \
+                             % (self.fp, type(qs).__name__))
+        qs = qs.decode(self.encoding, self.errors)
+        if self.qs_on_post:
+            qs += '&' + self.qs_on_post
+        self.list = []
+        query = urllib.parse.parse_qsl(
+            qs, self.keep_blank_values, self.strict_parsing,
+            encoding=self.encoding, errors=self.errors)
+        for key, value in query:
+            self.list.append(MiniFieldStorage(key, value))
+        self.skip_lines()
+
+    FieldStorageClass = None
+
+    def read_multi(self, environ, keep_blank_values, strict_parsing):
+        """Internal: read a part that is itself multipart."""
+        ib = self.innerboundary
+        if not valid_boundary(ib):
+            raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
+        self.list = []
+        if self.qs_on_post:
+            query = urllib.parse.parse_qsl(
+                self.qs_on_post, self.keep_blank_values, self.strict_parsing,
+                encoding=self.encoding, errors=self.errors)
+            for key, value in query:
+                self.list.append(MiniFieldStorage(key, value))
+            FieldStorageClass = None
+
+        klass = self.FieldStorageClass or self.__class__
+        first_line = self.fp.readline() # bytes
+        if not isinstance(first_line, bytes):
+            raise ValueError("%s should return bytes, got %s" \
+                             % (self.fp, type(first_line).__name__))
+        self.bytes_read += len(first_line)
+        # first line holds boundary ; ignore it, or check that
+        # b"--" + ib == first_line.strip() ?
+        while True:
+            parser = FeedParser()
+            hdr_text = b""
+            while True:
+                data = self.fp.readline()
+                hdr_text += data
+                if not data.strip():
+                    break
+            if not hdr_text:
+                break
+            # parser takes strings, not bytes
+            self.bytes_read += len(hdr_text)
+            parser.feed(hdr_text.decode(self.encoding, self.errors))
+            headers = parser.close()
+            part = klass(self.fp, headers, ib, environ, keep_blank_values,
+                         strict_parsing,self.limit-self.bytes_read,
+                         self.encoding, self.errors)
+            self.bytes_read += part.bytes_read
+            self.list.append(part)
+            if part.done or self.bytes_read >= self.length > 0:
+                break
+        self.skip_lines()
+
+    def read_single(self):
+        """Internal: read an atomic part."""
+        if self.length >= 0:
+            self.read_binary()
+            self.skip_lines()
+        else:
+            self.read_lines()
+        self.file.seek(0)
+
+    bufsize = 8*1024            # I/O buffering size for copy to file
+
+    def read_binary(self):
+        """Internal: read binary data."""
+        self.file = self.make_file()
+        todo = self.length
+        if todo >= 0:
+            while todo > 0:
+                data = self.fp.read(min(todo, self.bufsize)) # bytes
+                if not isinstance(data, bytes):
+                    raise ValueError("%s should return bytes, got %s"
+                                     % (self.fp, type(data).__name__))
+                self.bytes_read += len(data)
+                if not data:
+                    self.done = -1
+                    break
+                self.file.write(data)
+                todo = todo - len(data)
+
+    def read_lines(self):
+        """Internal: read lines until EOF or outerboundary."""
+        if self._binary_file:
+            self.file = self.__file = BytesIO() # store data as bytes for files
+        else:
+            self.file = self.__file = StringIO() # as strings for other fields
+        if self.outerboundary:
+            self.read_lines_to_outerboundary()
+        else:
+            self.read_lines_to_eof()
+
+    def __write(self, line):
+        """line is always bytes, not string"""
+        if self.__file is not None:
+            if self.__file.tell() + len(line) > 1000:
+                self.file = self.make_file()
+                data = self.__file.getvalue()
+                self.file.write(data)
+                self.__file = None
+        if self._binary_file:
+            # keep bytes
+            self.file.write(line)
+        else:
+            # decode to string
+            self.file.write(line.decode(self.encoding, self.errors))
+
+    def read_lines_to_eof(self):
+        """Internal: read lines until EOF."""
+        while 1:
+            line = self.fp.readline(1<<16) # bytes
+            self.bytes_read += len(line)
+            if not line:
+                self.done = -1
+                break
+            self.__write(line)
+
    def read_lines_to_outerboundary(self):
        """Internal: read lines until outerboundary.
        Data is read as bytes: boundaries and line ends must be converted
        to bytes for comparisons.
        """
        next_boundary = b"--" + self.outerboundary
        last_boundary = next_boundary + b"--"
        # delim holds the previous line's terminator.  It is written out
        # only once the next line proves not to be a boundary, because the
        # terminator before a boundary belongs to the boundary, not the data.
        delim = b""
        last_line_lfend = True
        _read = 0
        while 1:
            if _read >= self.limit:
                break
            line = self.fp.readline(1<<16) # bytes
            self.bytes_read += len(line)
            _read += len(line)
            if not line:
                self.done = -1
                break
            if delim == b"\r":
                # A lone \r carried over from the previous chunk turned out
                # not to be part of a \r\n pair; re-prepend it to this line.
                line = delim + line
                delim = b""
            # A boundary only counts when the previous line really ended
            # with a line feed.
            if line.startswith(b"--") and last_line_lfend:
                strippedline = line.rstrip()
                if strippedline == next_boundary:
                    break
                if strippedline == last_boundary:
                    self.done = 1
                    break
            odelim = delim
            if line.endswith(b"\r\n"):
                delim = b"\r\n"
                line = line[:-2]
                last_line_lfend = True
            elif line.endswith(b"\n"):
                delim = b"\n"
                line = line[:-1]
                last_line_lfend = True
            elif line.endswith(b"\r"):
                # We may interrupt \r\n sequences if they span the 2**16
                # byte boundary
                delim = b"\r"
                line = line[:-1]
                last_line_lfend = False
            else:
                delim = b""
                last_line_lfend = False
            # Write the deferred terminator of the previous line plus this
            # line's data (terminator stripped, held back in delim).
            self.__write(odelim + line)
+
    def skip_lines(self):
        """Internal: skip lines until outer boundary if defined."""
        if not self.outerboundary or self.done:
            return
        next_boundary = b"--" + self.outerboundary
        last_boundary = next_boundary + b"--"
        last_line_lfend = True
        while True:
            line = self.fp.readline(1<<16)
            self.bytes_read += len(line)
            if not line:
                # EOF before the boundary: mark the parse as abnormally done.
                self.done = -1
                break
            # NOTE(review): read_lines_to_outerboundary() tests
            # startswith(b"--") here, whereas this uses endswith(b"--"),
            # which only matches a boundary line with no trailing newline
            # (e.g. at EOF) -- confirm this asymmetry is intended.
            if line.endswith(b"--") and last_line_lfend:
                strippedline = line.strip()
                if strippedline == next_boundary:
                    break
                if strippedline == last_boundary:
                    self.done = 1
                    break
            last_line_lfend = line.endswith(b'\n')
+
+    def make_file(self):
+        """Overridable: return a readable & writable file.
+
+        The file will be used as follows:
+        - data is written to it
+        - seek(0)
+        - data is read from it
+
+        The file is opened in binary mode for files, in text mode
+        for other fields
+
+        This version opens a temporary file for reading and writing,
+        and immediately deletes (unlinks) it.  The trick (on Unix!) is
+        that the file can still be used, but it can't be opened by
+        another process, and it will automatically be deleted when it
+        is closed or when the current process terminates.
+
+        If you want a more permanent file, you derive a class which
+        overrides this method.  If you want a visible temporary file
+        that is nevertheless automatically deleted when the script
+        terminates, try defining a __del__ method in a derived class
+        which unlinks the temporary files you have created.
+
+        """
+        if self._binary_file:
+            return tempfile.TemporaryFile("wb+")
+        else:
+            return tempfile.TemporaryFile("w+",
+                encoding=self.encoding, newline = '\n')
+
+
+# Test/debug code
+# ===============
+
def test(environ=os.environ):
    """Robust test CGI script, usable as main program.

    Write minimal HTTP headers and dump all information provided to
    the script in HTML form.

    """
    print("Content-type: text/html")
    print()
    #sys.stderr = sys.stdout
    # Exercise all the printers; any failure is itself rendered as HTML
    # via print_exception(), hence the deliberate bare excepts below.
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
        print_environ_usage()
        # f()/g() deliberately raise (the exec argument is not valid
        # Python) so the print_exception() formatting gets demonstrated.
        def f():
            exec("testing print_exception() -- <I>italics?</I>")
        def g(f=f):
            f()
        print("<H3>What follows is a test, not an actual exception:</H3>")
        g()
    except:
        print_exception()

    print("<H1>Second try with a small maxlen...</H1>")

    # Shrink the module-wide maxlen so that a request carrying a large
    # content-length would trip the 'Maximum content length exceeded' path.
    global maxlen
    maxlen = 50
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
    except:
        print_exception()
+
def print_exception(type=None, value=None, tb=None, limit=None):
    """Print the current (or the given) exception as escaped HTML."""
    import traceback
    if type is None:
        type, value, tb = sys.exc_info()
    print()
    print("<H3>Traceback (most recent call last):</H3>")
    # Formatted frames plus the final "ExcType: message" line.
    entries = traceback.format_tb(tb, limit) + \
              traceback.format_exception_only(type, value)
    print("<PRE>%s<B>%s</B></PRE>" % (
        html.escape("".join(entries[:-1])),
        html.escape(entries[-1]),
        ))
    del tb
+
def print_environ(environ=os.environ):
    """Dump the shell environment as HTML."""
    print()
    print("<H3>Shell Environment:</H3>")
    print("<DL>")
    for name in sorted(environ.keys()):
        print("<DT>", html.escape(name), "<DD>", html.escape(environ[name]))
    print("</DL>")
    print()
+
def print_form(form):
    """Dump the contents of a form as HTML."""
    keys = sorted(form.keys())
    print()
    print("<H3>Form Contents:</H3>")
    if not keys:
        print("<P>No form fields.")
    print("<DL>")
    for key in keys:
        value = form[key]
        print("<DT>" + html.escape(key) + ":", end=' ')
        print("<i>" + html.escape(repr(type(value))) + "</i>")
        print("<DD>" + html.escape(repr(value)))
    print("</DL>")
    print()
+
def print_directory():
    """Dump the current directory as HTML."""
    print()
    print("<H3>Current Working Directory:</H3>")
    try:
        cwd = os.getcwd()
    except os.error as msg:
        print("os.error:", html.escape(str(msg)))
    else:
        print(html.escape(cwd))
    print()
+
def print_arguments():
    """Dump the command line arguments."""
    print()
    print("<H3>Command Line Arguments:</H3>")
    print()
    print(sys.argv)
    print()
+
def print_environ_usage():
    """Dump a list of environment variables used by CGI as HTML."""
    # The triple-quoted literal below is emitted verbatim (print adds one
    # trailing newline); it intentionally starts with a blank line.
    print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well.  Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
+
+
+# Utilities
+# =========
+
def escape(s, quote=None):
    """Deprecated API: use html.escape() instead."""
    warn("cgi.escape is deprecated, use html.escape instead",
         DeprecationWarning, stacklevel=2)
    # '&' must be escaped first so later replacements aren't re-escaped.
    out = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    if quote:
        out = out.replace('"', "&quot;")
    return out
+
+
def valid_boundary(s, _vb_pattern=None):
    """Return a match object if *s* is an acceptable MIME boundary, else None.

    Accepts str or bytes.  The default pattern allows 1-201 printable
    ASCII characters whose last character is not a space (a superset of
    what RFC 2046 permits).  *_vb_pattern* optionally overrides the
    regex; previously the parameter was accepted but silently ignored.
    """
    import re
    if _vb_pattern is None:
        if isinstance(s, bytes):
            _vb_pattern = b"^[ -~]{0,200}[!-~]$"
        else:
            _vb_pattern = "^[ -~]{0,200}[!-~]$"
    return re.match(_vb_pattern, s)
+
+# Invoke mainline
+# ===============
+
+# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
    test()  # emit the self-test HTML dump of the CGI environment to stdout
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cmd.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cmd.py
new file mode 100644
index 00000000..4c1bdab3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/cmd.py
@@ -0,0 +1,337 @@
+"""A generic class to build line-oriented command interpreters.
+
+Interpreters constructed with this class obey the following conventions:
+
+1. End of file on input is processed as the command 'EOF'.
+2. A command is parsed out of each line by collecting the prefix composed
+   of characters in the identchars member.
+3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
+   is passed a single argument consisting of the remainder of the line.
+4. Typing an empty line repeats the last command.  (Actually, it calls the
+   method `emptyline', which may be overridden in a subclass.)
+5. There is a predefined `help' method.  Given an argument `topic', it
+   calls the command `help_topic'.  With no arguments, it lists all topics
+   with defined help_ functions, broken into up to three topics; documented
+   commands, miscellaneous help topics, and undocumented commands.
+6. The command '?' is a synonym for `help'.  The command '!' is a synonym
+   for `shell', if a do_shell method exists.
+7. If completion is enabled, completing commands will be done automatically,
+   and completing of commands args is done by calling complete_foo() with
+   arguments text, line, begidx, endidx.  text is string we are matching
+   against, all returned matches must begin with it.  line is the current
+   input line (lstripped), begidx and endidx are the beginning and end
+   indexes of the text being matched, which could be used to provide
+   different completion depending upon which position the argument is in.
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup.  This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+
+----------------------------------------------------------------------------
+This is a copy of python's Cmd, but leaves out features that aren't relevant
+or can't currently be implemented for MicroPython.
+
+One of the notable deviations is that since MicroPython strips doc strings,
+this means that the help-by-docstring feature doesn't work.
+
+completions have also been stripped out.
+"""
+
+#import string, sys
+import sys	# MicroPython doesn't yet have a string module
+
+__all__ = ["Cmd"]
+
+PROMPT = '(Cmd) '
+#IDENTCHARS = string.ascii_letters + string.digits + '_'
+IDENTCHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+
class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    prompt = PROMPT          # string printed before each command is read
    identchars = IDENTCHARS  # characters that may appear in a command name
    ruler = '='              # char used to underline help section headers
    lastcmd = ''             # last nonempty command; replayed on empty input
    intro = None             # banner printed once when cmdloop() starts
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1         # when true, read via input(); else via self.stdin

    def __init__(self, stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.

        """
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        # Commands queued here are consumed before prompting the user.
        self.cmdqueue = []

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        """

        self.preloop()
        # NOTE(review): CPython's Cmd uses this try/finally to restore the
        # readline completer; completion is stripped from this port (see
        # module docstring), so the finally clause has nothing to do.
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            # End of input is dispatched as the 'EOF' command.
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.

        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.

        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for 'help'.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for 'shell', but only if do_shell exists.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        # The command name is the longest prefix made of identchars.
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.

        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        # Don't let an empty line after EOF replay the EOF command.
        if line == 'EOF' :
            self.lastcmd = ''
        if cmd == '':
            return self.default(line)
        else:
            # Dispatch 'foo args' to self.do_foo('args').
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.

        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.

        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)

    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            # Partition commands into: documented (has help_*), topics with
            # help_* but no do_*, and undocumented do_* commands.
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  list(help.keys()),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        # Write a header, an optional ruler line, then the topic names in
        # columns.  cmdlen is unused (kept for CPython signature parity).
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return

        nonstrings = [i for i in range(len(list))
                        if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError("list[i] not a string for i in %s"
                            % ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards, stopping at the first layout
        # whose total width (columns + 2-space separators) fits.
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2   # compensates for the separator the first column lacks
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    # Items are laid out column-major: column c holds
                    # items [c*nrows, (c+1)*nrows).
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # Nothing fit: fall back to one item per row.
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            # Drop trailing empty cells so rows don't end in spaces.
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                #texts[col] = texts[col].ljust(colwidths[col])
                texts[col] = '%-*s' % (colwidths[col], texts[col])
            self.stdout.write("%s\n"%str("  ".join(texts)))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/code.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/code.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/codecs.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/codecs.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/codeop.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/codeop.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/__init__.py
new file mode 100644
index 00000000..d152a984
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/__init__.py
@@ -0,0 +1,16 @@
+# Should be reimplemented for MicroPython
+# Reason:
+# CPython implementation brings in metaclasses and other bloat.
+# This is going to be just import-all for other modules in a namespace package
+from ucollections import *
+try:
+    from .defaultdict import defaultdict
+except ImportError:
+    pass
+try:
+    from .deque import deque
+except ImportError:
+    pass
+
# Bare stub standing in for collections.abc.MutableMapping: it defines no
# abstract methods and provides no mixin behaviour; it only lets code that
# subclasses or references the name import successfully.
class MutableMapping:
    pass
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/defaultdict.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/defaultdict.py
new file mode 100644
index 00000000..0582b248
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/defaultdict.py
@@ -0,0 +1,36 @@
class defaultdict:
    """Dict-like mapping that fills in missing keys via *default_factory*.

    A minimal workalike of CPython's collections.defaultdict, backed by a
    plain dict stored in ``self.d``.  On a missing key, __missing__ calls
    ``default_factory()`` (or raises KeyError if it is None), stores the
    result, and returns it.
    """

    @staticmethod
    def __new__(cls, default_factory=None, **kwargs):
        # Some code (e.g. urllib.urlparse) expects that basic defaultdict
        # functionality will be available to subclasses without them
        # calling __init__().
        self = super(defaultdict, cls).__new__(cls)
        self.d = {}
        return self

    def __init__(self, default_factory=None, **kwargs):
        # Keyword arguments seed the mapping, like dict(**kwargs).
        self.d = kwargs
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return self.d[key]
        except KeyError:
            # Missing key: synthesize the default, store it, return it.
            v = self.__missing__(key)
            self.d[key] = v
            return v

    def __setitem__(self, key, v):
        self.d[key] = v

    def __delitem__(self, key):
        del self.d[key]

    def __contains__(self, key):
        return key in self.d

    def __len__(self):
        # Added so len() works, matching dict semantics.
        return len(self.d)

    def __iter__(self):
        # Added so iteration yields keys, matching dict semantics.
        return iter(self.d)

    def get(self, key, default=None):
        """Return self[key] if present, else *default* (never inserts)."""
        return self.d.get(key, default)

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        return self.default_factory()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/deque.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/deque.py
new file mode 100644
index 00000000..b284e5f3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/collections/deque.py
@@ -0,0 +1,37 @@
class deque:
    """List-backed double-ended queue (subset of collections.deque).

    Now also supports CPython's optional *maxlen*: when the deque is full,
    appending on one end silently discards an item from the opposite end.
    Passing no maxlen (the default) preserves the old unbounded behaviour.
    """

    def __init__(self, iterable=None, maxlen=None):
        self.q = [] if iterable is None else list(iterable)
        if maxlen is not None:
            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")
            # Keep only the rightmost maxlen items, as CPython does when
            # initializing a bounded deque from a longer iterable.
            if len(self.q) > maxlen:
                del self.q[:len(self.q) - maxlen]
        self.maxlen = maxlen

    def popleft(self):
        return self.q.pop(0)

    def popright(self):
        # Non-standard historical alias kept for backward compatibility;
        # identical to pop().
        return self.q.pop()

    def pop(self):
        return self.q.pop()

    def append(self, a):
        self.q.append(a)
        if self.maxlen is not None and len(self.q) > self.maxlen:
            self.q.pop(0)   # bounded: discard from the opposite (left) end

    def appendleft(self, a):
        self.q.insert(0, a)
        if self.maxlen is not None and len(self.q) > self.maxlen:
            self.q.pop()    # bounded: discard from the opposite (right) end

    def extend(self, a):
        # Route through append() so the maxlen bound is enforced.
        for item in a:
            self.append(item)

    def __len__(self):
        return len(self.q)

    def __bool__(self):
        return bool(self.q)

    def __iter__(self):
        yield from self.q

    def __str__(self):
        return 'deque({})'.format(self.q)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/concurrent/futures/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/concurrent/futures/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/contextlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/contextlib.py
new file mode 100644
index 00000000..aca58d71
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/contextlib.py
@@ -0,0 +1,166 @@
+"""Utilities for with-statement contexts.  See PEP 343.
+
+Original source code: https://hg.python.org/cpython/file/3.4/Lib/contextlib.py
+
+Not implemented:
+ - redirect_stdout;
+
+"""
+
+import sys
+from collections import deque
+from ucontextlib import *
+
+
class closing(object):
    """Context manager that calls ``thing.close()`` when the block exits.

        with closing(<module>.open(<arguments>)) as f:
            <block>

    is equivalent to:

        f = <module>.open(<arguments>)
        try:
            <block>
        finally:
            f.close()
    """

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        return self.thing

    def __exit__(self, *exc_info):
        # Always close, whether or not an exception is propagating; the
        # implicit None return never suppresses the exception.
        self.thing.close()
+
+
class suppress:
    """Context manager that swallows the listed exception types.

    After a suppressed exception, execution resumes with the first
    statement following the ``with`` block:

         with suppress(FileNotFoundError):
             os.remove(somefile)
         # Execution still resumes here if the file was already removed
    """

    def __init__(self, *exceptions):
        self._exceptions = exceptions

    def __enter__(self):
        pass

    def __exit__(self, exctype, excinst, exctb):
        # Suppression is decided with issubclass on the exception *type*,
        # deliberately giving simpler subclass-based semantics than the
        # CPython interpreter's concrete-hierarchy matching (which Guido
        # considers a bug; see http://bugs.python.org/issue12029).
        if exctype is None:
            return False
        return issubclass(exctype, self._exceptions)
+
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
    """Context manager for dynamic management of a stack of exit callbacks

    For example:

        with ExitStack() as stack:
            files = [stack.enter_context(open(fname)) for fname in filenames]
            # All opened files will automatically be closed at the end of
            # the with statement, even if attempts to open files later
            # in the list raise an exception

    """
    def __init__(self):
        # Callbacks are kept LIFO: the most recently registered runs first.
        self._exit_callbacks = deque()

    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        self._exit_callbacks = deque()
        return new_stack

    def _push_cm_exit(self, cm, cm_exit):
        """Helper to correctly register callbacks to __exit__ methods"""
        def _exit_wrapper(*exc_details):
            return cm_exit(cm, *exc_details)
        self.push(_exit_wrapper)

    def push(self, exit):
        """Registers a callback with the standard __exit__ method signature

        Can suppress exceptions the same way __exit__ methods can.

        Also accepts any object with an __exit__ method (registering a call
        to the method instead of the object itself)
        """
        # We use an unbound method rather than a bound method to follow
        # the standard lookup behaviour for special methods
        _cb_type = type(exit)
        try:
            exit_method = _cb_type.__exit__
        except AttributeError:
            # Not a context manager, so assume its a callable
            self._exit_callbacks.append(exit)
        else:
            self._push_cm_exit(exit, exit_method)
        return exit # Allow use as a decorator

    def callback(self, callback, *args, **kwds):
        """Registers an arbitrary callback and arguments.

        Cannot suppress exceptions.
        """
        # The wrapper discards the exception details and returns None, so
        # it can never suppress an in-flight exception.
        def _exit_wrapper(exc_type, exc, tb):
            callback(*args, **kwds)
        self.push(_exit_wrapper)
        return callback # Allow use as a decorator

    def enter_context(self, cm):
        """Enters the supplied context manager

        If successful, also pushes its __exit__ method as a callback and
        returns the result of the __enter__ method.
        """
        # We look up the special methods on the type to match the with statement
        _cm_type = type(cm)
        _exit = _cm_type.__exit__
        result = _cm_type.__enter__(cm)
        self._push_cm_exit(cm, _exit)
        return result

    def close(self):
        """Immediately unwind the context stack"""
        self.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, *exc_details):
        received_exc = exc_details[0] is not None
        # Callbacks are invoked in LIFO order to match the behaviour of
        # nested context managers
        suppressed_exc = False
        pending_raise = False
        while self._exit_callbacks:
            cb = self._exit_callbacks.pop()
            try:
                if cb(*exc_details):
                    # A truthy return suppresses the current exception;
                    # later callbacks see a clean (None, None, None) slate.
                    suppressed_exc = True
                    pending_raise = False
                    exc_details = (None, None, None)
            except:
                # A callback raised: remember the new exception, let the
                # remaining callbacks run, then re-raise it below.
                # NOTE(review): unlike CPython's ExitStack, this simplified
                # port does not fix up __context__ chaining.
                exc_details = sys.exc_info()
                pending_raise = True
        if pending_raise:
            raise exc_details[1]
        return received_exc and suppressed_exc
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/copy.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/copy.py
new file mode 100644
index 00000000..d9948dfd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/copy.py
@@ -0,0 +1,328 @@
+"""Generic (shallow and deep) copying operations.
+
+Interface summary:
+
+        import copy
+
+        x = copy.copy(y)        # make a shallow copy of y
+        x = copy.deepcopy(y)    # make a deep copy of y
+
+For module specific errors, copy.Error is raised.
+
+The difference between shallow and deep copying is only relevant for
+compound objects (objects that contain other objects, like lists or
+class instances).
+
+- A shallow copy constructs a new compound object and then (to the
+  extent possible) inserts *the same objects* into it that the
+  original contains.
+
+- A deep copy constructs a new compound object and then, recursively,
+  inserts *copies* into it of the objects found in the original.
+
+Two problems often exist with deep copy operations that don't exist
+with shallow copy operations:
+
+ a) recursive objects (compound objects that, directly or indirectly,
+    contain a reference to themselves) may cause a recursive loop
+
+ b) because deep copy copies *everything* it may copy too much, e.g.
+    administrative data structures that should be shared even between
+    copies
+
+Python's deep copy operation avoids these problems by:
+
+ a) keeping a table of objects already copied during the current
+    copying pass
+
+ b) letting user-defined classes override the copying operation or the
+    set of components copied
+
+This version does not copy types like module, class, function, method,
+nor stack trace, stack frame, nor file, socket, window, nor array, nor
+any similar types.
+
+Classes can use the same interfaces to control copying that they use
+to control pickling: they can define methods called __getinitargs__(),
+__getstate__() and __setstate__().  See the documentation for module
+"pickle" for information on these methods.
+"""
+
+import types
+#import weakref
+#from copyreg import dispatch_table
+#import builtins
+
class Error(Exception):
    """Raised when an object cannot be copied."""
    pass
error = Error   # backward compatibility
+
+try:
+    from org.python.core import PyStringMap
+except ImportError:
+    PyStringMap = None
+
+__all__ = ["Error", "copy", "deepcopy"]
+
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.

    Raises Error if the object's type has no registered copier and the
    object defines no __copy__ method.
    """

    cls = type(x)

    # Fast path: builtin types registered in the dispatch table.
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)

    # User-defined classes may provide __copy__; it is looked up on the
    # class, matching CPython's special-method semantics.
    copier = getattr(cls, "__copy__", None)
    if copier:
        return copier(x)

    # CPython would fall back to the reduce protocol here; that fallback
    # was unreachable dead code in this port (it followed an unconditional
    # raise) and has been removed.
    raise Error("un(shallow)copyable object of type %s" % cls)
+
+
# Dispatch table mapping builtin types to their shallow-copy functions.
_copy_dispatch = d = {}

# Immutable objects are "copied" by returning the object itself.
def _copy_immutable(x):
    return x
for t in (type(None), int, float, bool, str, tuple,
          type, range,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType):
    d[t] = _copy_immutable
# CodeType may be absent from MicroPython's stripped-down types module.
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
#for name in ("complex", "unicode"):
#    t = getattr(builtins, name, None)
#    if t is not None:
#        d[t] = _copy_immutable

# Mutable builtin containers copy via their own constructor, e.g. list(x).
def _copy_with_constructor(x):
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

# Jython's PyStringMap copies via its .copy() method.
def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

del d
+
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.

    *memo* maps id(original) -> copy so shared and recursive structures
    are copied exactly once.
    """

    if memo is None:
        memo = {}

    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        # Already copied during this pass.
        return y

    cls = type(x)

    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes themselves are treated as atomic (not copied).
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # BUGFIX: the original consulted copyreg's dispatch_table
                # here, but the copyreg import is commented out in this
                # port, so that lookup raised NameError.  Go straight to
                # the reduce protocol instead.
                reductor = getattr(x, "__reduce_ex__", None)
                if reductor:
                    rv = reductor(2)
                else:
                    reductor = getattr(x, "__reduce__", None)
                    if reductor:
                        rv = reductor()
                    else:
                        raise Error(
                            "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)

    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
+
# Dispatch table mapping builtin types to their deep-copy functions.
_deepcopy_dispatch = d = {}

# Atomic/immutable objects are returned unchanged.
def _deepcopy_atomic(x, memo):
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
# complex may not exist on minimal MicroPython builds.
try:
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
# CodeType may be absent from the stripped-down types module.
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
#d[weakref.ref] = _deepcopy_atomic
+
def _deepcopy_list(x, memo):
    # Register the (still empty) copy in the memo *before* recursing so
    # that self-referential lists terminate.
    y = []
    memo[id(x)] = y
    for a in x:
        y.append(deepcopy(a, memo))
    return y
d[list] = _deepcopy_list
+
def _deepcopy_tuple(x, memo):
    # Copy the elements first; the tuple itself can only be built after.
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    # If no element actually changed identity, reuse the original tuple.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    return y
d[tuple] = _deepcopy_tuple
+
def _deepcopy_dict(x, memo):
    # Memoize the empty copy before recursing so cyclic dicts terminate.
    y = {}
    memo[id(x)] = y
    for key, value in x.items():
        y[deepcopy(key, memo)] = deepcopy(value, memo)
    return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
+
def _deepcopy_method(x, memo): # Copy instance methods
    # A bound method copies as: the same function bound to a deep copy of
    # its self object.
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
+
+def _keep_alive(x, memo):
+    """Keeps a reference to the object x in the memo.
+
+    Because we remember objects by their id, we have
+    to assure that possibly temporary objects are kept
+    alive by referencing them.
+    We store a reference at the id of the memo, which should
+    normally not be used unless someone tries to deepcopy
+    the memo itself...
+    """
+    try:
+        memo[id(memo)].append(x)
+    except KeyError:
+        # aha, this is the first one :-)
+        memo[id(memo)]=[x]
+
def _reconstruct(x, info, deep, memo=None):
    """Rebuild a copy of x from a __reduce__/__reduce_ex__ result.

    *info* is (callable, args[, state[, listiter[, dictiter]]]); *deep*
    selects deep (true) vs shallow (false) copying of args/state/items.
    """
    if isinstance(info, str):
        # A string reduce value means "the object is its own copy".
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Memoize before applying state so recursive references resolve to y.
    memo[id(x)] = y

    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # A 2-tuple state is (instance __dict__, slot values).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    # Extra items for list-like (append) and dict-like (setitem) objects.
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y
+
# Drop the setup-time alias for the dispatch tables from the namespace.
del d

del types

# Helper for instance creation without calling __init__
class _EmptyClass:
    pass
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/csv.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/csv.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/curses/ascii.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/curses/ascii.py
new file mode 100644
index 00000000..800fd8b4
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/curses/ascii.py
@@ -0,0 +1,99 @@
+"""Constants and membership tests for ASCII characters"""
+
+NUL     = 0x00  # ^@
+SOH     = 0x01  # ^A
+STX     = 0x02  # ^B
+ETX     = 0x03  # ^C
+EOT     = 0x04  # ^D
+ENQ     = 0x05  # ^E
+ACK     = 0x06  # ^F
+BEL     = 0x07  # ^G
+BS      = 0x08  # ^H
+TAB     = 0x09  # ^I
+HT      = 0x09  # ^I
+LF      = 0x0a  # ^J
+NL      = 0x0a  # ^J
+VT      = 0x0b  # ^K
+FF      = 0x0c  # ^L
+CR      = 0x0d  # ^M
+SO      = 0x0e  # ^N
+SI      = 0x0f  # ^O
+DLE     = 0x10  # ^P
+DC1     = 0x11  # ^Q
+DC2     = 0x12  # ^R
+DC3     = 0x13  # ^S
+DC4     = 0x14  # ^T
+NAK     = 0x15  # ^U
+SYN     = 0x16  # ^V
+ETB     = 0x17  # ^W
+CAN     = 0x18  # ^X
+EM      = 0x19  # ^Y
+SUB     = 0x1a  # ^Z
+ESC     = 0x1b  # ^[
+FS      = 0x1c  # ^\
+GS      = 0x1d  # ^]
+RS      = 0x1e  # ^^
+US      = 0x1f  # ^_
+SP      = 0x20  # space
+DEL     = 0x7f  # delete
+
+controlnames = [
+"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
+"BS",  "HT",  "LF",  "VT",  "FF",  "CR",  "SO",  "SI",
+"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
+"CAN", "EM",  "SUB", "ESC", "FS",  "GS",  "RS",  "US",
+"SP"
+]
+
+def _ctoi(c):
+    if type(c) == type(""):
+        return ord(c)
+    else:
+        return c
+
+def isalnum(c): return isalpha(c) or isdigit(c)
+def isalpha(c): return isupper(c) or islower(c)
+def isascii(c): return _ctoi(c) <= 127          # ?
+def isblank(c): return _ctoi(c) in (8,32)
+def iscntrl(c): return _ctoi(c) <= 31
+def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
+def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
+def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
+def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
+def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
+def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
+def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
+def isxdigit(c): return isdigit(c) or \
+    (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
+def isctrl(c): return _ctoi(c) < 32
+def ismeta(c): return _ctoi(c) > 127
+
+def ascii(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x7f)
+    else:
+        return _ctoi(c) & 0x7f
+
+def ctrl(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) & 0x1f)
+    else:
+        return _ctoi(c) & 0x1f
+
+def alt(c):
+    if type(c) == type(""):
+        return chr(_ctoi(c) | 0x80)
+    else:
+        return _ctoi(c) | 0x80
+
+def unctrl(c):
+    bits = _ctoi(c)
+    if bits == 0x7f:
+        rep = "^?"
+    elif isprint(bits & 0x7f):
+        rep = chr(bits & 0x7f)
+    else:
+        rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
+    if bits & 0x80:
+        return "!" + rep
+    return rep
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/datetime.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/datetime.py
new file mode 100644
index 00000000..03b4e4cd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/datetime.py
@@ -0,0 +1,2138 @@
+"""Concrete date/time and related types.
+
+See http://www.iana.org/time-zones/repository/tz-link.html for
+time zone and DST data sources.
+"""
+
+import time as _time
+import math as _math
+
+def _cmp(x, y):
+    return 0 if x == y else 1 if x > y else -1
+
+MINYEAR = 1
+MAXYEAR = 9999
+_MAXORDINAL = 3652059 # date.max.toordinal()
+
+# Utility functions, adapted from Python's Demo/classes/Dates.py, which
+# also assumes the current Gregorian calendar indefinitely extended in
+# both directions.  Difference:  Dates.py calls January 1 of year 0 day
+# number 1.  The code here calls January 1 of year 1 day number 1.  This is
+# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
+# and Reingold's "Calendrical Calculations", where it's the base calendar
+# for all computations.  See the book for algorithms for converting between
+# proleptic Gregorian ordinals and many other calendar systems.
+
+# Month lengths for a non-leap year; index 0 is unused so month numbers
+# (1..12) index directly.
+_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+# _DAYS_BEFORE_MONTH[m] == days in a non-leap year preceding month m.
+_DAYS_BEFORE_MONTH = [None]
+dbm = 0
+for dim in _DAYS_IN_MONTH[1:]:
+    _DAYS_BEFORE_MONTH.append(dbm)
+    dbm += dim
+del dbm, dim
+
+def _is_leap(year):
+    "year -> 1 if leap year, else 0."
+    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
+
+def _days_before_year(year):
+    "year -> number of days before January 1st of year."
+    y = year - 1
+    return y*365 + y//4 - y//100 + y//400
+
+def _days_in_month(year, month):
+    "year, month -> number of days in that month in that year."
+    assert 1 <= month <= 12, month
+    if month == 2 and _is_leap(year):
+        return 29
+    return _DAYS_IN_MONTH[month]
+
+def _days_before_month(year, month):
+    "year, month -> number of days in year preceding first day of month."
+    assert 1 <= month <= 12, 'month must be in 1..12'
+    return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
+
+def _ymd2ord(year, month, day):
+    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
+    assert 1 <= month <= 12, 'month must be in 1..12'
+    dim = _days_in_month(year, month)
+    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
+    return (_days_before_year(year) +
+            _days_before_month(year, month) +
+            day)
+
+# Day counts of the Gregorian cycles used by _ord2ymd below.
+_DI400Y = _days_before_year(401)    # number of days in 400 years
+_DI100Y = _days_before_year(101)    #    "    "   "   " 100   "
+_DI4Y   = _days_before_year(5)      #    "    "   "   "   4   "
+
+# A 4-year cycle has an extra leap day over what we'd get from pasting
+# together 4 single years.
+assert _DI4Y == 4 * 365 + 1
+
+# Similarly, a 400-year cycle has an extra leap day over what we'd get from
+# pasting together 4 100-year cycles.
+assert _DI400Y == 4 * _DI100Y + 1
+
+# OTOH, a 100-year cycle has one fewer leap day than we'd get from
+# pasting together 25 4-year cycles.
+assert _DI100Y == 25 * _DI4Y - 1
+
+def _ord2ymd(n):
+    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
+
+    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
+    # repeats exactly every 400 years.  The basic strategy is to find the
+    # closest 400-year boundary at or before n, then work with the offset
+    # from that boundary to n.  Life is much clearer if we subtract 1 from
+    # n first -- then the values of n at 400-year boundaries are exactly
+    # those divisible by _DI400Y:
+    #
+    #     D  M   Y            n              n-1
+    #     -- --- ----        ----------     ----------------
+    #     31 Dec -400        -_DI400Y       -_DI400Y -1
+    #      1 Jan -399         -_DI400Y +1   -_DI400Y      400-year boundary
+    #     ...
+    #     30 Dec  000        -1             -2
+    #     31 Dec  000         0             -1
+    #      1 Jan  001         1              0            400-year boundary
+    #      2 Jan  001         2              1
+    #      3 Jan  001         3              2
+    #     ...
+    #     31 Dec  400         _DI400Y        _DI400Y -1
+    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
+    n -= 1
+    n400, n = divmod(n, _DI400Y)
+    year = n400 * 400 + 1   # ..., -399, 1, 401, ...
+
+    # Now n is the (non-negative) offset, in days, from January 1 of year, to
+    # the desired date.  Now compute how many 100-year cycles precede n.
+    # Note that it's possible for n100 to equal 4!  In that case 4 full
+    # 100-year cycles precede the desired day, which implies the desired
+    # day is December 31 at the end of a 400-year cycle.
+    n100, n = divmod(n, _DI100Y)
+
+    # Now compute how many 4-year cycles precede it.
+    n4, n = divmod(n, _DI4Y)
+
+    # And now how many single years.  Again n1 can be 4, and again meaning
+    # that the desired day is December 31 at the end of the 4-year cycle.
+    n1, n = divmod(n, 365)
+
+    year += n100 * 100 + n4 * 4 + n1
+    if n1 == 4 or n100 == 4:
+        assert n == 0
+        return year-1, 12, 31
+
+    # Now the year is correct, and n is the offset from January 1.  We find
+    # the month via an estimate that's either exact or one too large.
+    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
+    assert leapyear == _is_leap(year)
+    # (n + 50) >> 5 approximates n/32 + 1.5 -- per the comment above it is
+    # either the exact month or one too large, corrected just below.
+    month = (n + 50) >> 5
+    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
+    if preceding > n:  # estimate is too large
+        month -= 1
+        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
+    n -= preceding
+    assert 0 <= n < _days_in_month(year, month)
+
+    # Now the year and month are correct, and n is the offset from the
+    # start of that month:  we're done!
+    return year, month, n+1
+
+# Month and day names.  For localized versions, see the calendar module.
+# Index 0 is None so 1-based month/weekday numbers index directly.
+_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+                     "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+
+
+def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
+    wday = (_ymd2ord(y, m, d) + 6) % 7
+    dnum = _days_before_month(y, m) + d
+    return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
+
+def _format_time(hh, mm, ss, us):
+    # Skip trailing microseconds when us==0.
+    result = "%02d:%02d:%02d" % (hh, mm, ss)
+    if us:
+        result += ".%06d" % us
+    return result
+
+# Correctly substitute for %z and %Z escapes in strftime formats.
+def _wrap_strftime(object, format, timetuple):
+    """Expand %f, %z and %Z by hand, then delegate to time.strftime().
+
+    time.strftime() knows nothing about aware datetime objects, so the
+    microsecond, UTC-offset and tz-name escapes are substituted into the
+    format string here before it is handed over.
+    """
+    # Don't call utcoffset() or tzname() unless actually needed.
+    freplace = None # the string to use for %f
+    zreplace = None # the string to use for %z
+    Zreplace = None # the string to use for %Z
+
+    # Scan format for %z and %Z escapes, replacing as needed.
+    newformat = []
+    push = newformat.append
+    i, n = 0, len(format)
+    while i < n:
+        ch = format[i]
+        i += 1
+        if ch == '%':
+            if i < n:
+                ch = format[i]
+                i += 1
+                if ch == 'f':
+                    if freplace is None:
+                        freplace = '%06d' % getattr(object,
+                                                    'microsecond', 0)
+                    newformat.append(freplace)
+                elif ch == 'z':
+                    if zreplace is None:
+                        zreplace = ""
+                        if hasattr(object, "utcoffset"):
+                            offset = object.utcoffset()
+                            if offset is not None:
+                                sign = '+'
+                                if offset.days < 0:
+                                    offset = -offset
+                                    sign = '-'
+                                h, m = divmod(offset, timedelta(hours=1))
+                                assert not m % timedelta(minutes=1), "whole minute"
+                                m //= timedelta(minutes=1)
+                                zreplace = '%c%02d%02d' % (sign, h, m)
+                    assert '%' not in zreplace
+                    newformat.append(zreplace)
+                elif ch == 'Z':
+                    if Zreplace is None:
+                        Zreplace = ""
+                        if hasattr(object, "tzname"):
+                            s = object.tzname()
+                            if s is not None:
+                                # strftime is going to have at this: escape %
+                                Zreplace = s.replace('%', '%%')
+                    newformat.append(Zreplace)
+                else:
+                    # Any other escape is passed through for strftime to handle.
+                    push('%')
+                    push(ch)
+            else:
+                # Trailing lone '%' -- pass it through unchanged.
+                push('%')
+        else:
+            push(ch)
+    newformat = "".join(newformat)
+    return _time.strftime(newformat, timetuple)
+
+def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
+    if tzinfo is None:
+        return None
+    return getattr(tzinfo, methname)(tzinfoarg)
+
+# Just raise TypeError if the arg isn't None or a string.
+def _check_tzname(name):
+    if name is not None and not isinstance(name, str):
+        raise TypeError("tzinfo.tzname() must return None or string, "
+                        "not '%s'" % type(name))
+
+# name is the offset-producing method, "utcoffset" or "dst".
+# offset is what it returned.
+# If offset isn't None or timedelta, raises TypeError.
+# If offset is None, returns None.
+# Else offset is checked for being in range, and a whole # of minutes.
+# If it is, its integer value is returned.  Else ValueError is raised.
+def _check_utc_offset(name, offset):
+    assert name in ("utcoffset", "dst")
+    if offset is None:
+        return
+    if not isinstance(offset, timedelta):
+        raise TypeError("tzinfo.%s() must return None "
+                        "or timedelta, not '%s'" % (name, type(offset)))
+    if offset % timedelta(minutes=1) or offset.microseconds:
+        raise ValueError("tzinfo.%s() must return a whole number "
+                         "of minutes, got %s" % (name, offset))
+    if not -timedelta(1) < offset < timedelta(1):
+        raise ValueError("%s()=%s, must be must be strictly between"
+                         " -timedelta(hours=24) and timedelta(hours=24)"
+                         % (name, offset))
+
+def _check_date_fields(year, month, day):
+    if not isinstance(year, int):
+        raise TypeError('int expected')
+    if not MINYEAR <= year <= MAXYEAR:
+        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
+    if not 1 <= month <= 12:
+        raise ValueError('month must be in 1..12', month)
+    dim = _days_in_month(year, month)
+    if not 1 <= day <= dim:
+        raise ValueError('day must be in 1..%d' % dim, day)
+
+def _check_time_fields(hour, minute, second, microsecond):
+    if not isinstance(hour, int):
+        raise TypeError('int expected')
+    if not 0 <= hour <= 23:
+        raise ValueError('hour must be in 0..23', hour)
+    if not 0 <= minute <= 59:
+        raise ValueError('minute must be in 0..59', minute)
+    if not 0 <= second <= 59:
+        raise ValueError('second must be in 0..59', second)
+    if not 0 <= microsecond <= 999999:
+        raise ValueError('microsecond must be in 0..999999', microsecond)
+
+def _check_tzinfo_arg(tz):
+    if tz is not None and not isinstance(tz, tzinfo):
+        raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
+
+def _cmperror(x, y):
+    raise TypeError("can't compare '%s' to '%s'" % (
+                    type(x).__name__, type(y).__name__))
+
+class timedelta:
+    """Represent the difference between two datetime objects.
+
+    Supported operators:
+
+    - add, subtract timedelta
+    - unary plus, minus, abs
+    - compare to timedelta
+    - multiply, divide by int
+
+    In addition, datetime supports subtraction of two datetime objects
+    returning a timedelta, and addition or subtraction of a datetime
+    and a timedelta giving a datetime.
+
+    Representation: (days, seconds, microseconds).  Why?  Because I
+    felt like it.
+    """
+    # Normalized invariant (established by __new__): 0 <= _seconds < 24*3600
+    # and 0 <= _microseconds < 1000000; only _days may be negative.
+    __slots__ = '_days', '_seconds', '_microseconds'
+
+    def __new__(cls, days=0, seconds=0, microseconds=0,
+                milliseconds=0, minutes=0, hours=0, weeks=0):
+        # Doing this efficiently and accurately in C is going to be difficult
+        # and error-prone, due to ubiquitous overflow possibilities, and that
+        # C double doesn't have enough bits of precision to represent
+        # microseconds over 10K years faithfully.  The code here tries to make
+        # explicit where go-fast assumptions can be relied on, in order to
+        # guide the C implementation; it's way more convoluted than speed-
+        # ignoring auto-overflow-to-long idiomatic Python could be.
+
+        # XXX Check that all inputs are ints or floats.
+
+        # Final values, all integer.
+        # s and us fit in 32-bit signed ints; d isn't bounded.
+        d = s = us = 0
+
+        # Normalize everything to days, seconds, microseconds.
+        days += weeks*7
+        seconds += minutes*60 + hours*3600
+        microseconds += milliseconds*1000
+
+        # Get rid of all fractions, and normalize s and us.
+        # Take a deep breath <wink>.
+        if isinstance(days, float):
+            dayfrac, days = _math.modf(days)
+            daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
+            assert daysecondswhole == int(daysecondswhole)  # can't overflow
+            s = int(daysecondswhole)
+            assert days == int(days)
+            d = int(days)
+        else:
+            daysecondsfrac = 0.0
+            d = days
+        assert isinstance(daysecondsfrac, float)
+        assert abs(daysecondsfrac) <= 1.0
+        assert isinstance(d, int)
+        assert abs(s) <= 24 * 3600
+        # days isn't referenced again before redefinition
+
+        if isinstance(seconds, float):
+            secondsfrac, seconds = _math.modf(seconds)
+            assert seconds == int(seconds)
+            seconds = int(seconds)
+            secondsfrac += daysecondsfrac
+            assert abs(secondsfrac) <= 2.0
+        else:
+            secondsfrac = daysecondsfrac
+        # daysecondsfrac isn't referenced again
+        assert isinstance(secondsfrac, float)
+        assert abs(secondsfrac) <= 2.0
+
+        assert isinstance(seconds, int)
+        days, seconds = divmod(seconds, 24*3600)
+        d += days
+        s += int(seconds)    # can't overflow
+        assert isinstance(s, int)
+        assert abs(s) <= 2 * 24 * 3600
+        # seconds isn't referenced again before redefinition
+
+        usdouble = secondsfrac * 1e6
+        assert abs(usdouble) < 2.1e6    # exact value not critical
+        # secondsfrac isn't referenced again
+
+        if isinstance(microseconds, float):
+            microseconds += usdouble
+            microseconds = round(microseconds, 0)
+            seconds, microseconds = divmod(microseconds, 1e6)
+            assert microseconds == int(microseconds)
+            assert seconds == int(seconds)
+            days, seconds = divmod(seconds, 24.*3600.)
+            assert days == int(days)
+            assert seconds == int(seconds)
+            d += int(days)
+            s += int(seconds)   # can't overflow
+            assert isinstance(s, int)
+            assert abs(s) <= 3 * 24 * 3600
+        else:
+            seconds, microseconds = divmod(microseconds, 1000000)
+            days, seconds = divmod(seconds, 24*3600)
+            d += days
+            s += int(seconds)    # can't overflow
+            assert isinstance(s, int)
+            assert abs(s) <= 3 * 24 * 3600
+            microseconds = float(microseconds)
+            microseconds += usdouble
+            microseconds = round(microseconds, 0)
+        assert abs(s) <= 3 * 24 * 3600
+        assert abs(microseconds) < 3.1e6
+
+        # Just a little bit of carrying possible for microseconds and seconds.
+        assert isinstance(microseconds, float)
+        assert int(microseconds) == microseconds
+        us = int(microseconds)
+        seconds, us = divmod(us, 1000000)
+        s += seconds    # can't overflow
+        assert isinstance(s, int)
+        days, s = divmod(s, 24*3600)
+        d += days
+
+        # Final normalized invariant: only d may be negative.
+        assert isinstance(d, int)
+        assert isinstance(s, int) and 0 <= s < 24*3600
+        assert isinstance(us, int) and 0 <= us < 1000000
+
+        self = object.__new__(cls)
+
+        self._days = d
+        self._seconds = s
+        self._microseconds = us
+        if abs(d) > 999999999:
+            raise OverflowError("timedelta # of days is too large: %d" % d)
+
+        return self
+
+    def __repr__(self):
+        """Eval-able representation, omitting trailing zero fields."""
+        if self._microseconds:
+            return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
+                                       self._days,
+                                       self._seconds,
+                                       self._microseconds)
+        if self._seconds:
+            return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
+                                   self._days,
+                                   self._seconds)
+        return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
+
+    def __str__(self):
+        """Human-readable form, e.g. '1 day, 2:03:04.000005'."""
+        mm, ss = divmod(self._seconds, 60)
+        hh, mm = divmod(mm, 60)
+        s = "%d:%02d:%02d" % (hh, mm, ss)
+        if self._days:
+            def plural(n):
+                return n, abs(n) != 1 and "s" or ""
+            s = ("%d day%s, " % plural(self._days)) + s
+        if self._microseconds:
+            s = s + ".%06d" % self._microseconds
+        return s
+
+    def total_seconds(self):
+        """Total seconds in the duration."""
+        return ((self.days * 86400 + self.seconds)*10**6 +
+                self.microseconds) / 10**6
+
+    # Read-only field accessors
+    @property
+    def days(self):
+        """days"""
+        return self._days
+
+    @property
+    def seconds(self):
+        """seconds"""
+        return self._seconds
+
+    @property
+    def microseconds(self):
+        """microseconds"""
+        return self._microseconds
+
+    # Arithmetic.  Results are renormalized by the timedelta constructor.
+    def __add__(self, other):
+        if isinstance(other, timedelta):
+            # for CPython compatibility, we cannot use
+            # our __class__ here, but need a real timedelta
+            return timedelta(self._days + other._days,
+                             self._seconds + other._seconds,
+                             self._microseconds + other._microseconds)
+        return NotImplemented
+
+    __radd__ = __add__
+
+    def __sub__(self, other):
+        if isinstance(other, timedelta):
+            # for CPython compatibility, we cannot use
+            # our __class__ here, but need a real timedelta
+            return timedelta(self._days - other._days,
+                             self._seconds - other._seconds,
+                             self._microseconds - other._microseconds)
+        return NotImplemented
+
+    def __rsub__(self, other):
+        if isinstance(other, timedelta):
+            return -self + other
+        return NotImplemented
+
+    def __neg__(self):
+        # for CPython compatibility, we cannot use
+        # our __class__ here, but need a real timedelta
+        return timedelta(-self._days,
+                         -self._seconds,
+                         -self._microseconds)
+
+    def __pos__(self):
+        return self
+
+    def __abs__(self):
+        # Thanks to normalization, the sign of the whole value is the
+        # sign of _days.
+        if self._days < 0:
+            return -self
+        else:
+            return self
+
+    def __mul__(self, other):
+        if isinstance(other, int):
+            # for CPython compatibility, we cannot use
+            # our __class__ here, but need a real timedelta
+            return timedelta(self._days * other,
+                             self._seconds * other,
+                             self._microseconds * other)
+        if isinstance(other, float):
+            #a, b = other.as_integer_ratio()
+            #return self * a / b
+            usec = self._to_microseconds()
+            return timedelta(0, 0, round(usec * other))
+        return NotImplemented
+
+    __rmul__ = __mul__
+
+    def _to_microseconds(self):
+        """Collapse the (days, seconds, microseconds) triple to one integer."""
+        return ((self._days * (24*3600) + self._seconds) * 1000000 +
+                self._microseconds)
+
+    def __floordiv__(self, other):
+        if not isinstance(other, (int, timedelta)):
+            return NotImplemented
+        usec = self._to_microseconds()
+        if isinstance(other, timedelta):
+            return usec // other._to_microseconds()
+        if isinstance(other, int):
+            return timedelta(0, 0, usec // other)
+
+    def __truediv__(self, other):
+        if not isinstance(other, (int, float, timedelta)):
+            return NotImplemented
+        usec = self._to_microseconds()
+        if isinstance(other, timedelta):
+            return usec / other._to_microseconds()
+        if isinstance(other, int):
+            return timedelta(0, 0, usec / other)
+        if isinstance(other, float):
+#            a, b = other.as_integer_ratio()
+#            return timedelta(0, 0, b * usec / a)
+            return timedelta(0, 0, round(usec / other))
+
+    def __mod__(self, other):
+        if isinstance(other, timedelta):
+            r = self._to_microseconds() % other._to_microseconds()
+            return timedelta(0, 0, r)
+        return NotImplemented
+
+    def __divmod__(self, other):
+        if isinstance(other, timedelta):
+            q, r = divmod(self._to_microseconds(),
+                          other._to_microseconds())
+            return q, timedelta(0, 0, r)
+        return NotImplemented
+
+    # Comparisons of timedelta objects with other.
+
+    def __eq__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) == 0
+        else:
+            return False
+
+    def __ne__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) != 0
+        else:
+            return True
+
+    def __le__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) <= 0
+        else:
+            _cmperror(self, other)
+
+    def __lt__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) < 0
+        else:
+            _cmperror(self, other)
+
+    def __ge__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) >= 0
+        else:
+            _cmperror(self, other)
+
+    def __gt__(self, other):
+        if isinstance(other, timedelta):
+            return self._cmp(other) > 0
+        else:
+            _cmperror(self, other)
+
+    def _cmp(self, other):
+        # Normalization makes tuple comparison of the state equivalent to
+        # comparing the durations themselves.
+        assert isinstance(other, timedelta)
+        return _cmp(self._getstate(), other._getstate())
+
+    def __hash__(self):
+        return hash(self._getstate())
+
+    def __bool__(self):
+        return (self._days != 0 or
+                self._seconds != 0 or
+                self._microseconds != 0)
+
+    # Pickle support.
+
+    def _getstate(self):
+        """Return the normalized field triple used for pickling/hash/compare."""
+        return (self._days, self._seconds, self._microseconds)
+
+    def __reduce__(self):
+        return (self.__class__, self._getstate())
+
+# Class attributes are assigned after the class body because they are
+# themselves timedelta instances.
+timedelta.min = timedelta(-999999999)
+timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
+                          microseconds=999999)
+timedelta.resolution = timedelta(microseconds=1)
+
+class date:
+    """Concrete date type.
+
+    Constructors:
+
+    __new__()
+    fromtimestamp()
+    today()
+    fromordinal()
+
+    Operators:
+
+    __repr__, __str__
+    __cmp__, __hash__
+    __add__, __radd__, __sub__ (add/radd only with timedelta arg)
+
+    Methods:
+
+    timetuple()
+    toordinal()
+    weekday()
+    isoweekday(), isocalendar(), isoformat()
+    ctime()
+    strftime()
+
+    Properties (readonly):
+    year, month, day
+    """
+    # Year/month/day are stored immutably; __slots__ avoids a per-instance
+    # __dict__ for this very common type.
+    __slots__ = '_year', '_month', '_day'
+
+    def __new__(cls, year, month=None, day=None):
+        """Constructor.
+
+        Arguments:
+
+        year, month, day (required, base 1)
+        """
+        # Unpickling path: a 4-byte state blob may arrive in place of 'year';
+        # a plausible month value at byte [2] (1..12, with month left at None)
+        # distinguishes it from an integer year.  __setstate is name-mangled
+        # to _date__setstate.
+        if (isinstance(year, bytes) and len(year) == 4 and
+            1 <= year[2] <= 12 and month is None):  # Month is sane
+            # Pickle support
+            self = object.__new__(cls)
+            self.__setstate(year)
+            return self
+        _check_date_fields(year, month, day)
+        self = object.__new__(cls)
+        self._year = year
+        self._month = month
+        self._day = day
+        return self
+
+    # Additional constructors
+
+    @classmethod
+    def fromtimestamp(cls, t):
+        "Construct a date from a POSIX timestamp (like time.time())."
+        # Only the date portion of the local-time conversion is used.
+        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
+        return cls(y, m, d)
+
+    @classmethod
+    def today(cls):
+        "Construct a date from time.time()."
+        t = _time.time()
+        return cls.fromtimestamp(t)
+
+    @classmethod
+    def fromordinal(cls, n):
+        """Construct a date from a proleptic Gregorian ordinal.
+
+        January 1 of year 1 is day 1.  Only the year, month and day are
+        non-zero in the result.
+        """
+        y, m, d = _ord2ymd(n)
+        return cls(y, m, d)
+
+    # Conversions to string
+
+    def __repr__(self):
+        """Convert to formal string, for repr().
+
+        >>> dt = datetime(2010, 1, 1)
+        >>> repr(dt)
+        'datetime.datetime(2010, 1, 1, 0, 0)'
+
+        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
+        >>> repr(dt)
+        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
+        """
+        return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
+                                   self._year,
+                                   self._month,
+                                   self._day)
+    # XXX These shouldn't depend on time.localtime(), because that
+    # clips the usable dates to [1970 .. 2038).  At least ctime() is
+    # easily done without using strftime() -- that's better too because
+    # strftime("%c", ...) is locale specific.
+
+
+    def ctime(self):
+        "Return ctime() style string."
+        # toordinal() % 7 maps Monday (ordinal 1) to 1; 0 means Sunday (7).
+        weekday = self.toordinal() % 7 or 7
+        return "%s %s %2d 00:00:00 %04d" % (
+            _DAYNAMES[weekday],
+            _MONTHNAMES[self._month],
+            self._day, self._year)
+
+    def strftime(self, fmt):
+        "Format using strftime()."
+        return _wrap_strftime(self, fmt, self.timetuple())
+
+    def __format__(self, fmt):
+        # format(d, "") and str(d) agree; any non-empty spec is a strftime
+        # format string.
+        if len(fmt) != 0:
+            return self.strftime(fmt)
+        return str(self)
+
+    def isoformat(self):
+        """Return the date formatted according to ISO.
+
+        This is 'YYYY-MM-DD'.
+
+        References:
+        - http://www.w3.org/TR/NOTE-datetime
+        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
+        """
+        return "%04d-%02d-%02d" % (self._year, self._month, self._day)
+
+    __str__ = isoformat
+
+    # Read-only field accessors (the underlying attributes are private and
+    # never mutated after construction).
+    @property
+    def year(self):
+        """year (1-9999)"""
+        return self._year
+
+    @property
+    def month(self):
+        """month (1-12)"""
+        return self._month
+
+    @property
+    def day(self):
+        """day (1-31)"""
+        return self._day
+
+    # Standard conversions, __cmp__, __hash__ (and helpers)
+
+    def timetuple(self):
+        "Return local time tuple compatible with time.localtime()."
+        # Time-of-day fields are zero; DST flag is -1 (unknown).
+        return _build_struct_time(self._year, self._month, self._day,
+                                  0, 0, 0, -1)
+
+    def toordinal(self):
+        """Return proleptic Gregorian ordinal for the year, month and day.
+
+        January 1 of year 1 is day 1.  Only the year, month and day values
+        contribute to the result.
+        """
+        return _ymd2ord(self._year, self._month, self._day)
+
+    def replace(self, year=None, month=None, day=None):
+        """Return a new date with new values for the specified fields."""
+        if year is None:
+            year = self._year
+        if month is None:
+            month = self._month
+        if day is None:
+            day = self._day
+        _check_date_fields(year, month, day)
+        return date(year, month, day)
+
+    # Comparisons of date objects with other.
+
+    def __eq__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) == 0
+        return NotImplemented
+
+    def __ne__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) != 0
+        return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) <= 0
+        return NotImplemented
+
+    def __lt__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) < 0
+        return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) >= 0
+        return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, date):
+            return self._cmp(other) > 0
+        return NotImplemented
+
+    def _cmp(self, other):
+        assert isinstance(other, date)
+        y, m, d = self._year, self._month, self._day
+        y2, m2, d2 = other._year, other._month, other._day
+        return _cmp((y, m, d), (y2, m2, d2))
+
    def __hash__(self):
        "Hash.  Based on the packed pickle state, so equal dates hash alike."
        return hash(self._getstate())
+
+    # Computations
+
+    def __add__(self, other):
+        "Add a date to a timedelta."
+        if isinstance(other, timedelta):
+            o = self.toordinal() + other.days
+            if 0 < o <= _MAXORDINAL:
+                return date.fromordinal(o)
+            raise OverflowError("result out of range")
+        return NotImplemented
+
+    __radd__ = __add__
+
+    def __sub__(self, other):
+        """Subtract two dates, or a date and a timedelta."""
+        if isinstance(other, timedelta):
+            return self + timedelta(-other.days)
+        if isinstance(other, date):
+            days1 = self.toordinal()
+            days2 = other.toordinal()
+            return timedelta(days1 - days2)
+        return NotImplemented
+
+    def weekday(self):
+        "Return day of the week, where Monday == 0 ... Sunday == 6."
+        return (self.toordinal() + 6) % 7
+
    # Day-of-the-week and week-of-the-year, according to ISO

    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7

    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.

        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.

        The first week is 1; Monday is 1 ... Sunday is 7.

        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Before this year's week 1: the date belongs to the previous
            # ISO year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Possibly already in week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1
+
    # Pickle support.

    def _getstate(self):
        # Pack year (two bytes, big-endian), month and day into a 4-byte
        # payload; note the trailing comma makes this a 1-tuple.
        yhi, ylo = divmod(self._year, 256)
        return bytes([yhi, ylo, self._month, self._day]),

    def __setstate(self, string):
        # Inverse of _getstate(); `string` is the 4-byte packed payload.
        # The month byte doubles as a sanity check on the data.
        if len(string) != 4 or not (1 <= string[2] <= 12):
            raise TypeError("not enough arguments")
        yhi, ylo, self._month, self._day = string
        self._year = yhi * 256 + ylo

    def __reduce__(self):
        return (self.__class__, self._getstate())
+
_date_class = date  # so functions w/ args named "date" can get at the class

# Class-level bounds and granularity for date arithmetic.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
+
class tzinfo:
    """Abstract base class for time zone info classes.

    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()
    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")

    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")

    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.

        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")

    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."

        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # Shift by the standard (non-DST) offset first, then re-query dst()
        # at the shifted local time and fold it back in.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        return dt + dtdst

    # Pickle support.

    def __reduce__(self):
        # Honor the optional __getinitargs__/__getstate__ hooks so that
        # subclasses with constructor arguments pickle naturally.
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)

_tzinfo_class = tzinfo  # so methods w/ args named "tzinfo" can get at the class
+
class time:
    """Time with time zone.

    Constructors:

    __new__()

    Operators:

    __repr__, __str__
    __cmp__, __hash__

    Methods:

    strftime()
    isoformat()
    utcoffset()
    tzname()
    dst()

    Properties (readonly):
    hour, minute, second, microsecond, tzinfo
    """

    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.

        Arguments:

        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        self = object.__new__(cls)
        # A 6-byte bytes first argument is the packed payload produced by
        # _getstate(); `minute` then carries the (optional) tzinfo.
        if isinstance(hour, bytes) and len(hour) == 6:
            # Pickle support
            self.__setstate(hour, minute or None)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self

    # Read-only field accessors
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo

    # Standard conversions, __hash__ (and helpers)

    # Comparisons of time objects with other.
    # NOTE: equality between a naive and an aware time is False (rather
    # than an error, via allow_mixed=True); ordering them raises through
    # _cmperror.  Non-time operands compare unequal, never NotImplemented.

    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) == 0
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other, allow_mixed=True) != 0
        else:
            return True

    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)

    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)

    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)

    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)

    def _cmp(self, other, allow_mixed=False):
        # Three-way compare.  Times sharing a tzinfo object (both naive
        # included) or an equal UTC offset compare field-by-field;
        # otherwise both sides are normalized to UTC minutes first.
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                       (other._hour, other._minute, other._second,
                        other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware times")
        myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
        othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))

    def __hash__(self):
        """Hash."""
        # Aware times hash on their UTC equivalent so that equal times in
        # different zones hash alike.
        tzoff = self.utcoffset()
        if not tzoff: # zero or None
            return hash(self._getstate()[0])
        h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
                      timedelta(hours=1))
        assert not m % timedelta(minutes=1), "whole minute"
        m //= timedelta(minutes=1)
        if 0 <= h < 24:
            return hash(time(h, m, self.second, self.microsecond))
        return hash((h, m, self.second, self.microsecond))

    # Conversion to string

    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None."""
        off = self.utcoffset()
        if off is not None:
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off

    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields are omitted: microsecond first, then second.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
                             self._hour, self._minute, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def isoformat(self):
        """Return the time formatted according to ISO.

        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        tz = self._tzstr()
        if tz:
            s += tz
        return s

    __str__ = isoformat

    def strftime(self, fmt):
        """Format using strftime().  The date part of the timestamp passed
        to underlying strftime should not be used.
        """
        # The year must be >= 1000 else Python's strftime implementation
        # can raise a bogus exception.
        timetuple = (1900, 1, 1,
                     self._hour, self._minute, self._second,
                     0, 1, -1)
        return _wrap_strftime(self, fmt, timetuple)

    def __format__(self, fmt):
        # An empty format spec means str(self) (ISO form); anything else
        # is treated as a strftime() format string.
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)

    # Timezone functions

    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC)."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(None)
        _check_utc_offset("utcoffset", offset)
        return offset

    def tzname(self):
        """Return the timezone name.

        Note that the name is 100% informational -- there's no requirement that
        it mean anything in particular. For example, "GMT", "UTC", "-500",
        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
        """
        if self._tzinfo is None:
            return None
        name = self._tzinfo.tzname(None)
        _check_tzname(name)
        return name

    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(None)
        _check_utc_offset("dst", offset)
        return offset

    def replace(self, hour=None, minute=None, second=None, microsecond=None,
                tzinfo=True):
        """Return a new time with new values for the specified fields."""
        # tzinfo defaults to True as a sentinel so that passing None
        # remains meaningful (it strips the timezone).
        if hour is None:
            hour = self.hour
        if minute is None:
            minute = self.minute
        if second is None:
            second = self.second
        if microsecond is None:
            microsecond = self.microsecond
        if tzinfo is True:
            tzinfo = self.tzinfo
        _check_time_fields(hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        return time(hour, minute, second, microsecond, tzinfo)

    def __bool__(self):
        # A time is falsy only when its UTC-normalized value is exactly
        # midnight (naive midnight included).
        if self.second or self.microsecond:
            return True
        offset = self.utcoffset() or timedelta(0)
        return timedelta(hours=self.hour, minutes=self.minute) != offset

    # Pickle support.

    def _getstate(self):
        # Pack hour, minute, second and microsecond (three bytes,
        # big-endian) into a 6-byte payload.
        us2, us3 = divmod(self._microsecond, 256)
        us1, us2 = divmod(us2, 256)
        basestate = bytes([self._hour, self._minute, self._second,
                           us1, us2, us3])
        if self._tzinfo is None:
            return (basestate,)
        else:
            return (basestate, self._tzinfo)

    def __setstate(self, string, tzinfo):
        # Inverse of _getstate(); `string` is the 6-byte packed payload.
        if len(string) != 6 or string[0] >= 24:
            raise TypeError("an integer is required")
        (self._hour, self._minute, self._second,
         us1, us2, us3) = string
        self._microsecond = (((us1 << 8) | us2) << 8) | us3
        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
            self._tzinfo = tzinfo
        else:
            raise TypeError("bad tzinfo state arg %r" % tzinfo)

    def __reduce__(self):
        return (time, self._getstate())

_time_class = time  # so functions w/ args named "time" can get at the class

# Class-level bounds and granularity for time values.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
+
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])

    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints.
    """

    __slots__ = date.__slots__ + (
        '_hour', '_minute', '_second',
        '_microsecond', '_tzinfo')
    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None):
        # A 10-byte bytes first argument is the packed payload produced by
        # _getstate(); `month` then carries the (optional) tzinfo.  The
        # first 4 bytes are handed to date.__new__, which presumably
        # recognizes them as its own pickle payload — TODO confirm against
        # date.__new__ (defined earlier in this file).
        if isinstance(year, bytes) and len(year) == 10:
            # Pickle support
            self = date.__new__(cls, year[:4])
            self.__setstate(year, month)
            return self
        _check_tzinfo_arg(tzinfo)
        _check_time_fields(hour, minute, second, microsecond)
        self = date.__new__(cls, year, month, day)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        return self
+
    # Read-only field accessors for the time-of-day fields (the date
    # fields are inherited from the date base class).
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour

    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute

    @property
    def second(self):
        """second (0-59)"""
        return self._second

    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond

    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
+
    @classmethod
    def fromtimestamp(cls, t, tz=None):
        """Construct a datetime from a POSIX timestamp (like time.time()).

        A timezone info object may be passed in as well.
        """

        _check_tzinfo_arg(tz)

        # Naive result: interpret in local time.  Aware result: build in
        # UTC first, then let tz.fromutc() convert to the target zone.
        converter = _time.localtime if tz is None else _time.gmtime

        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)

        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000.  In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        result = cls(y, m, d, hh, mm, ss, us, tz)
        if tz is not None:
            result = tz.fromutc(result)
        return result
+
    @classmethod
    def utcfromtimestamp(cls, t):
        "Construct a naive UTC datetime from a POSIX timestamp (like time.time())."
        # Split the timestamp into whole seconds and a fractional part,
        # converted to microseconds.
        t, frac = divmod(t, 1.0)
        us = int(frac * 1e6)

        # If timestamp is less than one microsecond smaller than a
        # full second, us can be rounded up to 1000000.  In this case,
        # roll over to seconds, otherwise, ValueError is raised
        # by the constructor.
        if us == 1000000:
            t += 1
            us = 0
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
        ss = min(ss, 59)    # clamp out leap seconds if the platform has them
        return cls(y, m, d, hh, mm, ss, us)
+
+    # XXX This is supposed to do better than we *can* do by using time.time(),
+    # XXX if the platform supports a more accurate way.  The C implementation
+    # XXX uses gettimeofday on platforms that have it, but that isn't
+    # XXX available from Python.  So now() may return different results
+    # XXX across the implementations.
+    @classmethod
+    def now(cls, tz=None):
+        "Construct a datetime from time.time() and optional time zone info."
+        t = _time.time()
+        return cls.fromtimestamp(t, tz)
+
+    @classmethod
+    def utcnow(cls):
+        "Construct a UTC datetime from time.time()."
+        t = _time.time()
+        return cls.utcfromtimestamp(t)
+
    @classmethod
    def combine(cls, date, time):
        "Construct a datetime from a given date and a given time."
        # The _date_class/_time_class aliases are used because the
        # parameters shadow the class names.
        if not isinstance(date, _date_class):
            raise TypeError("date argument must be a date instance")
        if not isinstance(time, _time_class):
            raise TypeError("time argument must be a time instance")
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second, time.microsecond,
                   time.tzinfo)
+
+    def timetuple(self):
+        "Return local time tuple compatible with time.localtime()."
+        dst = self.dst()
+        if dst is None:
+            dst = -1
+        elif dst:
+            dst = 1
+        else:
+            dst = 0
+        return _build_struct_time(self.year, self.month, self.day,
+                                  self.hour, self.minute, self.second,
+                                  dst)
+
    def timestamp(self):
        "Return POSIX timestamp as float"
        if self._tzinfo is None:
            # Naive: interpret the fields as local time via mktime(), and
            # re-attach the sub-second part afterwards.
            return _time.mktime((self.year, self.month, self.day,
                                 self.hour, self.minute, self.second,
                                 -1, -1, -1)) + self.microsecond / 1e6
        else:
            # Aware: exact arithmetic against the UTC epoch.
            return (self - _EPOCH).total_seconds()

    def utctimetuple(self):
        "Return UTC time tuple compatible with time.gmtime()."
        offset = self.utcoffset()
        if offset:
            # Rebinds the local name to a shifted copy; self's object is
            # unchanged (datetime is immutable in use).
            self -= offset
        y, m, d = self.year, self.month, self.day
        hh, mm, ss = self.hour, self.minute, self.second
        return _build_struct_time(y, m, d, hh, mm, ss, 0)
+
    def date(self):
        "Return the date part."
        return date(self._year, self._month, self._day)

    def time(self):
        "Return the time part, with tzinfo None."
        return time(self.hour, self.minute, self.second, self.microsecond)

    def timetz(self):
        "Return the time part, with same tzinfo."
        return time(self.hour, self.minute, self.second, self.microsecond,
                    self._tzinfo)
+
+    def replace(self, year=None, month=None, day=None, hour=None,
+                minute=None, second=None, microsecond=None, tzinfo=True):
+        """Return a new datetime with new values for the specified fields."""
+        if year is None:
+            year = self.year
+        if month is None:
+            month = self.month
+        if day is None:
+            day = self.day
+        if hour is None:
+            hour = self.hour
+        if minute is None:
+            minute = self.minute
+        if second is None:
+            second = self.second
+        if microsecond is None:
+            microsecond = self.microsecond
+        if tzinfo is True:
+            tzinfo = self.tzinfo
+        _check_date_fields(year, month, day)
+        _check_time_fields(hour, minute, second, microsecond)
+        _check_tzinfo_arg(tzinfo)
+        return datetime(year, month, day, hour, minute, second,
+                          microsecond, tzinfo)
+
    def astimezone(self, tz=None):
        # Convert an aware datetime to another zone.  With tz=None, the
        # platform's local zone is derived from time.localtime().
        if tz is None:
            if self.tzinfo is None:
                raise ValueError("astimezone() requires an aware datetime")
            ts = (self - _EPOCH) // timedelta(seconds=1)
            localtm = _time.localtime(ts)
            local = datetime(*localtm[:6])
            try:
                # Extract TZ data if available
                gmtoff = localtm.tm_gmtoff
                zone = localtm.tm_zone
            except AttributeError:
                # Compute UTC offset and compare with the value implied
                # by tm_isdst.  If the values match, use the zone name
                # implied by tm_isdst.
                delta = local - datetime(*_time.gmtime(ts)[:6])
                dst = _time.daylight and localtm.tm_isdst > 0
                gmtoff = -(_time.altzone if dst else _time.timezone)
                if delta == timedelta(seconds=gmtoff):
                    tz = timezone(delta, _time.tzname[dst])
                else:
                    tz = timezone(delta)
            else:
                tz = timezone(timedelta(seconds=gmtoff), zone)

        elif not isinstance(tz, tzinfo):
            raise TypeError("tz argument must be an instance of tzinfo")

        mytz = self.tzinfo
        if mytz is None:
            raise ValueError("astimezone() requires an aware datetime")

        if tz is mytz:
            return self

        # Convert self to UTC, and attach the new time zone object.
        myoffset = self.utcoffset()
        if myoffset is None:
            raise ValueError("astimezone() requires an aware datetime")
        utc = (self - myoffset).replace(tzinfo=tz)

        # Convert from UTC to tz's local time.
        return tz.fromutc(utc)
+
+    # Ways to produce a string.
+
+    def ctime(self):
+        "Return ctime() style string."
+        weekday = self.toordinal() % 7 or 7
+        return "%s %s %2d %02d:%02d:%02d %04d" % (
+            _DAYNAMES[weekday],
+            _MONTHNAMES[self._month],
+            self._day,
+            self._hour, self._minute, self._second,
+            self._year)
+
    def isoformat(self, sep='T'):
        """Return the time formatted according to ISO.

        This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
        self.microsecond == 0.

        If self.tzinfo is not None, the UTC offset is also attached, giving
        'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.

        Optional argument sep specifies the separator between date and
        time, default 'T'.
        """
        s = ("%04d-%02d-%02d%s" % (self._year, self._month, self._day,
                                  sep) +
                _format_time(self._hour, self._minute, self._second,
                             self._microsecond))
        off = self.utcoffset()
        if off is not None:
            # Render the offset as a signed whole number of minutes.
            if off.days < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            hh, mm = divmod(off, timedelta(hours=1))
            assert not mm % timedelta(minutes=1), "whole minute"
            mm //= timedelta(minutes=1)
            s += "%s%02d:%02d" % (sign, hh, mm)
        return s
+
    def __repr__(self):
        """Convert to formal string, for repr()."""
        L = [self._year, self._month, self._day, # These are never zero
             self._hour, self._minute, self._second, self._microsecond]
        # Trim a trailing zero microsecond, then a trailing zero second.
        if L[-1] == 0:
            del L[-1]
        if L[-1] == 0:
            del L[-1]
        s = ", ".join(map(str, L))
        s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
        if self._tzinfo is not None:
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s

    def __str__(self):
        "Convert to string, for str().  Same as isoformat() with a space."
        return self.isoformat(sep=' ')
+
    @classmethod
    def strptime(cls, date_string, format):
        'string, format -> new datetime parsed from a string (like time.strptime()).'
        # Delegates to the shared _strptime helper module; imported lazily
        # since parsing is comparatively rare.
        import _strptime
        return _strptime._strptime_datetime(cls, date_string, format)
+
    def utcoffset(self):
        """Return the timezone offset in minutes east of UTC (negative west of
        UTC).  None for naive datetimes."""
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.utcoffset(self)
        _check_utc_offset("utcoffset", offset)
        return offset
+
+    def tzname(self):
+        """Return the timezone name.
+
+        Note that the name is 100% informational -- there's no requirement that
+        it mean anything in particular. For example, "GMT", "UTC", "-500",
+        "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
+        """
+        name = _call_tzinfo_method(self._tzinfo, "tzname", self)
+        _check_tzname(name)
+        return name
+
    def dst(self):
        """Return 0 if DST is not in effect, or the DST offset (in minutes
        eastward) if DST is in effect.  None for naive datetimes.

        This is purely informational; the DST offset has already been added to
        the UTC offset returned by utcoffset() if applicable, so there's no
        need to consult dst() unless you're interested in displaying the DST
        info.
        """
        if self._tzinfo is None:
            return None
        offset = self._tzinfo.dst(self)
        _check_utc_offset("dst", offset)
        return offset
+
+    # Comparisons of datetime objects with other.
+
+    def __eq__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other, allow_mixed=True) == 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            return False
+
+    def __ne__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other, allow_mixed=True) != 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            return True
+
+    def __le__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other) <= 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            _cmperror(self, other)
+
+    def __lt__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other) < 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            _cmperror(self, other)
+
+    def __ge__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other) >= 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            _cmperror(self, other)
+
+    def __gt__(self, other):
+        if isinstance(other, datetime):
+            return self._cmp(other) > 0
+        elif not isinstance(other, date):
+            return NotImplemented
+        else:
+            _cmperror(self, other)
+
    def _cmp(self, other, allow_mixed=False):
        # Three-way compare.  Datetimes sharing a tzinfo object (both
        # naive included) or an equal UTC offset compare field-by-field;
        # otherwise subtraction handles the offset correction.
        assert isinstance(other, datetime)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None

        if mytz is ottz:
            base_compare = True
        else:
            myoff = self.utcoffset()
            otoff = other.utcoffset()
            base_compare = myoff == otoff

        if base_compare:
            return _cmp((self._year, self._month, self._day,
                         self._hour, self._minute, self._second,
                         self._microsecond),
                       (other._year, other._month, other._day,
                        other._hour, other._minute, other._second,
                        other._microsecond))
        if myoff is None or otoff is None:
            if allow_mixed:
                return 2 # arbitrary non-zero value
            else:
                raise TypeError("cannot compare naive and aware datetimes")
        # XXX What follows could be done more efficiently...
        diff = self - other     # this will take offsets into account
        if diff.days < 0:
            return -1
        return diff and 1 or 0
+
    def __add__(self, other):
        "Add a datetime and a timedelta."
        if not isinstance(other, timedelta):
            return NotImplemented
        # Fold self into a single timedelta measured from day 0, add, then
        # split the (normalized) result back into ordinal + time-of-day.
        delta = timedelta(self.toordinal(),
                          hours=self._hour,
                          minutes=self._minute,
                          seconds=self._second,
                          microseconds=self._microsecond)
        delta += other
        hour, rem = divmod(delta.seconds, 3600)
        minute, second = divmod(rem, 60)
        if 0 < delta.days <= _MAXORDINAL:
            return datetime.combine(date.fromordinal(delta.days),
                                    time(hour, minute, second,
                                         delta.microseconds,
                                         tzinfo=self._tzinfo))
        raise OverflowError("result out of range")

    __radd__ = __add__

    def __sub__(self, other):
        "Subtract two datetimes, or a datetime and a timedelta."
        if not isinstance(other, datetime):
            if isinstance(other, timedelta):
                return self + -other
            return NotImplemented

        # Naive wall-clock difference first...
        days1 = self.toordinal()
        days2 = other.toordinal()
        secs1 = self._second + self._minute * 60 + self._hour * 3600
        secs2 = other._second + other._minute * 60 + other._hour * 3600
        base = timedelta(days1 - days2,
                         secs1 - secs2,
                         self._microsecond - other._microsecond)
        if self._tzinfo is other._tzinfo:
            return base
        myoff = self.utcoffset()
        otoff = other.utcoffset()
        if myoff == otoff:
            return base
        if myoff is None or otoff is None:
            raise TypeError("cannot mix naive and timezone-aware time")
        # ...then correct by the difference in UTC offsets.
        return base + otoff - myoff
+
+    def __hash__(self):
+        tzoff = self.utcoffset()
+        if tzoff is None:
+            return hash(self._getstate()[0])
+        days = _ymd2ord(self.year, self.month, self.day)
+        seconds = self.hour * 3600 + self.minute * 60 + self.second
+        return hash(timedelta(days, seconds, self.microsecond) - tzoff)
+
+    # Pickle support.
+
+    def _getstate(self):
+        yhi, ylo = divmod(self._year, 256)
+        us2, us3 = divmod(self._microsecond, 256)
+        us1, us2 = divmod(us2, 256)
+        basestate = bytes([yhi, ylo, self._month, self._day,
+                           self._hour, self._minute, self._second,
+                           us1, us2, us3])
+        if self._tzinfo is None:
+            return (basestate,)
+        else:
+            return (basestate, self._tzinfo)
+
+    def __setstate(self, string, tzinfo):
+        (yhi, ylo, self._month, self._day, self._hour,
+         self._minute, self._second, us1, us2, us3) = string
+        self._year = yhi * 256 + ylo
+        self._microsecond = (((us1 << 8) | us2) << 8) | us3
+        if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
+            self._tzinfo = tzinfo
+        else:
+            raise TypeError("bad tzinfo state arg %r" % tzinfo)
+
    def __reduce__(self):
        # Pickle protocol: rebuild by calling the class with the packed
        # state tuple returned by _getstate().
        return (self.__class__, self._getstate())
+
+
# Class attribute bounds, assigned after the class body because they are
# themselves datetime/timedelta instances.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
+
+
def _isoweek1monday(year):
    """Return the ordinal of the Monday starting ISO week 1 of *year*."""
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # 0 == Monday; see weekday() above
    week1monday = jan1 - jan1_weekday
    # ISO week 1 is the week containing the first Thursday; if Jan 1 falls
    # after Thursday, that Monday's week belongs to the previous ISO year.
    return week1monday + 7 if jan1_weekday > THURSDAY else week1monday
+
class timezone(tzinfo):
    """Fixed-offset tzinfo subclass: constant utcoffset(), dst() of None."""

    __slots__ = '_offset', '_name'

    # Sentinel value to disallow None
    _Omitted = object()
    def __new__(cls, offset, name=_Omitted):
        # Validate: offset is a timedelta of whole minutes, strictly
        # inside +/- 24 hours; name (if given) must be a string.
        if not isinstance(offset, timedelta):
            raise TypeError("offset must be a timedelta")
        if name is cls._Omitted:
            # timezone(timedelta(0)) returns the interned UTC singleton,
            # so `is` comparisons against timezone.utc keep working.
            if not offset:
                return cls.utc
            name = None
        elif not isinstance(name, str):
            raise TypeError("name must be a string")
        if not cls._minoffset <= offset <= cls._maxoffset:
            raise ValueError("offset must be a timedelta"
                             " strictly between -timedelta(hours=24) and"
                             " timedelta(hours=24).")
        if (offset.microseconds != 0 or
            offset.seconds % 60 != 0):
            raise ValueError("offset must be a timedelta"
                             " representing a whole number of minutes")
        return cls._create(offset, name)

    @classmethod
    def _create(cls, offset, name=None):
        # Internal constructor: bypasses the validation in __new__ (used
        # for the utc/min/max singletons below).
        self = tzinfo.__new__(cls)
        self._offset = offset
        self._name = name
        return self

    def __getinitargs__(self):
        """pickle support"""
        if self._name is None:
            return (self._offset,)
        return (self._offset, self._name)

    def __eq__(self, other):
        # Equality is by offset only; the name is ignored.  Non-timezone
        # operands compare unequal (False, not NotImplemented).
        if type(other) != timezone:
            return False
        return self._offset == other._offset

    def __hash__(self):
        # Consistent with __eq__: hash only the offset.
        return hash(self._offset)

    def __repr__(self):
        """Convert to formal string, for repr().

        >>> tz = timezone.utc
        >>> repr(tz)
        'datetime.timezone.utc'
        >>> tz = timezone(timedelta(hours=-5), 'EST')
        >>> repr(tz)
        "datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
        """
        if self is self.utc:
            return 'datetime.timezone.utc'
        if self._name is None:
            return "%s(%r)" % ('datetime.' + self.__class__.__name__,
                               self._offset)
        return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
                               self._offset, self._name)

    def __str__(self):
        return self.tzname(None)

    def utcoffset(self, dt):
        # Fixed offset: dt is accepted only for interface compatibility.
        if isinstance(dt, datetime) or dt is None:
            return self._offset
        raise TypeError("utcoffset() argument must be a datetime instance"
                        " or None")

    def tzname(self, dt):
        if isinstance(dt, datetime) or dt is None:
            # Unnamed zones render as 'UTC(+|-)HH:MM'.
            if self._name is None:
                return self._name_from_offset(self._offset)
            return self._name
        raise TypeError("tzname() argument must be a datetime instance"
                        " or None")

    def dst(self, dt):
        # Fixed-offset zones have no DST component.
        if isinstance(dt, datetime) or dt is None:
            return None
        raise TypeError("dst() argument must be a datetime instance"
                        " or None")

    def fromutc(self, dt):
        # Local time is always UTC plus the fixed offset; no DST fixup.
        if isinstance(dt, datetime):
            if dt.tzinfo is not self:
                raise ValueError("fromutc: dt.tzinfo "
                                 "is not self")
            return dt + self._offset
        raise TypeError("fromutc() argument must be a datetime instance"
                        " or None")

    # Largest/smallest representable fixed offsets (whole minutes).
    _maxoffset = timedelta(hours=23, minutes=59)
    _minoffset = -_maxoffset

    @staticmethod
    def _name_from_offset(delta):
        # Format an offset timedelta as 'UTC(+|-)HH:MM'.
        if delta < timedelta(0):
            sign = '-'
            delta = -delta
        else:
            sign = '+'
        hours, rest = divmod(delta, timedelta(hours=1))
        minutes = rest // timedelta(minutes=1)
        return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
+
# Module singletons: the UTC zone plus the extreme fixed offsets.  Built
# through _create to bypass __new__ validation (and its cls.utc lookup).
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
# Aware reference point used for timestamp conversions.
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
+"""
+Some time zone algebra.  For a datetime x, let
+    x.n = x stripped of its timezone -- its naive time.
+    x.o = x.utcoffset(), and assuming that doesn't raise an exception or
+          return None
+    x.d = x.dst(), and assuming that doesn't raise an exception or
+          return None
+    x.s = x's standard offset, x.o - x.d
+
+Now some derived rules, where k is a duration (timedelta).
+
+1. x.o = x.s + x.d
+   This follows from the definition of x.s.
+
+2. If x and y have the same tzinfo member, x.s = y.s.
+   This is actually a requirement, an assumption we need to make about
+   sane tzinfo classes.
+
+3. The naive UTC time corresponding to x is x.n - x.o.
+   This is again a requirement for a sane tzinfo class.
+
+4. (x+k).s = x.s
   This follows from #2, and that datetime+timedelta preserves tzinfo.
+
+5. (x+k).n = x.n + k
+   Again follows from how arithmetic is defined.
+
+Now we can explain tz.fromutc(x).  Let's assume it's an interesting case
+(meaning that the various tzinfo methods exist, and don't blow up or return
+None when called).
+
+The function wants to return a datetime y with timezone tz, equivalent to x.
+x is already in UTC.
+
+By #3, we want
+
+    y.n - y.o = x.n                             [1]
+
+The algorithm starts by attaching tz to x.n, and calling that y.  So
+x.n = y.n at the start.  Then it wants to add a duration k to y, so that [1]
+becomes true; in effect, we want to solve [2] for k:
+
+   (y+k).n - (y+k).o = x.n                      [2]
+
+By #1, this is the same as
+
+   (y+k).n - ((y+k).s + (y+k).d) = x.n          [3]
+
+By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
+Substituting that into [3],
+
+   x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
+   k - (y+k).s - (y+k).d = 0; rearranging,
+   k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
+   k = y.s - (y+k).d
+
+On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
+approximate k by ignoring the (y+k).d term at first.  Note that k can't be
+very large, since all offset-returning methods return a duration of magnitude
+less than 24 hours.  For that reason, if y is firmly in std time, (y+k).d must
+be 0, so ignoring it has no consequence then.
+
+In any case, the new value is
+
+    z = y + y.s                                 [4]
+
It's helpful to step back and look at [4] from a higher level:  it's simply
+mapping from UTC to tz's standard time.
+
+At this point, if
+
+    z.n - z.o = x.n                             [5]
+
+we have an equivalent time, and are almost done.  The insecurity here is
+at the start of daylight time.  Picture US Eastern for concreteness.  The wall
+time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
+sense then.  The docs ask that an Eastern tzinfo class consider such a time to
+be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
+on the day DST starts.  We want to return the 1:MM EST spelling because that's
+the only spelling that makes sense on the local wall clock.
+
+In fact, if [5] holds at this point, we do have the standard-time spelling,
+but that takes a bit of proof.  We first prove a stronger result.  What's the
+difference between the LHS and RHS of [5]?  Let
+
+    diff = x.n - (z.n - z.o)                    [6]
+
+Now
+    z.n =                       by [4]
+    (y + y.s).n =               by #5
+    y.n + y.s =                 since y.n = x.n
    x.n + y.s =                 since z and y have the same tzinfo member,
+                                    y.s = z.s by #2
+    x.n + z.s
+
+Plugging that back into [6] gives
+
+    diff =
+    x.n - ((x.n + z.s) - z.o) =     expanding
+    x.n - x.n - z.s + z.o =         cancelling
+    - z.s + z.o =                   by #2
+    z.d
+
+So diff = z.d.
+
+If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
+spelling we wanted in the endcase described above.  We're done.  Contrarily,
+if z.d = 0, then we have a UTC equivalent, and are also done.
+
+If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
+add to z (in effect, z is in tz's standard time, and we need to shift the
+local clock into tz's daylight time).
+
+Let
+
+    z' = z + z.d = z + diff                     [7]
+
+and we can again ask whether
+
+    z'.n - z'.o = x.n                           [8]
+
+If so, we're done.  If not, the tzinfo class is insane, according to the
+assumptions we've made.  This also requires a bit of proof.  As before, let's
+compute the difference between the LHS and RHS of [8] (and skipping some of
+the justifications for the kinds of substitutions we've done several times
+already):
+
+    diff' = x.n - (z'.n - z'.o) =           replacing z'.n via [7]
+            x.n  - (z.n + diff - z'.o) =    replacing diff via [6]
+            x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
+            x.n - z.n - x.n + z.n - z.o + z'.o =    cancel x.n
+            - z.n + z.n - z.o + z'.o =              cancel z.n
+            - z.o + z'.o =                      #1 twice
+            -z.s - z.d + z'.s + z'.d =          z and z' have same tzinfo
+            z'.d - z.d
+
+So z' is UTC-equivalent to x iff z'.d = z.d at this point.  If they are equal,
+we've found the UTC-equivalent so are done.  In fact, we stop with [7] and
+return z', not bothering to compute z'.d.
+
How could z.d and z'.d differ?  z' = z + z.d [7], so merely moving z' by
+a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
+would have to change the result dst() returns:  we start in DST, and moving
+a little further into it takes us out of DST.
+
+There isn't a sane case where this can happen.  The closest it gets is at
+the end of DST, where there's an hour in UTC with no spelling in a hybrid
+tzinfo class.  In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT.  During
+that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
+UTC) because the docs insist on that, but 0:MM is taken as being in daylight
+time (4:MM UTC).  There is no local time mapping to 5:MM UTC.  The local
+clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
+standard time.  Since that's what the local clock *does*, we want to map both
+UTC hours 5:MM and 6:MM to 1:MM Eastern.  The result is ambiguous
+in local time, but so it goes -- it's the way the local clock works.
+
+When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
+so z=0:MM.  z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
+z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
+(correctly) concludes that z' is not UTC-equivalent to x.
+
+Because we know z.d said z was in daylight time (else [5] would have held and
+we would have stopped then), and we know z.d != z'.d (else [8] would have held
+and we have stopped then), and there are only 2 possible values dst() can
+return in Eastern, it follows that z'.d must be 0 (which it is in the example,
+but the reasoning doesn't depend on the example -- it depends on there being
+two possible dst() outcomes, one zero and the other non-zero).  Therefore
+z' must be in standard time, and is the spelling we want in this case.
+
+Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
+concerned (because it takes z' as being in standard time rather than the
+daylight time we intend here), but returning it gives the real-life "local
+clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
+tz.
+
+When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
+the 1:MM standard time spelling we want.
+
+So how can this break?  One of the assumptions must be violated.  Two
+possibilities:
+
1) [2] effectively says that y.s is invariant across all y belonging to a given
+   time zone.  This isn't true if, for political reasons or continental drift,
+   a region decides to change its base offset from UTC.
+
+2) There may be versions of "double daylight" time where the tail end of
+   the analysis gives up a step too early.  I haven't thought about that
+   enough to say.
+
+In any case, it's clear that the default fromutc() is strong enough to handle
+"almost all" time zones:  so long as the standard offset is invariant, it
+doesn't matter if daylight time transition points change from year to year, or
+if daylight time is skipped in some years; it doesn't matter how large or
+small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst().  So a breaking case must be
+pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
+"""
# Prefer the C accelerated implementation when it is available; the pure
# Python definitions above then serve as the fallback.
try:
    from _datetime import *
except ImportError:
    pass
else:
    # Clean up unused names
    del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
         _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
         _build_struct_time, _call_tzinfo_method, _check_date_fields,
         _check_time_fields, _check_tzinfo_arg, _check_tzname,
         _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
         _days_before_year, _days_in_month, _format_time, _is_leap,
         _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
         _wrap_strftime, _ymd2ord)
    # XXX Since import * above excludes names that start with _,
    # docstring does not get overwritten. In the future, it may be
    # appropriate to maintain a single module level docstring and
    # remove the following line.
    from _datetime import __doc__
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/dbm.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/dbm.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/decimal.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/decimal.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/difflib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/difflib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_encoded_words.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_encoded_words.py
new file mode 100644
index 00000000..9e0cc75b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_encoded_words.py
@@ -0,0 +1,221 @@
+""" Routines for manipulating RFC2047 encoded words.
+
+This is currently a package-private API, but will be considered for promotion
+to a public API if there is demand.
+
+"""
+
# An encoded word looks like this:
+#
+#        =?charset[*lang]?cte?encoded_string?=
+#
+# for more information about charset see the charset module.  Here it is one
+# of the preferred MIME charset names (hopefully; you never know when parsing).
+# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case).  In
+# theory other letters could be used for other encodings, but in practice this
+# (almost?) never happens.  There could be a public API for adding entries
+# to the CTE tables, but YAGNI for now.  'q' is Quoted Printable, 'b' is
+# Base64.  The meaning of encoded_string should be obvious.  'lang' is optional
+# as indicated by the brackets (they are not part of the syntax) but is almost
+# never encountered in practice.
+#
+# The general interface for a CTE decoder is that it takes the encoded_string
+# as its argument, and returns a tuple (cte_decoded_string, defects).  The
+# cte_decoded_string is the original binary that was encoded using the
+# specified cte.  'defects' is a list of MessageDefect instances indicating any
+# problems encountered during conversion.  'charset' and 'lang' are the
+# corresponding strings extracted from the EW, case preserved.
+#
+# The general interface for a CTE encoder is that it takes a binary sequence
+# as input and returns the cte_encoded_string, which is an ascii-only string.
+#
+# Each decoder must also supply a length function that takes the binary
+# sequence as its argument and returns the length of the resulting encoded
+# string.
+#
+# The main API functions for the module are decode, which calls the decoder
+# referenced by the cte specifier, and encode, which adds the appropriate
+# RFC 2047 "chrome" to the encoded string, and can optionally automatically
+# select the shortest possible encoding.  See their docstrings below for
+# details.
+
+import re
+import base64
+import binascii
+import functools
+from string import ascii_letters, digits
+from email import errors
+
# Names exported as this module's (package-private) API.
__all__ = ['decode_q',
           'encode_q',
           'decode_b',
           'encode_b',
           'len_q',
           'len_b',
           'decode',
           'encode',
           ]
+
#
# Quoted Printable
#

# regex based decoder.
_q_pattern = re.compile(br'=([a-fA-F0-9]{2})')

def _q_unquote_match(match):
    # Translate one '=XX' hex escape into the single byte it encodes.
    return bytes([int(match.group(1), 16)])

_q_byte_subber = functools.partial(_q_pattern.sub, _q_unquote_match)

def decode_q(encoded):
    """Q-decode an encoded-word body, returning (decoded_bytes, defects).

    The defects list is always empty: any byte sequence is acceptable
    q-encoded input (unrecognized escapes pass through unchanged).
    """
    # In encoded words, '_' is the encoding of a space (RFC 2047, 4.2).
    despaced = encoded.replace(b'_', b' ')
    return _q_byte_subber(despaced), []
+
+
# dict mapping bytes to their encoded form
class _QByteMap(dict):
    """Lazily-populated map from a byte value to its Q-encoded string."""

    safe = b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')

    def __missing__(self, key):
        # Safe bytes pass through as themselves; everything else becomes
        # an '=XX' hex escape.  Cache the answer for subsequent lookups.
        if key in self.safe:
            encoded = chr(key)
        else:
            encoded = "={:02X}".format(key)
        self[key] = encoded
        return encoded
+
# Single shared, lazily-growing encoding table.
_q_byte_map = _QByteMap()

# In headers spaces are mapped to '_'.
_q_byte_map[ord(' ')] = '_'
+
def encode_q(bstring):
    """Q-encode bstring, returning the encoded form as a str."""
    pieces = [_q_byte_map[byte] for byte in bstring]
    return ''.join(pieces)
+
def len_q(bstring):
    """Return the length encode_q(bstring) would produce, without encoding."""
    total = 0
    for byte in bstring:
        total += len(_q_byte_map[byte])
    return total
+
+
+#
+# Base64
+#
+
def decode_b(encoded):
    """Base64-decode an encoded-word body, returning (bytes, defects)."""
    defects = []
    pad_err = len(encoded) % 4
    if pad_err:
        # Repair missing '=' padding, but record it as a defect.
        defects.append(errors.InvalidBase64PaddingDefect())
        padded_encoded = encoded + b'==='[:4-pad_err]
    else:
        padded_encoded = encoded
    try:
        return base64.b64decode(padded_encoded, validate=True), defects
    except binascii.Error:
        # Since we had correct padding, this must be an invalid char error.
        defects = [errors.InvalidBase64CharactersDefect()]
        # The non-alphabet characters are ignored as far as padding
        # goes, but we don't know how many there are.  So we'll just
        # try various padding lengths until something works.
        for i in 0, 1, 2, 3:
            try:
                return base64.b64decode(encoded+b'='*i, validate=False), defects
            except binascii.Error:
                # Only the first failure also counts as a padding defect.
                if i==0:
                    defects.append(errors.InvalidBase64PaddingDefect())
        else:
            # for/else: every padding length failed.
            # This should never happen.
            raise AssertionError("unexpected binascii.Error")
+
def encode_b(bstring):
    """Base64-encode bstring and return the result as an ASCII string."""
    encoded_bytes = base64.b64encode(bstring)
    return encoded_bytes.decode('ascii')
+
def len_b(bstring):
    """Return the length encode_b(bstring) would produce, without encoding."""
    whole_groups, remainder = divmod(len(bstring), 3)
    # Every 3 input bytes (or nonzero trailing fraction) yield 4 chars.
    return (whole_groups + bool(remainder)) * 4
+
+
# Map a (lowercased) cte specifier to its decoder function.
_cte_decoders = {
    'q': decode_q,
    'b': decode_b,
    }
+
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check
    the syntax and may raise errors if the string is not well formed), and
    returns the encoded_string decoded first from its Content Transfer
    Encoding and then from the resulting bytes into unicode using the
    specified charset.  If the cte-decoded bytes do not successfully decode
    using the specified character set, a defect is added to the defects list
    and the undecodable octets are preserved via the 'surrogateescape'
    error handler.

    The specified charset and language are returned.  The default for
    language, which is rarely if ever encountered, is the empty string.

    """
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    decoder = _cte_decoders[cte.lower()]
    # Undo the ascii-with-surrogates transport form, then the CTE itself.
    bstring, defects = decoder(cte_string.encode('ascii', 'surrogateescape'))
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        # Unknown charset: keep the raw bytes readable as ascii+surrogates.
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
+
+
# Map a cte specifier to its encoder function ...
_cte_encoders = {
    'q': encode_q,
    'b': encode_b,
    }

# ... and to the matching length-estimation function used by encode().
_cte_encode_length = {
    'q': len_q,
    'b': len_b,
    }
+
def encode(string, charset='utf-8', encoding=None, lang=''):
    """Encode string using the CTE encoding that produces the shorter result.

    Produces an RFC 2047/2243 encoded word of the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' is omitted unless the 'lang' parameter is given a value.
    Optional argument charset (defaults to utf-8) specifies the charset to
    use to encode the string to binary before CTE encoding it.  Optional
    argument 'encoding' is the cte specifier for the encoding that should be
    used ('q' or 'b'); if it is None (the default) the encoding which
    produces the shortest encoded sequence is used, except that 'q' is
    preferred if it is up to five characters longer.  Optional argument
    'lang' (default '') gives the RFC 2243 language string to specify in the
    encoded word.

    """
    if charset == 'unknown-8bit':
        bstring = string.encode('ascii', 'surrogateescape')
    else:
        bstring = string.encode(charset)
    if encoding is None:
        # Pick the shorter CTE, with a 5-character bias toward 'q'.
        q_size = _cte_encode_length['q'](bstring)
        b_size = _cte_encode_length['b'](bstring)
        encoding = 'b' if q_size - b_size >= 5 else 'q'
    encoded = _cte_encoders[encoding](bstring)
    lang_suffix = '*' + lang if lang else lang
    return "=?{}{}?{}?{}?=".format(charset, lang_suffix, encoding, encoded)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_parseaddr.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_parseaddr.py
new file mode 100644
index 00000000..cdfa3729
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_parseaddr.py
@@ -0,0 +1,540 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Contact: email-sig@python.org
+
+"""Email address parsing code.
+
+Lifted directly from rfc822.py.  This should eventually be rewritten.
+"""
+
# Legacy public API, re-exported through email.utils.
__all__ = [
    'mktime_tz',
    'parsedate',
    'parsedate_tz',
    'quote',
    ]
+
+import time, calendar
+
# Separator constants used by the address-list builders below.
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '

# Parse a date field
# Abbreviated names (1..12) followed by full names (13..24); the index is
# reduced modulo 12 by _parsedate_tz.
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']

_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.

# Values are signed hhmm offsets (e.g. -400 == UTC-04:00); they are
# converted to seconds later by _parsedate_tz.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }
+
+
def parsedate_tz(data):
    """Convert a date string to a 10-element time tuple.

    Like _parsedate_tz, except that an unknown ('-0000') zone offset is
    reported as 0 instead of None.
    """
    parsed = _parsedate_tz(data)
    if not parsed:
        return
    if parsed[9] is None:
        parsed[9] = 0
    return tuple(parsed)
+
def _parsedate_tz(data):
    """Convert date to extended time tuple.

    The last (additional) element is the time zone offset in seconds, except if
    the timezone was specified as -0000.  In that case the last element is
    None.  This indicates a UTC timestamp that explicitly declaims knowledge of
    the source timezone, as opposed to a +0000 timestamp that indicates the
    source timezone really was UTC.

    """
    if not data:
        return
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(',') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i == -1:
            i = s.find('-')
        if i > 0:
            # Split a time glued to its zone, e.g. '12:00:00-0500'.
            data[3:] = [s[:i], s[i:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Maybe day and month were swapped ('Mar 10' vs '10 Mar').
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Full month names occupy indices 13..24 of _monthnames.
        mm -= 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields appeared in swapped order.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and zone fields appeared in swapped order.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    elif len(tm) == 1 and '.' in tm[0]:
        # Some non-compliant MUAs use '.' to separate time elements.
        tm = tm[0].split('.')
        if len(tm) == 2:
            [thh, tmm] = tm
            tss = 0
        elif len(tm) == 3:
            [thh, tmm, tss] = tm
        # NOTE(review): if the dotted split yields any other length, thh/tmm
        # stay unbound and the int() calls below raise UnboundLocalError
        # instead of returning None -- confirm whether callers tolerate that.
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
        # '-0000' declaims knowledge of the zone: report None, not 0.
        if tzoffset==0 and tz.startswith('-'):
            tzoffset = None
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
+
+
def parsedate(data):
    """Convert a time string to a 9-element time tuple (zone dropped)."""
    parsed = parsedate_tz(data)
    if not isinstance(parsed, tuple):
        # Pass through the failure value (None) unchanged.
        return parsed
    return parsed[:9]
+
+
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
    zone_offset = data[9]
    if zone_offset is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Interpret the first fields as UTC, then remove the zone offset.
    return calendar.timegm(data) - zone_offset
+
+
def quote(str):
    """Prepare string to be used in a quoted string.

    Turns backslash and double quote characters into quoted pairs.  These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    # Backslashes first, so the escapes added for quotes are not doubled.
    backslashed = str.replace('\\', '\\\\')
    return backslashed.replace('"', '\\"')
+
+
+class AddrlistClass:
+    """Address parser class by Ben Escoto.
+
+    To understand what this class does, it helps to have a copy of RFC 2822 in
+    front of you.
+
+    Note: this class interface is deprecated and may be removed in the future.
+    Use email.utils.AddressList instead.
+    """
+
+    def __init__(self, field):
+        """Initialize a new instance.
+
+        `field' is an unparsed address header field, containing
+        one or more addresses.
+        """
+        # Characters that delimit address structure and terminate atoms.
+        self.specials = '()<>@,:;.\"[]'
+        # Current parse position within self.field.
+        self.pos = 0
+        self.LWS = ' \t'
+        self.CR = '\r\n'
+        # Folding whitespace: linear whitespace plus line-break characters.
+        self.FWS = self.LWS + self.CR
+        self.atomends = self.specials + self.LWS + self.CR
+        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
+        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
+        # syntax, so allow dots in phrases.
+        self.phraseends = self.atomends.replace('.', '')
+        self.field = field
+        # Parenthesized comments collected while parsing the current address.
+        self.commentlist = []
+
+    def gotonext(self):
+        """Skip white space and extract comments.
+
+        Returns the skipped linear whitespace (CR/LF excluded) joined into
+        a single string, so callers can preserve significant spacing.
+        """
+        wslist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS + '\n\r':
+                if self.field[self.pos] not in '\n\r':
+                    wslist.append(self.field[self.pos])
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            else:
+                break
+        return EMPTYSTRING.join(wslist)
+
+    def getaddrlist(self):
+        """Parse all addresses.
+
+        Returns a list containing all of the addresses.
+        """
+        result = []
+        while self.pos < len(self.field):
+            ad = self.getaddress()
+            if ad:
+                result += ad
+            else:
+                # Unparseable fragment: record an empty (realname, addr)
+                # pair so the caller can see something was there.
+                result.append(('', ''))
+        return result
+
+    def getaddress(self):
+        """Parse the next address."""
+        self.commentlist = []
+        self.gotonext()
+
+        # Remember the starting state so we can restart the scan when the
+        # phrase turns out to be the local part of a bare addr-spec.
+        oldpos = self.pos
+        oldcl = self.commentlist
+        plist = self.getphraselist()
+
+        self.gotonext()
+        returnlist = []
+
+        if self.pos >= len(self.field):
+            # Bad email address technically, no domain.
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+
+        elif self.field[self.pos] in '.@':
+            # email address is just an addrspec
+            # this isn't very efficient since we start over
+            self.pos = oldpos
+            self.commentlist = oldcl
+            addrspec = self.getaddrspec()
+            returnlist = [(SPACE.join(self.commentlist), addrspec)]
+
+        elif self.field[self.pos] == ':':
+            # address is a group
+            returnlist = []
+
+            fieldlen = len(self.field)
+            self.pos += 1
+            # Recursively collect member addresses until the closing ';'.
+            while self.pos < len(self.field):
+                self.gotonext()
+                if self.pos < fieldlen and self.field[self.pos] == ';':
+                    self.pos += 1
+                    break
+                returnlist = returnlist + self.getaddress()
+
+        elif self.field[self.pos] == '<':
+            # Address is a phrase then a route addr
+            routeaddr = self.getrouteaddr()
+
+            if self.commentlist:
+                returnlist = [(SPACE.join(plist) + ' (' +
+                               ' '.join(self.commentlist) + ')', routeaddr)]
+            else:
+                returnlist = [(SPACE.join(plist), routeaddr)]
+
+        else:
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+            elif self.field[self.pos] in self.specials:
+                # Skip a stray special character so parsing can make progress.
+                self.pos += 1
+
+        self.gotonext()
+        # Consume a trailing comma separating this address from the next.
+        if self.pos < len(self.field) and self.field[self.pos] == ',':
+            self.pos += 1
+        return returnlist
+
+    def getrouteaddr(self):
+        """Parse a route address (Return-path value).
+
+        This method just skips all the route stuff and returns the addrspec.
+        """
+        if self.field[self.pos] != '<':
+            return
+
+        expectroute = False
+        self.pos += 1
+        self.gotonext()
+        adlist = ''
+        while self.pos < len(self.field):
+            if expectroute:
+                # After an '@' comes a route domain, which we discard.
+                self.getdomain()
+                expectroute = False
+            elif self.field[self.pos] == '>':
+                self.pos += 1
+                break
+            elif self.field[self.pos] == '@':
+                self.pos += 1
+                expectroute = True
+            elif self.field[self.pos] == ':':
+                self.pos += 1
+            else:
+                adlist = self.getaddrspec()
+                self.pos += 1
+                break
+            self.gotonext()
+
+        return adlist
+
+    def getaddrspec(self):
+        """Parse an RFC 2822 addr-spec."""
+        aslist = []
+
+        self.gotonext()
+        while self.pos < len(self.field):
+            preserve_ws = True
+            if self.field[self.pos] == '.':
+                # Whitespace around dots is not significant; drop any
+                # whitespace token we just appended.
+                if aslist and not aslist[-1].strip():
+                    aslist.pop()
+                aslist.append('.')
+                self.pos += 1
+                preserve_ws = False
+            elif self.field[self.pos] == '"':
+                aslist.append('"%s"' % quote(self.getquote()))
+            elif self.field[self.pos] in self.atomends:
+                if aslist and not aslist[-1].strip():
+                    aslist.pop()
+                break
+            else:
+                aslist.append(self.getatom())
+            # Keep intra-local-part whitespace except around dots.
+            ws = self.gotonext()
+            if preserve_ws and ws:
+                aslist.append(ws)
+
+        if self.pos >= len(self.field) or self.field[self.pos] != '@':
+            # No domain part: return just the (possibly empty) local part.
+            return EMPTYSTRING.join(aslist)
+
+        aslist.append('@')
+        self.pos += 1
+        self.gotonext()
+        return EMPTYSTRING.join(aslist) + self.getdomain()
+
+    def getdomain(self):
+        """Get the complete domain name from an address."""
+        sdlist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] == '[':
+                sdlist.append(self.getdomainliteral())
+            elif self.field[self.pos] == '.':
+                self.pos += 1
+                sdlist.append('.')
+            elif self.field[self.pos] in self.atomends:
+                break
+            else:
+                sdlist.append(self.getatom())
+        return EMPTYSTRING.join(sdlist)
+
+    def getdelimited(self, beginchar, endchars, allowcomments=True):
+        """Parse a header fragment delimited by special characters.
+
+        `beginchar' is the start character for the fragment.
+        If self is not looking at an instance of `beginchar' then
+        getdelimited returns the empty string.
+
+        `endchars' is a sequence of allowable end-delimiting characters.
+        Parsing stops when one of these is encountered.
+
+        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
+        within the parsed fragment.
+        """
+        if self.field[self.pos] != beginchar:
+            return ''
+
+        slist = ['']
+        # NOTE: this local flag shadows the module-level quote() function;
+        # it tracks whether the previous character was a backslash.
+        quote = False
+        self.pos += 1
+        while self.pos < len(self.field):
+            if quote:
+                # Character after a backslash is taken literally.
+                slist.append(self.field[self.pos])
+                quote = False
+            elif self.field[self.pos] in endchars:
+                self.pos += 1
+                break
+            elif allowcomments and self.field[self.pos] == '(':
+                slist.append(self.getcomment())
+                continue        # have already advanced pos from getcomment
+            elif self.field[self.pos] == '\\':
+                quote = True
+            else:
+                slist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(slist)
+
+    def getquote(self):
+        """Get a quote-delimited fragment from self's field."""
+        return self.getdelimited('"', '"\r', False)
+
+    def getcomment(self):
+        """Get a parenthesis-delimited fragment from self's field."""
+        return self.getdelimited('(', ')\r', True)
+
+    def getdomainliteral(self):
+        """Parse an RFC 2822 domain-literal."""
+        return '[%s]' % self.getdelimited('[', ']\r', False)
+
+    def getatom(self, atomends=None):
+        """Parse an RFC 2822 atom.
+
+        Optional atomends specifies a different set of end token delimiters
+        (the default is to use self.atomends).  This is used e.g. in
+        getphraselist() since phrase endings must not include the `.' (which
+        is legal in phrases)."""
+        atomlist = ['']
+        if atomends is None:
+            atomends = self.atomends
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in atomends:
+                break
+            else:
+                atomlist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(atomlist)
+
+    def getphraselist(self):
+        """Parse a sequence of RFC 2822 phrases.
+
+        A phrase is a sequence of words, which are in turn either RFC 2822
+        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
+        runs of continuous whitespace into one space.
+        """
+        plist = []
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.FWS:
+                self.pos += 1
+            elif self.field[self.pos] == '"':
+                plist.append(self.getquote())
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] in self.phraseends:
+                break
+            else:
+                plist.append(self.getatom(self.phraseends))
+
+        return plist
+
+class AddressList(AddrlistClass):
+    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
+    def __init__(self, field):
+        AddrlistClass.__init__(self, field)
+        if field:
+            self.addresslist = self.getaddrlist()
+        else:
+            self.addresslist = []
+
+    def __len__(self):
+        return len(self.addresslist)
+
+    def __add__(self, other):
+        # Set union
+        newaddr = AddressList(None)
+        newaddr.addresslist = self.addresslist[:]
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __iadd__(self, other):
+        # Set union, in-place
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                self.addresslist.append(x)
+        return self
+
+    def __sub__(self, other):
+        # Set difference
+        newaddr = AddressList(None)
+        for x in self.addresslist:
+            if not x in other.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __isub__(self, other):
+        # Set difference, in-place
+        for x in other.addresslist:
+            if x in self.addresslist:
+                self.addresslist.remove(x)
+        return self
+
+    def __getitem__(self, index):
+        # Make indexing, slices, and 'in' work
+        return self.addresslist[index]
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_policybase.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_policybase.py
new file mode 100644
index 00000000..bb533220
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/_policybase.py
@@ -0,0 +1,359 @@
+"""Policy framework for the email package.
+
+Allows fine grained feature control of how the package parses and emits data.
+"""
+
+import abc
+from email import header
+from email import charset as _charset
+from email.utils import _has_surrogates
+
+__all__ = [
+    'Policy',
+    'Compat32',
+    'compat32',
+    ]
+
+
+class _PolicyBase:
+
+    """Policy Object basic framework.
+
+    This class is useless unless subclassed.  A subclass should define
+    class attributes with defaults for any values that are to be
+    managed by the Policy object.  The constructor will then allow
+    non-default values to be set for these attributes at instance
+    creation time.  The instance will be callable, taking these same
+    attributes keyword arguments, and returning a new instance
+    identical to the called instance except for those values changed
+    by the keyword arguments.  Instances may be added, yielding new
+    instances with any non-default values from the right hand
+    operand overriding those in the left hand operand.  That is,
+
+        A + B == A(<non-default values of B>)
+
+    The repr of an instance can be used to reconstruct the object
+    if and only if the repr of the values can be used to reconstruct
+    those values.
+
+    """
+
+    def __init__(self, **kw):
+        """Create new Policy, possibly overriding some defaults.
+
+        See class docstring for a list of overridable attributes.
+
+        """
+        for name, value in kw.items():
+            if hasattr(self, name):
+                # Go straight to the base __setattr__: our own __setattr__
+                # (below) makes instances read-only after construction.
+                super(_PolicyBase,self).__setattr__(name, value)
+            else:
+                raise TypeError(
+                    "{!r} is an invalid keyword argument for {}".format(
+                        name, self.__class__.__name__))
+
+    def __repr__(self):
+        # Only instance-level (i.e. non-default) attributes appear here.
+        args = [ "{}={!r}".format(name, value)
+                 for name, value in self.__dict__.items() ]
+        return "{}({})".format(self.__class__.__name__, ', '.join(args))
+
+    def clone(self, **kw):
+        """Return a new instance with specified attributes changed.
+
+        The new instance has the same attribute values as the current object,
+        except for the changes passed in as keyword arguments.
+
+        """
+        # __new__ skips __init__; object.__setattr__ bypasses the read-only
+        # __setattr__ so the copy can be populated.
+        newpolicy = self.__class__.__new__(self.__class__)
+        for attr, value in self.__dict__.items():
+            object.__setattr__(newpolicy, attr, value)
+        for attr, value in kw.items():
+            if not hasattr(self, attr):
+                raise TypeError(
+                    "{!r} is an invalid keyword argument for {}".format(
+                        attr, self.__class__.__name__))
+            object.__setattr__(newpolicy, attr, value)
+        return newpolicy
+
+    def __setattr__(self, name, value):
+        # Policies are immutable: any ordinary assignment after
+        # construction raises, with a message depending on whether the
+        # attribute exists at all.
+        if hasattr(self, name):
+            msg = "{!r} object attribute {!r} is read-only"
+        else:
+            msg = "{!r} object has no attribute {!r}"
+        raise AttributeError(msg.format(self.__class__.__name__, name))
+
+    def __add__(self, other):
+        """Non-default values from right operand override those from left.
+
+        The object returned is a new instance of the subclass.
+
+        """
+        return self.clone(**other.__dict__)
+
+
+def _append_doc(doc, added_doc):
+    doc = doc.rsplit('\n', 1)[0]
+    added_doc = added_doc.split('\n', 1)[1]
+    return doc + '\n' + added_doc
+
+def _extend_docstrings(cls):
+    # NOTE(review): the early return below disables the docstring-extension
+    # machinery for this port (presumably because MicroPython may strip
+    # __doc__ attributes -- confirm).  The remainder is the original CPython
+    # implementation, kept for reference but currently unreachable.
+    return cls
+    if cls.__doc__ and cls.__doc__.startswith('+'):
+        cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
+    for name, attr in cls.__dict__.items():
+        if attr.__doc__ and attr.__doc__.startswith('+'):
+            for c in (c for base in cls.__bases__ for c in base.mro()):
+                doc = getattr(getattr(c, name), '__doc__')
+                if doc:
+                    attr.__doc__ = _append_doc(doc, attr.__doc__)
+                    break
+    return cls
+
+
+# NOTE(review): the ABCMeta metaclass is commented out in this port, so the
+# @abc.abstractmethod decorators below are not enforced at instantiation
+# time; the abstract methods simply raise NotImplementedError if called.
+class Policy(_PolicyBase):#, metaclass=abc.ABCMeta):
+
+    r"""Controls for how messages are interpreted and formatted.
+
+    Most of the classes and many of the methods in the email package accept
+    Policy objects as parameters.  A Policy object contains a set of values and
+    functions that control how input is interpreted and how output is rendered.
+    For example, the parameter 'raise_on_defect' controls whether or not an RFC
+    violation results in an error being raised or not, while 'max_line_length'
+    controls the maximum length of output lines when a Message is serialized.
+
+    Any valid attribute may be overridden when a Policy is created by passing
+    it as a keyword argument to the constructor.  Policy objects are immutable,
+    but a new Policy object can be created with only certain values changed by
+    calling the Policy instance with keyword arguments.  Policy objects can
+    also be added, producing a new Policy object in which the non-default
+    attributes set in the right hand operand overwrite those specified in the
+    left operand.
+
+    Settable attributes:
+
+    raise_on_defect     -- If true, then defects should be raised as errors.
+                           Default: False.
+
+    linesep             -- string containing the value to use as separation
+                           between output lines.  Default '\n'.
+
+    cte_type            -- Type of allowed content transfer encodings
+
+                           7bit  -- ASCII only
+                           8bit  -- Content-Transfer-Encoding: 8bit is allowed
+
+                           Default: 8bit.  Also controls the disposition of
+                           (RFC invalid) binary data in headers; see the
+                           documentation of the binary_fold method.
+
+    max_line_length     -- maximum length of lines, excluding 'linesep',
+                           during serialization.  None or 0 means no line
+                           wrapping is done.  Default is 78.
+
+    """
+
+    # Class-level defaults; per-instance overrides are set via __init__/clone.
+    raise_on_defect = False
+    linesep = '\n'
+    cte_type = '8bit'
+    max_line_length = 78
+
+    def handle_defect(self, obj, defect):
+        """Based on policy, either raise defect or call register_defect.
+
+            handle_defect(obj, defect)
+
+        defect should be a Defect subclass, but in any case must be an
+        Exception subclass.  obj is the object on which the defect should be
+        registered if it is not raised.  If the raise_on_defect is True, the
+        defect is raised as an error, otherwise the object and the defect are
+        passed to register_defect.
+
+        This method is intended to be called by parsers that discover defects.
+        The email package parsers always call it with Defect instances.
+
+        """
+        if self.raise_on_defect:
+            raise defect
+        self.register_defect(obj, defect)
+
+    def register_defect(self, obj, defect):
+        """Record 'defect' on 'obj'.
+
+        Called by handle_defect if raise_on_defect is False.  This method is
+        part of the Policy API so that Policy subclasses can implement custom
+        defect handling.  The default implementation calls the append method of
+        the defects attribute of obj.  The objects used by the email package by
+        default that get passed to this method will always have a defects
+        attribute with an append method.
+
+        """
+        obj.defects.append(defect)
+
+    def header_max_count(self, name):
+        """Return the maximum allowed number of headers named 'name'.
+
+        Called when a header is added to a Message object.  If the returned
+        value is not 0 or None, and there are already a number of headers with
+        the name 'name' equal to the value returned, a ValueError is raised.
+
+        Because the default behavior of Message's __setitem__ is to append the
+        value to the list of headers, it is easy to create duplicate headers
+        without realizing it.  This method allows certain headers to be limited
+        in the number of instances of that header that may be added to a
+        Message programmatically.  (The limit is not observed by the parser,
+        which will faithfully produce as many headers as exist in the message
+        being parsed.)
+
+        The default implementation returns None for all header names.
+        """
+        return None
+
+    @abc.abstractmethod
+    def header_source_parse(self, sourcelines):
+        """Given a list of linesep terminated strings constituting the lines of
+        a single header, return the (name, value) tuple that should be stored
+        in the model.  The input lines should retain their terminating linesep
+        characters.  The lines passed in by the email package may contain
+        surrogateescaped binary data.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def header_store_parse(self, name, value):
+        """Given the header name and the value provided by the application
+        program, return the (name, value) that should be stored in the model.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def header_fetch_parse(self, name, value):
+        """Given the header name and the value from the model, return the value
+        to be returned to the application program that is requesting that
+        header.  The value passed in by the email package may contain
+        surrogateescaped binary data if the lines were parsed by a BytesParser.
+        The returned value should not contain any surrogateescaped data.
+
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def fold(self, name, value):
+        """Given the header name and the value from the model, return a string
+        containing linesep characters that implement the folding of the header
+        according to the policy controls.  The value passed in by the email
+        package may contain surrogateescaped binary data if the lines were
+        parsed by a BytesParser.  The returned value should not contain any
+        surrogateescaped data.
+
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def fold_binary(self, name, value):
+        """Given the header name and the value from the model, return binary
+        data containing linesep characters that implement the folding of the
+        header according to the policy controls.  The value passed in by the
+        email package may contain surrogateescaped binary data.
+
+        """
+        raise NotImplementedError
+
+
+@_extend_docstrings
+class Compat32(Policy):
+
+    """+
+    This particular policy is the backward compatibility Policy.  It
+    replicates the behavior of the email package version 5.1.
+    """
+
+    def _sanitize_header(self, name, value):
+        # If the header value contains surrogates, return a Header using
+        # the unknown-8bit charset to encode the bytes as encoded words.
+        if not isinstance(value, str):
+            # Assume it is already a header object
+            return value
+        if _has_surrogates(value):
+            return header.Header(value, charset=_charset.UNKNOWN8BIT,
+                                 header_name=name)
+        else:
+            return value
+
+    def header_source_parse(self, sourcelines):
+        """+
+        The name is parsed as everything up to the ':' and returned unmodified.
+        The value is determined by stripping leading whitespace off the
+        remainder of the first line, joining all subsequent lines together, and
+        stripping any trailing carriage return or linefeed characters.
+
+        """
+        name, value = sourcelines[0].split(':', 1)
+        value = value.lstrip(' \t') + ''.join(sourcelines[1:])
+        return (name, value.rstrip('\r\n'))
+
+    def header_store_parse(self, name, value):
+        """+
+        The name and value are returned unmodified.
+        """
+        return (name, value)
+
+    def header_fetch_parse(self, name, value):
+        """+
+        If the value contains binary data, it is converted into a Header object
+        using the unknown-8bit charset.  Otherwise it is returned unmodified.
+        """
+        return self._sanitize_header(name, value)
+
+    def fold(self, name, value):
+        """+
+        Headers are folded using the Header folding algorithm, which preserves
+        existing line breaks in the value, and wraps each resulting line to the
+        max_line_length.  Non-ASCII binary data are CTE encoded using the
+        unknown-8bit charset.
+
+        """
+        return self._fold(name, value, sanitize=True)
+
+    def fold_binary(self, name, value):
+        """+
+        Headers are folded using the Header folding algorithm, which preserves
+        existing line breaks in the value, and wraps each resulting line to the
+        max_line_length.  If cte_type is 7bit, non-ascii binary data is CTE
+        encoded using the unknown-8bit charset.  Otherwise the original source
+        header is used, with its existing line breaks and/or binary data.
+
+        """
+        folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
+        # surrogateescape round-trips any raw bytes smuggled into the str.
+        return folded.encode('ascii', 'surrogateescape')
+
+    def _fold(self, name, value, sanitize):
+        # Build "Name: folded-value<linesep>" with the legacy Header folder.
+        parts = []
+        parts.append('%s: ' % name)
+        if isinstance(value, str):
+            if _has_surrogates(value):
+                if sanitize:
+                    h = header.Header(value,
+                                      charset=_charset.UNKNOWN8BIT,
+                                      header_name=name)
+                else:
+                    # If we have raw 8bit data in a byte string, we have no idea
+                    # what the encoding is.  There is no safe way to split this
+                    # string.  If it's ascii-subset, then we could do a normal
+                    # ascii split, but if it's multibyte then we could break the
+                    # string.  There's no way to know so the least harm seems to
+                    # be to not split the string and risk it being too long.
+                    parts.append(value)
+                    h = None
+            else:
+                h = header.Header(value, header_name=name)
+        else:
+            # Assume it is a Header-like object.
+            h = value
+        if h is not None:
+            parts.append(h.encode(linesep=self.linesep,
+                                  maxlinelen=self.max_line_length))
+        parts.append(self.linesep)
+        return ''.join(parts)
+
+
+# Default policy instance used throughout the email package.
+compat32 = Compat32()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/base64mime.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/base64mime.py
new file mode 100644
index 00000000..f3bbac1c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/base64mime.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Base64 content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
+characters encoding known as Base64.
+
+It is used in the MIME standards for email to attach images, audio, and text
+using some 8-bit character sets to messages.
+
+This module provides an interface to encode and decode both headers and bodies
+with Base64 encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:, From:, Cc:, etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character conversion
+necessary for proper internationalized headers; it only does dumb encoding and
+decoding.  To deal with the various line wrapping issues, use the email.header
+module.
+"""
+
+__all__ = [
+    'body_decode',
+    'body_encode',
+    'decode',
+    'decodestring',
+    'header_encode',
+    'header_length',
+    ]
+
+
+from base64 import b64encode
+from binascii import b2a_base64, a2b_base64
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
+# See also Charset.py
+MISC_LEN = 7
+
+
+
+# Helpers
+def header_length(bytearray):
+    """Return the length of s when it is encoded with base64."""
+    groups_of_3, leftover = divmod(len(bytearray), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    n = groups_of_3 * 4
+    if leftover:
+        n += 4
+    return n
+
+
+
+def header_encode(header_bytes, charset='iso-8859-1'):
+    """Encode a single header line with Base64 encoding in a given charset.
+
+    charset names the character set to use to encode the header.  It defaults
+    to iso-8859-1.  Base64 encoding is defined in RFC 2045.
+    """
+    if not header_bytes:
+        return ""
+    if isinstance(header_bytes, str):
+        header_bytes = header_bytes.encode(charset)
+    encoded = b64encode(header_bytes).decode("ascii")
+    return '=?%s?b?%s?=' % (charset, encoded)
+
+
+
+def body_encode(s, maxlinelen=76, eol=NL):
+    r"""Encode a string with base64.
+
+    Each line will be wrapped at, at most, maxlinelen characters (defaults to
+    76 characters).
+
+    Each line of encoded text will end with eol, which defaults to "\n".  Set
+    this to "\r\n" if you will be using the result of this function directly
+    in an email.
+    """
+    if not s:
+        return s
+
+    encvec = []
+    max_unencoded = maxlinelen * 3 // 4
+    for i in range(0, len(s), max_unencoded):
+        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
+        # adding a newline to the encoded string?
+        enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
+        if enc.endswith(NL) and eol != NL:
+            enc = enc[:-1] + eol
+        encvec.append(enc)
+    return EMPTYSTRING.join(encvec)
+
+
+
+def decode(string):
+    """Decode a raw base64 string, returning a bytes object.
+
+    This function does not parse a full MIME header value encoded with
+    base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
+    level email.header class for that functionality.
+    """
+    if not string:
+        return bytes()
+    elif isinstance(string, str):
+        return a2b_base64(string.encode('raw-unicode-escape'))
+    else:
+        return a2b_base64(string)
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+# these are plain aliases of decode(); callers may use any of the three
+# names interchangeably.
+body_decode = decode
+decodestring = decode
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/charset.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/charset.py
new file mode 100644
index 00000000..892bab54
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/charset.py
@@ -0,0 +1,412 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
# Public API of this module: the Charset class plus the registry helpers.
__all__ = [
    'Charset',
    'add_alias',
    'add_charset',
    'add_codec',
    ]
+
+from functools import partial
+
+import email.base64mime
+import email.quoprimime
+
+from email import errors
+from email.encoders import encode_7or8bit
+
+
+
# Flags for types of header encodings
QP          = 1 # Quoted-Printable
BASE64      = 2 # Base64
SHORTEST    = 3 # the shorter of QP and base64, but only for headers

# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
RFC2047_CHROME_LEN = 7

DEFAULT_CHARSET = 'us-ascii'
# Pseudo-charset for text carrying surrogate-escaped raw bytes (see _encode).
UNKNOWN8BIT = 'unknown-8bit'
EMPTYSTRING = ''



# Defaults
# Maps each canonical charset name to a 3-tuple:
#   (header encoding flag, body encoding flag, output conversion charset).
CHARSETS = {
    # input        header enc  body enc output conv
    'iso-8859-1':  (QP,        QP,      None),
    'iso-8859-2':  (QP,        QP,      None),
    'iso-8859-3':  (QP,        QP,      None),
    'iso-8859-4':  (QP,        QP,      None),
    # iso-8859-5 is Cyrillic, and not especially used
    # iso-8859-6 is Arabic, also not particularly used
    # iso-8859-7 is Greek, QP will not make it readable
    # iso-8859-8 is Hebrew, QP will not make it readable
    'iso-8859-9':  (QP,        QP,      None),
    'iso-8859-10': (QP,        QP,      None),
    # iso-8859-11 is Thai, QP will not make it readable
    'iso-8859-13': (QP,        QP,      None),
    'iso-8859-14': (QP,        QP,      None),
    'iso-8859-15': (QP,        QP,      None),
    'iso-8859-16': (QP,        QP,      None),
    'windows-1252':(QP,        QP,      None),
    'viscii':      (QP,        QP,      None),
    'us-ascii':    (None,      None,    None),
    'big5':        (BASE64,    BASE64,  None),
    'gb2312':      (BASE64,    BASE64,  None),
    'euc-jp':      (BASE64,    None,    'iso-2022-jp'),
    'shift_jis':   (BASE64,    None,    'iso-2022-jp'),
    'iso-2022-jp': (BASE64,    None,    None),
    'koi8-r':      (BASE64,    BASE64,  None),
    'utf-8':       (SHORTEST,  BASE64, 'utf-8'),
    }

# Aliases for other commonly-used names for character sets.  Map
# them to the real ones used in email.
ALIASES = {
    'latin_1': 'iso-8859-1',
    'latin-1': 'iso-8859-1',
    'latin_2': 'iso-8859-2',
    'latin-2': 'iso-8859-2',
    'latin_3': 'iso-8859-3',
    'latin-3': 'iso-8859-3',
    'latin_4': 'iso-8859-4',
    'latin-4': 'iso-8859-4',
    'latin_5': 'iso-8859-9',
    'latin-5': 'iso-8859-9',
    'latin_6': 'iso-8859-10',
    'latin-6': 'iso-8859-10',
    'latin_7': 'iso-8859-13',
    'latin-7': 'iso-8859-13',
    'latin_8': 'iso-8859-14',
    'latin-8': 'iso-8859-14',
    'latin_9': 'iso-8859-15',
    'latin-9': 'iso-8859-15',
    'latin_10':'iso-8859-16',
    'latin-10':'iso-8859-16',
    'cp949':   'ks_c_5601-1987',
    'euc_jp':  'euc-jp',
    'euc_kr':  'euc-kr',
    'ascii':   'us-ascii',
    }


# Map charsets to their Unicode codec strings.
CODEC_MAP = {
    'gb2312':      'eucgb2312_cn',
    'big5':        'big5_tw',
    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
    # Let that stuff pass through without conversion to/from Unicode.
    'us-ascii':    None,
    }
+
+
+
+# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Register the email properties of a character set in the global table.

    charset must be the canonical name of the input character set.

    header_enc and body_enc are each one of Charset.QP (quoted-printable),
    Charset.BASE64, Charset.SHORTEST (headers only: whichever of QP or
    base64 turns out shorter), or None for no encoding.  They describe how
    message headers and bodies written in the input charset are encoded.
    Default is no encoding.

    output_charset names the character set the output should be converted
    to; conversion goes input charset -> Unicode -> output charset when
    Charset.convert() is called.  The default is to emit the input charset
    unchanged.

    Both the input and output charsets must have Unicode codec entries in
    this module's charset-to-codec mapping; use add_codec(charset,
    codecname) to teach the module about codecs it does not already know.
    See the codecs module's documentation for more information.
    """
    # SHORTEST only makes sense for headers, where each encoded word stands
    # alone and can pick its own encoding.
    if body_enc == SHORTEST:
        raise ValueError('SHORTEST not allowed for body_enc')
    CHARSETS[charset] = (header_enc, body_enc, output_charset)
+
+
def add_alias(alias, canonical):
    """Record that *alias* (e.g. latin-1) names the canonical character
    set *canonical* (e.g. iso-8859-1).
    """
    ALIASES[alias] = canonical
+
+
def add_codec(charset, codecname):
    """Register the Python codec that converts *charset* to/from Unicode.

    charset is the canonical name of a character set; codecname is any
    codec name acceptable to str.encode() / bytes.decode().
    """
    CODEC_MAP[charset] = codecname
+
+
+
# Helper for encoding strings, mindful that unknown-8bit text may carry
# surrogate-escaped bytes which must round-trip back to raw bytes.
def _encode(string, codec):
    if codec != UNKNOWN8BIT:
        return string.encode(codec)
    return string.encode('ascii', 'surrogateescape')
+
+
+
class Charset:
    """Map character sets to their email properties.

    This class provides information about the requirements imposed on email
    for a specific character set.  It also provides convenience routines for
    converting between character sets, given the availability of the
    applicable codecs.  Given a character set, it will do its best to provide
    information on how to use that character set in an email in an
    RFC-compliant way.

    Certain character sets must be encoded with quoted-printable or base64
    when used in email headers or bodies.  Certain character sets must be
    converted outright, and are not allowed in email.  Instances of this
    module expose the following information about a character set:

    input_charset: The initial character set specified.  Common aliases
                   are converted to their `official' email names (e.g. latin_1
                   is converted to iso-8859-1).  Defaults to 7-bit us-ascii.

    header_encoding: If the character set must be encoded before it can be
                     used in an email header, this attribute will be set to
                     Charset.QP (for quoted-printable), Charset.BASE64 (for
                     base64 encoding), or Charset.SHORTEST for the shortest of
                     QP or BASE64 encoding.  Otherwise, it will be None.

    body_encoding: Same as header_encoding, but describes the encoding for the
                   mail message's body, which indeed may be different than the
                   header encoding.  Charset.SHORTEST is not allowed for
                   body_encoding.

    output_charset: Some character sets must be converted before they can be
                    used in email headers or bodies.  If the input_charset is
                    one of them, this attribute will contain the name of the
                    charset output will be converted to.  Otherwise, it will
                    be None.

    input_codec: The name of the Python codec used to convert the
                 input_charset to Unicode.  If no conversion codec is
                 necessary, this attribute will be None.

    output_codec: The name of the Python codec used to convert Unicode
                  to the output_charset.  If no conversion codec is necessary,
                  this attribute will have the same value as the input_codec.
    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)

    def __str__(self):
        # The canonical (lowercased) charset name doubles as the string form.
        return self.input_charset.lower()

    __repr__ = __str__

    def __eq__(self, other):
        # Equality is by canonical name, so Charset('UTF-8') == 'utf-8'.
        # NOTE(review): __eq__ without __hash__ makes instances unhashable in
        # Python 3 -- confirm no caller stores Charset objects in sets/dicts.
        return str(self) == str(other).lower()

    def __ne__(self, other):
        return not self.__eq__(other)

    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending on
        the encoding used, or it is a function in which case you should call
        the function with a single argument, the Message object being
        encoded.  The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns conversion function otherwise.
        """
        assert self.body_encoding != SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            return encode_7or8bit

    def get_output_charset(self):
        """Return the output character set.

        This is self.output_charset if that is not None, otherwise it is
        self.input_charset.
        """
        return self.output_charset or self.input_charset

    def header_encode(self, string):
        """Header-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        this charset's `header_encoding`.

        :param string: A unicode string for the header.  It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :return: The encoded string, with RFC 2047 chrome.
        """
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        encoder_module = self._get_encoder(header_bytes)
        if encoder_module is None:
            return string
        return encoder_module.header_encode(header_bytes, codec)

    def header_encode_lines(self, string, maxlengths):
        """Header-encode a string by converting it first to bytes.

        This is similar to `header_encode()` except that the string is fit
        into maximum line lengths as given by the argument.

        :param string: A unicode string for the header.  It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :param maxlengths: Maximum line length iterator.  Each element
            returned from this iterator will provide the next maximum line
            length.  This parameter is used as an argument to built-in next()
            and should never be exhausted.  The maximum line lengths should
            not count the RFC 2047 chrome.  These line lengths are only a
            hint; the splitter does the best it can.
        :return: Lines of encoded strings, each with RFC 2047 chrome.
        """
        # See which encoding we should use.
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        encoder_module = self._get_encoder(header_bytes)
        encoder = partial(encoder_module.header_encode, charset=codec)
        # Calculate the number of characters that the RFC 2047 chrome will
        # contribute to each line.
        charset = self.get_output_charset()
        extra = len(charset) + RFC2047_CHROME_LEN
        # Now comes the hard part.  We must encode bytes but we can't split on
        # bytes because some character sets are variable length and each
        # encoded word must stand on its own.  So the problem is you have to
        # encode to bytes to figure out this word's length, but you must split
        # on characters.  This causes two problems: first, we don't know how
        # many octets a specific substring of unicode characters will get
        # encoded to, and second, we don't know how many ASCII characters
        # those octets will get encoded to.  Unless we try it.  Which seems
        # inefficient.  In the interest of being correct rather than fast (and
        # in the hope that there will be few encoded headers in any such
        # message), brute force it. :(
        lines = []
        current_line = []
        maxlen = next(maxlengths) - extra
        for character in string:
            current_line.append(character)
            this_line = EMPTYSTRING.join(current_line)
            # NOTE(review): the length probe encodes with the output charset
            # *name* ('charset'), not 'codec' -- matches upstream, but verify
            # for charsets whose codec name differs from the charset name.
            length = encoder_module.header_length(_encode(this_line, charset))
            if length > maxlen:
                # This last character doesn't fit so pop it off.
                current_line.pop()
                # Does nothing fit on the first line?
                if not lines and not current_line:
                    lines.append(None)
                else:
                    # NOTE(review): 'separator' is assigned but never used
                    # (the same dead assignment exists upstream).
                    separator = (' ' if lines else '')
                    joined_line = EMPTYSTRING.join(current_line)
                    header_bytes = _encode(joined_line, codec)
                    lines.append(encoder(header_bytes))
                current_line = [character]
                maxlen = next(maxlengths) - extra
        joined_line = EMPTYSTRING.join(current_line)
        header_bytes = _encode(joined_line, codec)
        lines.append(encoder(header_bytes))
        return lines

    def _get_encoder(self, header_bytes):
        # Pick the encoder module (or None for no encoding) that
        # header_encoding dictates; SHORTEST measures both candidates.
        if self.header_encoding == BASE64:
            return email.base64mime
        elif self.header_encoding == QP:
            return email.quoprimime
        elif self.header_encoding == SHORTEST:
            len64 = email.base64mime.header_length(header_bytes)
            lenqp = email.quoprimime.header_length(header_bytes)
            if len64 < lenqp:
                return email.base64mime
            else:
                return email.quoprimime
        else:
            return None

    def body_encode(self, string):
        """Body-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        self.body_encoding.  If body_encoding is None, we assume the
        output charset is a 7bit encoding, so re-encoding the decoded
        string using the ascii codec produces the correct string version
        of the content.
        """
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        if self.body_encoding is BASE64:
            if isinstance(string, str):
                string = string.encode(self.output_charset)
            return email.base64mime.body_encode(string)
        elif self.body_encoding is QP:
            # quoprimime.body_encode takes a string, but operates on it as if
            # it were a list of byte codes.  For a (minimal) history on why
            # this is so, see changeset 0cf700464177.  To correctly encode a
            # character set, then, we must turn it into pseudo bytes via the
            # latin1 charset, which will encode any byte as a single code point
            # between 0 and 255, which is what body_encode is expecting.
            #
            # Note that this clause doesn't handle the case of a _payload that
            # is already bytes.  It never did, and the semantics of _payload
            # being bytes has never been nailed down, so fixing that is a
            # longer term TODO.
            if isinstance(string, str):
                string = string.encode(self.output_charset).decode('latin1')
            return email.quoprimime.body_encode(string)
        else:
            if isinstance(string, str):
                string = string.encode(self.output_charset).decode('ascii')
            return string
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/encoders.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/encoders.py
new file mode 100644
index 00000000..f9657f0a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/encoders.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Encodings and related functions."""
+
+__all__ = [
+    'encode_7or8bit',
+    'encode_base64',
+    'encode_noop',
+    'encode_quopri',
+    ]
+
+
+from base64 import encodebytes as _bencode
+from quopri import encodestring as _encodestring
+
+
+
+def _qencode(s):
+    enc = _encodestring(s, quotetabs=True)
+    # Must encode spaces, which quopri.encodestring() doesn't do
+    return enc.replace(b' ', b'=20')
+
+
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    raw = msg.get_payload(decode=True)
    msg.set_payload(str(_bencode(raw), 'ascii'))
    msg['Content-Transfer-Encoding'] = 'base64'
+
+
+
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    raw = msg.get_payload(decode=True)
    msg.set_payload(_qencode(raw))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
+
+
+
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    payload = msg.get_payload(decode=True)
    if payload is None:
        # There's no payload.  For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # Cheap probe: if the data survives an ASCII round trip it is 7bit.
    try:
        payload.encode('ascii') if isinstance(payload, str) else payload.decode('ascii')
    except UnicodeError:
        cte = '8bit'
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* is non-ASCII but encodes to a 7-bit representation
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            cte = '7bit'
        msg['Content-Transfer-Encoding'] = cte
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
+
+
+
def encode_noop(msg):
    """Leave the message's payload and headers untouched."""
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/errors.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/errors.py
new file mode 100644
index 00000000..d5444f6e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/errors.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""email package exception classes."""
+
+
class MessageError(Exception):
    """Root of the email package's exception hierarchy."""


class MessageParseError(MessageError):
    """Base class for errors raised while parsing a message."""


class HeaderParseError(MessageParseError):
    """Raised when the header block cannot be parsed."""


class BoundaryError(MessageParseError):
    """Raised when a terminating MIME boundary cannot be found."""


class MultipartConversionError(MessageError):  # upstream also mixes in TypeError
    """Raised when conversion to a multipart is prohibited."""


class CharsetError(MessageError):
    """Raised when an illegal charset is given."""
+
+
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
    """Base class for a message defect."""

    def __init__(self, line=None):
        # Remember the offending input line (None when not applicable).
        self.line = line
        if line is not None:
            super().__init__(line)
+
class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be multipart but supplied no boundary parameter."""

class StartBoundaryNotFoundDefect(MessageDefect):
    """The start boundary named in the headers never appeared in the body."""

class CloseBoundaryNotFoundDefect(MessageDefect):
    """A start boundary was present but its closing boundary was missing."""

class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """The message's first header line was a continuation line."""

class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' envelope header appeared inside the header block."""

class MissingHeaderBodySeparatorDefect(MessageDefect):
    """A line with no leading whitespace and no colon appeared before the
    blank line separating headers from the body."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect

class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be multipart yet contained no subparts."""

class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
    """The multipart itself carried an invalid content transfer encoding."""

class UndecodableBytesDefect(MessageDefect):
    """A header contained bytes that could not be decoded."""

class InvalidBase64PaddingDefect(MessageDefect):
    """A base64-encoded sequence had an incorrect length."""

class InvalidBase64CharactersDefect(MessageDefect):
    """A base64-encoded sequence contained characters outside the alphabet."""
+
# These errors are specific to header parsing.

class HeaderDefect(MessageDefect):
    """Base class for defects found while parsing headers."""

    def __init__(self, *args, **kw):
        # Straight pass-through; exists as an anchor for header defects.
        super().__init__(*args, **kw)
+
class InvalidHeaderDefect(HeaderDefect):
    """The header is not valid; the defect's message gives the details."""

class HeaderMissingRequiredValue(HeaderDefect):
    """A header that requires a value was given none."""

class NonPrintableDefect(HeaderDefect):
    """ASCII characters outside the ascii-printable range were found."""

    def __init__(self, non_printables):
        # Keep the offending characters around for reporting.
        self.non_printables = non_printables
        super().__init__(non_printables)

    def __str__(self):
        return ("the following ASCII non-printables found in header: "
            "{}".format(self.non_printables))

class ObsoleteHeaderDefect(HeaderDefect):
    """The header uses syntax that RFC 5322 declares obsolete."""

class NonASCIILocalPartDefect(HeaderDefect):
    """local_part contains non-ASCII characters."""
    # This defect only occurs during unicode parsing, not when
    # parsing messages decoded from binary.
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/feedparser.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/feedparser.py
new file mode 100644
index 00000000..12b64ce1
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/feedparser.py
@@ -0,0 +1,516 @@
+# Copyright (C) 2004-2006 Python Software Foundation
+# Authors: Baxter, Wouters and Warsaw
+# Contact: email-sig@python.org
+
+"""FeedParser - An email feed parser.
+
+The feed parser implements an interface for incrementally parsing an email
+message, line by line.  This has advantages for certain applications, such as
+those reading email messages off a socket.
+
+FeedParser.feed() is the primary interface for pushing new data into the
+parser.  It returns when there's nothing more it can do with the available
+data.  When you have no more data to push into the parser, call .close().
+This completes the parsing and returns the root message object.
+
+The other advantage of this parser is that it will never raise a parsing
+exception.  Instead, when it finds something unexpected, it adds a 'defect' to
+the current message.  Defects are just instances that live on the message
+object's .defects attribute.
+"""
+
# NOTE(review): BytesFeedParser is exported here but presumably defined
# further down this module -- confirm it exists past this chunk.
__all__ = ['FeedParser', 'BytesFeedParser']
+
+import re
+
+from email import errors
+from email import message
+from email._policybase import compat32
+
# Line-ending patterns.  Raw strings are used so that "\Z" reaches the re
# engine as the end-of-string anchor without triggering the invalid
# string-escape warning the original non-raw literal provokes on modern
# CPython; the compiled regexes are unchanged.
NLCRE = re.compile(r'\r\n|\r|\n')
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields.  ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'

# Sentinel yielded by the parser generator when it needs more input.
NeedMoreData = object()
+
+
+
class BufferedSubFile(object):
    """A file-ish object that can have new data loaded into it.

    You can also push and pop line-matching predicates onto a stack.  When the
    current predicate matches the current line, a false EOF response
    (i.e. empty string) is returned instead.  This lets the parser adhere to a
    simple abstraction -- it parses until EOF closes the current message.
    """
    def __init__(self):
        # The last partial line pushed into this object.
        self._partial = ''
        # The list of full, pushed lines, in reverse order
        self._lines = []
        # The stack of false-EOF checking predicates.
        self._eofstack = []
        # A flag indicating whether the file has been closed or not.
        self._closed = False

    def push_eof_matcher(self, pred):
        # pred is called with each line; a true result makes readline()
        # report a false EOF (used to stop at MIME boundaries).
        self._eofstack.append(pred)

    def pop_eof_matcher(self):
        # Remove and return the innermost false-EOF predicate.
        return self._eofstack.pop()

    def close(self):
        """Mark the stream closed, flushing any buffered partial line."""
        # Don't forget any trailing partial line.
        self._lines.append(self._partial)
        self._partial = ''
        self._closed = True

    def readline(self):
        """Return the next line, '' at (possibly false) EOF, or the
        NeedMoreData sentinel when more input must be pushed first."""
        if not self._lines:
            if self._closed:
                return ''
            return NeedMoreData
        # Pop the line off the stack and see if it matches the current
        # false-EOF predicate.
        line = self._lines.pop()
        # RFC 2046, section 5.1.2 requires us to recognize outer level
        # boundaries at any level of inner nesting.  Do this, but be sure it's
        # in the order of most to least nested.
        for ateof in self._eofstack[::-1]:
            if ateof(line):
                # We're at the false EOF.  But push the last line back first.
                self._lines.append(line)
                return ''
        return line

    def unreadline(self, line):
        # Let the consumer push a line back into the buffer.
        assert line is not NeedMoreData
        self._lines.append(line)

    def push(self, data):
        """Push some new data into this object."""
        # Handle any previous leftovers
        data, self._partial = self._partial + data, ''
        # Crack into lines, but preserve the newlines on the end of each
        parts = NLCRE_crack.split(data)
        # The *ahem* interesting behaviour of re.split when supplied grouping
        # parentheses is that the last element of the resulting list is the
        # data after the final RE.  In the case of a NL/CR terminated string,
        # this is the empty string.
        self._partial = parts.pop()
        #GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending with \r:
        # is there a \n to follow later?
        if not self._partial and parts and parts[-1].endswith('\r'):
            # Hold the trailing '\r' (plus the text before it) back in
            # _partial until we learn whether the next chunk begins with
            # '\n', so a CRLF split across pushes stays one line ending.
            self._partial = parts.pop(-2)+parts.pop()
        # parts is a list of strings, alternating between the line contents
        # and the eol character(s).  Gather up a list of lines after
        # re-attaching the newlines.
        lines = []
        for i in range(len(parts) // 2):
            lines.append(parts[i*2] + parts[i*2+1])
        self.pushlines(lines)

    def pushlines(self, lines):
        # Reverse and insert at the front of the lines.
        self._lines[:0] = lines[::-1]

    def __iter__(self):
        return self

    def __next__(self):
        # Iteration ends at (possibly false) EOF; the NeedMoreData sentinel
        # is passed through to the caller, who must feed more data.
        line = self.readline()
        if line == '':
            raise StopIteration
        return line
+
+
+
+class FeedParser:
+    """A feed-style parser of email."""
+
+    def __init__(self, _factory=message.Message, policy=compat32):
+        """_factory is called with no arguments to create a new message obj
+
+        The policy keyword specifies a policy object that controls a number of
+        aspects of the parser's operation.  The default policy maintains
+        backward compatibility.
+
+        """
+        self._factory = _factory
+        self.policy = policy
+        try:
+            _factory(policy=self.policy)
+            self._factory_kwds = lambda: {'policy': self.policy}
+        except TypeError:
+            # Assume this is an old-style factory
+            self._factory_kwds = lambda: {}
+        self._input = BufferedSubFile()
+        self._msgstack = []
+        self._parse = self._parsegen().__next__
+        self._cur = None
+        self._last = None
+        self._headersonly = False
+
+    # Non-public interface for supporting Parser's headersonly flag
+    def _set_headersonly(self):
+        self._headersonly = True
+
+    def feed(self, data):
+        """Push more data into the parser."""
+        self._input.push(data)
+        self._call_parse()
+
+    def _call_parse(self):
+        try:
+            self._parse()
+        except StopIteration:
+            pass
+
+    def close(self):
+        """Parse all remaining data and return the root message object."""
+        self._input.close()
+        self._call_parse()
+        root = self._pop_message()
+        assert not self._msgstack
+        # Look for final set of defects
+        if root.get_content_maintype() == 'multipart' \
+               and not root.is_multipart():
+            defect = errors.MultipartInvariantViolationDefect()
+            self.policy.handle_defect(root, defect)
+        return root
+
    def _new_message(self):
        # Create a fresh message object and push it onto the parse stack,
        # making it both the current and the most-recently-created message.
        msg = self._factory(**self._factory_kwds())
        # Parts inside a multipart/digest default to message/rfc822 per MIME.
        if self._cur and self._cur.get_content_type() == 'multipart/digest':
            msg.set_default_type('message/rfc822')
        if self._msgstack:
            # Nested message: attach it to its parent container.
            self._msgstack[-1].attach(msg)
        self._msgstack.append(msg)
        self._cur = msg
        self._last = msg
+
    def _pop_message(self):
        # Finish the innermost message and make its parent (if any) the
        # current message again.  Returns the finished message.
        retval = self._msgstack.pop()
        if self._msgstack:
            self._cur = self._msgstack[-1]
        else:
            self._cur = None
        return retval
+
    def _parsegen(self):
        """Incremental message parser, implemented as a generator.

        Yields NeedMoreData whenever the input buffer runs dry, and
        returns once the current message -- including any nested
        subparts, parsed by recursive delegation to _parsegen() -- has
        been fully consumed.  State lives on self (_cur, _last,
        _msgstack) and on the BufferedSubFile input.
        """
        # Create a new message and start by parsing headers.
        self._new_message()
        headers = []
        # Collect the headers, searching for a line that doesn't match the RFC
        # 2822 header or continuation pattern (including an empty line).
        for line in self._input:
            if line is NeedMoreData:
                yield NeedMoreData
                continue
            if not headerRE.match(line):
                # If we saw the RFC defined header/body separator
                # (i.e. newline), just throw it away. Otherwise the line is
                # part of the body so push it back.
                if not NLCRE.match(line):
                    defect = errors.MissingHeaderBodySeparatorDefect()
                    self.policy.handle_defect(self._cur, defect)
                    self._input.unreadline(line)
                break
            headers.append(line)
        # Done with the headers, so parse them and figure out what we're
        # supposed to see in the body of the message.
        self._parse_headers(headers)
        # Headers-only parsing is a backwards compatibility hack, which was
        # necessary in the older parser, which could raise errors.  All
        # remaining lines in the input are thrown into the message body.
        if self._headersonly:
            lines = []
            while True:
                line = self._input.readline()
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                if line == '':
                    break
                lines.append(line)
            self._cur.set_payload(EMPTYSTRING.join(lines))
            return
        if self._cur.get_content_type() == 'message/delivery-status':
            # message/delivery-status contains blocks of headers separated by
            # a blank line.  We'll represent each header block as a separate
            # nested message object, but the processing is a bit different
            # than standard message/* types because there is no body for the
            # nested messages.  A blank line separates the subparts.
            while True:
                self._input.push_eof_matcher(NLCRE.match)
                for retval in self._parsegen():
                    if retval is NeedMoreData:
                        yield NeedMoreData
                        continue
                    break
                msg = self._pop_message()
                # We need to pop the EOF matcher in order to tell if we're at
                # the end of the current file, not the end of the last block
                # of message headers.
                self._input.pop_eof_matcher()
                # The input stream must be sitting at the newline or at the
                # EOF.  We want to see if we're at the end of this subpart, so
                # first consume the blank line, then test the next line to see
                # if we're at this subpart's EOF.
                # First loop: consume the blank separator line.
                while True:
                    line = self._input.readline()
                    if line is NeedMoreData:
                        yield NeedMoreData
                        continue
                    break
                # Second loop: peek at the following line to test for EOF.
                while True:
                    line = self._input.readline()
                    if line is NeedMoreData:
                        yield NeedMoreData
                        continue
                    break
                if line == '':
                    break
                # Not at EOF so this is a line we're going to need.
                self._input.unreadline(line)
            return
        if self._cur.get_content_maintype() == 'message':
            # The message claims to be a message/* type, then what follows is
            # another RFC 2822 message.
            for retval in self._parsegen():
                if retval is NeedMoreData:
                    yield NeedMoreData
                    continue
                break
            self._pop_message()
            return
        if self._cur.get_content_maintype() == 'multipart':
            boundary = self._cur.get_boundary()
            if boundary is None:
                # The message /claims/ to be a multipart but it has not
                # defined a boundary.  That's a problem which we'll handle by
                # reading everything until the EOF and marking the message as
                # defective.
                defect = errors.NoBoundaryInMultipartDefect()
                self.policy.handle_defect(self._cur, defect)
                lines = []
                for line in self._input:
                    if line is NeedMoreData:
                        yield NeedMoreData
                        continue
                    lines.append(line)
                self._cur.set_payload(EMPTYSTRING.join(lines))
                return
            # Make sure a valid content type was specified per RFC 2045:6.4.
            if (self._cur.get('content-transfer-encoding', '8bit').lower()
                    not in ('7bit', '8bit', 'binary')):
                defect = errors.InvalidMultipartContentTransferEncodingDefect()
                self.policy.handle_defect(self._cur, defect)
            # Create a line match predicate which matches the inter-part
            # boundary as well as the end-of-multipart boundary.  Don't push
            # this onto the input stream until we've scanned past the
            # preamble.
            separator = '--' + boundary
            boundaryre = re.compile(
                '(?P<sep>' + re.escape(separator) +
                r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
            capturing_preamble = True
            preamble = []
            linesep = False
            close_boundary_seen = False
            while True:
                line = self._input.readline()
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                if line == '':
                    break
                mo = boundaryre.match(line)
                if mo:
                    # If we're looking at the end boundary, we're done with
                    # this multipart.  If there was a newline at the end of
                    # the closing boundary, then we need to initialize the
                    # epilogue with the empty string (see below).
                    if mo.group('end'):
                        close_boundary_seen = True
                        linesep = mo.group('linesep')
                        break
                    # We saw an inter-part boundary.  Were we in the preamble?
                    if capturing_preamble:
                        if preamble:
                            # According to RFC 2046, the last newline belongs
                            # to the boundary.
                            lastline = preamble[-1]
                            eolmo = NLCRE_eol.search(lastline)
                            if eolmo:
                                preamble[-1] = lastline[:-len(eolmo.group(0))]
                            self._cur.preamble = EMPTYSTRING.join(preamble)
                        capturing_preamble = False
                        self._input.unreadline(line)
                        continue
                    # We saw a boundary separating two parts.  Consume any
                    # multiple boundary lines that may be following.  Our
                    # interpretation of RFC 2046 BNF grammar does not produce
                    # body parts within such double boundaries.
                    while True:
                        line = self._input.readline()
                        if line is NeedMoreData:
                            yield NeedMoreData
                            continue
                        mo = boundaryre.match(line)
                        if not mo:
                            self._input.unreadline(line)
                            break
                    # Recurse to parse this subpart; the input stream points
                    # at the subpart's first line.
                    self._input.push_eof_matcher(boundaryre.match)
                    for retval in self._parsegen():
                        if retval is NeedMoreData:
                            yield NeedMoreData
                            continue
                        break
                    # Because of RFC 2046, the newline preceding the boundary
                    # separator actually belongs to the boundary, not the
                    # previous subpart's payload (or epilogue if the previous
                    # part is a multipart).
                    if self._last.get_content_maintype() == 'multipart':
                        epilogue = self._last.epilogue
                        if epilogue == '':
                            self._last.epilogue = None
                        elif epilogue is not None:
                            mo = NLCRE_eol.search(epilogue)
                            if mo:
                                end = len(mo.group(0))
                                self._last.epilogue = epilogue[:-end]
                    else:
                        payload = self._last._payload
                        if isinstance(payload, str):
                            mo = NLCRE_eol.search(payload)
                            if mo:
                                payload = payload[:-len(mo.group(0))]
                                self._last._payload = payload
                    self._input.pop_eof_matcher()
                    self._pop_message()
                    # Set the multipart up for newline cleansing, which will
                    # happen if we're in a nested multipart.
                    self._last = self._cur
                else:
                    # I think we must be in the preamble
                    assert capturing_preamble
                    preamble.append(line)
            # We've seen either the EOF or the end boundary.  If we're still
            # capturing the preamble, we never saw the start boundary.  Note
            # that as a defect and store the captured text as the payload.
            if capturing_preamble:
                defect = errors.StartBoundaryNotFoundDefect()
                self.policy.handle_defect(self._cur, defect)
                self._cur.set_payload(EMPTYSTRING.join(preamble))
                epilogue = []
                # NOTE(review): this loop drains the remaining input but never
                # appends to `epilogue`, so the epilogue is always set to ''
                # on this path.  This mirrors upstream CPython's feedparser;
                # confirm against upstream before "fixing".
                for line in self._input:
                    if line is NeedMoreData:
                        yield NeedMoreData
                        continue
                self._cur.epilogue = EMPTYSTRING.join(epilogue)
                return
            # If we're not processing the preamble, then we might have seen
            # EOF without seeing that end boundary...that is also a defect.
            if not close_boundary_seen:
                defect = errors.CloseBoundaryNotFoundDefect()
                self.policy.handle_defect(self._cur, defect)
                return
            # Everything from here to the EOF is epilogue.  If the end boundary
            # ended in a newline, we'll need to make sure the epilogue isn't
            # None
            if linesep:
                epilogue = ['']
            else:
                epilogue = []
            for line in self._input:
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                epilogue.append(line)
            # Any CRLF at the front of the epilogue is not technically part of
            # the epilogue.  Also, watch out for an empty string epilogue,
            # which means a single newline.
            if epilogue:
                firstline = epilogue[0]
                bolmo = NLCRE_bol.match(firstline)
                if bolmo:
                    epilogue[0] = firstline[len(bolmo.group(0)):]
            self._cur.epilogue = EMPTYSTRING.join(epilogue)
            return
        # Otherwise, it's some non-multipart type, so the entire rest of the
        # file contents becomes the payload.
        lines = []
        for line in self._input:
            if line is NeedMoreData:
                yield NeedMoreData
                continue
            lines.append(line)
        self._cur.set_payload(EMPTYSTRING.join(lines))
+
    def _parse_headers(self, lines):
        """Parse the raw header block *lines* into the current message.

        Continuation lines are accumulated with their header line and
        handed to the policy's header_source_parse() for name/value
        splitting; unix-from envelope lines get special treatment.
        """
        # Passed a list of lines that make up the headers for the current msg
        lastheader = ''
        lastvalue = []
        for lineno, line in enumerate(lines):
            # Check for continuation
            if line[0] in ' \t':
                if not lastheader:
                    # The first line of the headers was a continuation.  This
                    # is illegal, so let's note the defect, store the illegal
                    # line, and ignore it for purposes of headers.
                    defect = errors.FirstHeaderLineIsContinuationDefect(line)
                    self.policy.handle_defect(self._cur, defect)
                    continue
                lastvalue.append(line)
                continue
            if lastheader:
                # A new header starts; flush the accumulated previous one.
                self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
                lastheader, lastvalue = '', []
            # Check for envelope header, i.e. unix-from
            if line.startswith('From '):
                if lineno == 0:
                    # Strip off the trailing newline
                    mo = NLCRE_eol.search(line)
                    if mo:
                        line = line[:-len(mo.group(0))]
                    self._cur.set_unixfrom(line)
                    continue
                elif lineno == len(lines) - 1:
                    # Something looking like a unix-from at the end - it's
                    # probably the first line of the body, so push back the
                    # line and stop.
                    self._input.unreadline(line)
                    return
                else:
                    # Weirdly placed unix-from line.  Note this as a defect
                    # and ignore it.
                    defect = errors.MisplacedEnvelopeHeaderDefect(line)
                    self._cur.defects.append(defect)
                    continue
            # Split the line on the colon separating field name from value.
            # There will always be a colon, because if there wasn't the part of
            # the parser that calls us would have started parsing the body.
            i = line.find(':')
            assert i>0, "_parse_headers fed line with no : and no leading WS"
            lastheader = line[:i]
            lastvalue = [line]
        # Done with all the lines, so handle the last header.
        if lastheader:
            self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
+
+
class BytesFeedParser(FeedParser):
    """A FeedParser whose feed() method accepts bytes rather than str."""

    def feed(self, data):
        # Map the raw bytes onto str via surrogateescape so that arbitrary
        # (non-ASCII) byte values survive a later round-trip back to bytes.
        decoded = data.decode('ascii', 'surrogateescape')
        super().feed(decoded)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/header.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/header.py
new file mode 100644
index 00000000..42bcc0c1
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/header.py
@@ -0,0 +1,583 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Header encoding and decoding functionality."""
+
+__all__ = [
+    'Header',
+    'decode_header',
+    'make_header',
+    ]
+
+import re
+import binascii
+
+import email.quoprimime
+import email.base64mime
+
+from email.errors import HeaderParseError
+from email import charset as _charset
# Re-export: Charset is defined in email.charset; aliased here so the
# public Header API can be used with just this module imported.
Charset = _charset.Charset

NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
# RFC 2822 recommends lines of at most 78 characters (excluding CRLF).
MAXLINELEN = 78
# Characters treated as RFC 2822 folding whitespace.
FWS = ' \t'

USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.  (The name keeps the historical "embeded"
# spelling from upstream CPython.)
_embeded_header = re.compile(r'\n[^ \t]+:')



# Helpers
_max_append = email.quoprimime._max_append
+
+
+
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                    for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        first = True
        while parts:
            unencoded = parts.pop(0)
            if first:
                unencoded = unencoded.lstrip()
                first = False
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # Now loop over words and remove words that consist of whitespace
    # between two encoded strings.
    # (A stray debug "import sys" that upstream CPython does not have was
    # removed here; sys was never used in this function.)
    droplist = []
    for n, w in enumerate(words):
        if n > 1 and w[1] and words[n-2][1] and words[n-1][0].isspace():
            droplist.append(n-1)
    for d in reversed(droplist):
        del words[d]

    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = email.quoprimime.header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4   # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = email.base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
+
+
+
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Build a Header instance from (decoded_string, charset) pairs.

    decoded_seq is a sequence of pairs of the kind produced by
    decode_header(): each pair holds a decoded string together with the
    name of its character set, or None for us-ascii.  The optional
    maxlinelen, header_name, and continuation_ws arguments are forwarded
    unchanged to the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # None means us-ascii and can be passed straight through to
        # Header.append(); a plain charset name, however, must first be
        # wrapped in a Charset instance.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
+
+
+
+class Header:
+    def __init__(self, s=None, charset=None,
+                 maxlinelen=None, header_name=None,
+                 continuation_ws=' ', errors='strict'):
+        """Create a MIME-compliant header that can contain many character sets.
+
+        Optional s is the initial header value.  If None, the initial header
+        value is not set.  You can later append to the header with .append()
+        method calls.  s may be a byte string or a Unicode string, but see the
+        .append() documentation for semantics.
+
+        Optional charset serves two purposes: it has the same meaning as the
+        charset argument to the .append() method.  It also sets the default
+        character set for all subsequent .append() calls that omit the charset
+        argument.  If charset is not provided in the constructor, the us-ascii
+        charset is used both as s's initial charset and as the default for
+        subsequent .append() calls.
+
+        The maximum line length can be specified explicitly via maxlinelen. For
+        splitting the first line to a shorter value (to account for the field
+        header which isn't included in s, e.g. `Subject') pass in the name of
+        the field in header_name.  The default maxlinelen is 78 as recommended
+        by RFC 2822.
+
+        continuation_ws must be RFC 2822 compliant folding whitespace (usually
+        either a space or a hard tab) which will be prepended to continuation
+        lines.
+
+        errors is passed through to the .append() call.
+        """
+        if charset is None:
+            charset = USASCII
+        elif not isinstance(charset, Charset):
+            charset = Charset(charset)
+        self._charset = charset
+        self._continuation_ws = continuation_ws
+        self._chunks = []
+        if s is not None:
+            self.append(s, charset, errors)
+        if maxlinelen is None:
+            maxlinelen = MAXLINELEN
+        self._maxlinelen = maxlinelen
+        if header_name is None:
+            self._headerlen = 0
+        else:
+            # Take the separating colon and space into account.
+            self._headerlen = len(header_name) + 2
+
+    def __str__(self):
+        """Return the string value of the header."""
+        self._normalize()
+        uchunks = []
+        lastcs = None
+        lastspace = None
+        for string, charset in self._chunks:
+            # We must preserve spaces between encoded and non-encoded word
+            # boundaries, which means for us we need to add a space when we go
+            # from a charset to None/us-ascii, or from None/us-ascii to a
+            # charset.  Only do this for the second and subsequent chunks.
+            # Don't add a space if the None/us-ascii string already has
+            # a space (trailing or leading depending on transition)
+            nextcs = charset
+            if nextcs == _charset.UNKNOWN8BIT:
+                original_bytes = string.encode('ascii', 'surrogateescape')
+                string = original_bytes.decode('ascii', 'replace')
+            if uchunks:
+                hasspace = string and self._nonctext(string[0])
+                if lastcs not in (None, 'us-ascii'):
+                    if nextcs in (None, 'us-ascii') and not hasspace:
+                        uchunks.append(SPACE)
+                        nextcs = None
+                elif nextcs not in (None, 'us-ascii') and not lastspace:
+                    uchunks.append(SPACE)
+            lastspace = string and self._nonctext(string[-1])
+            lastcs = nextcs
+            uchunks.append(string)
+        return EMPTYSTRING.join(uchunks)
+
+    # Rich comparison operators for equality only.  BAW: does it make sense to
+    # have or explicitly disable <, <=, >, >= operators?
+    def __eq__(self, other):
+        # other may be a Header or a string.  Both are fine so coerce
+        # ourselves to a unicode (of the unencoded header value), swap the
+        # args and do another comparison.
+        return other == str(self)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def append(self, s, charset=None, errors='strict'):
+        """Append a string to the MIME header.
+
+        Optional charset, if given, should be a Charset instance or the name
+        of a character set (which will be converted to a Charset instance).  A
+        value of None (the default) means that the charset given in the
+        constructor is used.
+
+        s may be a byte string or a Unicode string.  If it is a byte string
+        (i.e. isinstance(s, str) is false), then charset is the encoding of
+        that byte string, and a UnicodeError will be raised if the string
+        cannot be decoded with that charset.  If s is a Unicode string, then
+        charset is a hint specifying the character set of the characters in
+        the string.  In either case, when producing an RFC 2822 compliant
+        header using RFC 2047 rules, the string will be encoded using the
+        output codec of the charset.  If the string cannot be encoded to the
+        output codec, a UnicodeError will be raised.
+
+        Optional `errors' is passed as the errors argument to the decode
+        call if s is a byte string.
+        """
+        if charset is None:
+            charset = self._charset
+        elif not isinstance(charset, Charset):
+            charset = Charset(charset)
+        if not isinstance(s, str):
+            input_charset = charset.input_codec or 'us-ascii'
+            if input_charset == _charset.UNKNOWN8BIT:
+                s = s.decode('us-ascii', 'surrogateescape')
+            else:
+                s = s.decode(input_charset, errors)
+        # Ensure that the bytes we're storing can be decoded to the output
+        # character set, otherwise an early error is raised.
+        output_charset = charset.output_codec or 'us-ascii'
+        if output_charset != _charset.UNKNOWN8BIT:
+            try:
+                s.encode(output_charset, errors)
+            except UnicodeEncodeError:
+                if output_charset!='us-ascii':
+                    raise
+                charset = UTF8
+        self._chunks.append((s, charset))
+
+    def _nonctext(self, s):
+        """True if string s is not a ctext character of RFC822.
+        """
+        return s in (' ', '\t', '(', ')', '\\')
+
+    def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
+        r"""Encode a message header into an RFC-compliant format.
+
+        There are many issues involved in converting a given string for use in
+        an email header.  Only certain character sets are readable in most
+        email clients, and as header strings can only contain a subset of
+        7-bit ASCII, care must be taken to properly convert and encode (with
+        Base64 or quoted-printable) header strings.  In addition, there is a
+        75-character length limit on any given encoded header field, so
+        line-wrapping must be performed, even with double-byte character sets.
+
+        Optional maxlinelen specifies the maximum length of each generated
+        line, exclusive of the linesep string.  Individual lines may be longer
+        than maxlinelen if a folding point cannot be found.  The first line
+        will be shorter by the length of the header name plus ": " if a header
+        name was specified at Header construction time.  The default value for
+        maxlinelen is determined at header construction time.
+
+        Optional splitchars is a string containing characters which should be
+        given extra weight by the splitting algorithm during normal header
+        wrapping.  This is in very rough support of RFC 2822's `higher level
+        syntactic breaks':  split points preceded by a splitchar are preferred
+        during line splitting, with the characters preferred in the order in
+        which they appear in the string.  Space and tab may be included in the
+        string to indicate whether preference should be given to one over the
+        other as a split point when other split chars do not appear in the line
+        being split.  Splitchars does not affect RFC 2047 encoded lines.
+
+        Optional linesep is a string to be used to separate the lines of
+        the value.  The default value is the most useful for typical
+        Python applications, but it can be set to \r\n to produce RFC-compliant
+        line separators when needed.
+        """
+        self._normalize()
+        if maxlinelen is None:
+            maxlinelen = self._maxlinelen
+        # A maxlinelen of 0 means don't wrap.  For all practical purposes,
+        # choosing a huge number here accomplishes that and makes the
+        # _ValueFormatter algorithm much simpler.
+        if maxlinelen == 0:
+            maxlinelen = 1000000
+        formatter = _ValueFormatter(self._headerlen, maxlinelen,
+                                    self._continuation_ws, splitchars)
+        lastcs = None
+        hasspace = lastspace = None
+        for string, charset in self._chunks:
+            if hasspace is not None:
+                hasspace = string and self._nonctext(string[0])
+                import sys
+                if lastcs not in (None, 'us-ascii'):
+                    if not hasspace or charset not in (None, 'us-ascii'):
+                        formatter.add_transition()
+                elif charset not in (None, 'us-ascii') and not lastspace:
+                    formatter.add_transition()
+            lastspace = string and self._nonctext(string[-1])
+            lastcs = charset
+            hasspace = False
+            lines = string.splitlines()
+            if lines:
+                formatter.feed('', lines[0], charset)
+            else:
+                formatter.feed('', '', charset)
+            for line in lines[1:]:
+                formatter.newline()
+                if charset.header_encoding is not None:
+                    formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
+                                   charset)
+                else:
+                    sline = line.lstrip()
+                    fws = line[:len(line)-len(sline)]
+                    formatter.feed(fws, sline, charset)
+            if len(lines) > 1:
+                formatter.newline()
+        if self._chunks:
+            formatter.add_transition()
+        value = formatter._str(linesep)
+        if _embeded_header.search(value):
+            raise HeaderParseError("header value appears to contain "
+                "an embedded header: {!r}".format(value))
+        return value
+
+    def _normalize(self):
+        # Step 1: Normalize the chunks so that all runs of identical charsets
+        # get collapsed into a single unicode string.
+        chunks = []
+        last_charset = None
+        last_chunk = []
+        for string, charset in self._chunks:
+            if charset == last_charset:
+                last_chunk.append(string)
+            else:
+                if last_charset is not None:
+                    chunks.append((SPACE.join(last_chunk), last_charset))
+                last_chunk = [string]
+                last_charset = charset
+        if last_chunk:
+            chunks.append((SPACE.join(last_chunk), last_charset))
+        self._chunks = chunks
+
+
+
class _ValueFormatter:
    """Fold a header value into lines no longer than maxlen.

    Chunks of (folding whitespace, text) are accumulated in an
    _Accumulator; completed lines are flushed into self._lines.
    headerlen is the length of the header name already occupying the
    first line; continuation_ws is prefixed to folded continuation lines.
    """

    def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
        # Target maximum line length (first line includes the header name).
        self._maxlen = maxlen
        self._continuation_ws = continuation_ws
        self._continuation_ws_len = len(continuation_ws)
        # Fold-point characters, in order of preference.
        self._splitchars = splitchars
        # Completed output lines (without line separators).
        self._lines = []
        # Line currently being built; starts with the header name's
        # length already counted via the accumulator's initial size.
        self._current_line = _Accumulator(headerlen)

    def _str(self, linesep):
        """Flush the pending line and join all lines with linesep."""
        self.newline()
        return linesep.join(self._lines)

    def __str__(self):
        return self._str(NL)

    def newline(self):
        """Terminate the current line and reset the accumulator."""
        end_of_line = self._current_line.pop()
        # A trailing (' ', '') is a bare charset-transition marker (see
        # add_transition); drop it rather than emitting trailing space.
        if end_of_line != (' ', ''):
            self._current_line.push(*end_of_line)
        if len(self._current_line) > 0:
            if self._current_line.is_onlyws():
                # Whitespace-only content folds onto the previous line
                # instead of producing a line of its own.
                self._lines[-1] += str(self._current_line)
            else:
                self._lines.append(str(self._current_line))
        self._current_line.reset()

    def add_transition(self):
        # Record a charset transition point; newline() may drop it.
        self._current_line.push(' ', '')

    def feed(self, fws, string, charset):
        """Add fws+string to the value, splitting per the charset's rules."""
        # If the charset has no header encoding (i.e. it is an ASCII encoding)
        # then we must split the header at the "highest level syntactic break"
        # possible. Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.  Eventually, this should be pluggable.
        if charset.header_encoding is None:
            self._ascii_split(fws, string, self._splitchars)
            return
        # Otherwise, we're doing either a Base64 or a quoted-printable
        # encoding which means we don't need to split the line on syntactic
        # breaks.  We can basically just find enough characters to fit on the
        # current line, minus the RFC 2047 chrome.  What makes this trickier
        # though is that we have to split at octet boundaries, not character
        # boundaries but it's only safe to split at character boundaries so at
        # best we can only get close.
        encoded_lines = charset.header_encode_lines(string, self._maxlengths())
        # The first element extends the current line, but if it's None then
        # nothing more fit on the current line so start a new line.
        try:
            first_line = encoded_lines.pop(0)
        except IndexError:
            # There are no encoded lines, so we're done.
            return
        if first_line is not None:
            self._append_chunk(fws, first_line)
        try:
            last_line = encoded_lines.pop()
        except IndexError:
            # There was only one line.
            return
        self.newline()
        self._current_line.push(self._continuation_ws, last_line)
        # Everything else are full lines in themselves.
        for line in encoded_lines:
            self._lines.append(self._continuation_ws + line)

    def _maxlengths(self):
        """Yield the max content length for each successive output line."""
        # The first line's length.
        yield self._maxlen - len(self._current_line)
        while True:
            yield self._maxlen - self._continuation_ws_len

    def _ascii_split(self, fws, string, splitchars):
        # The RFC 2822 header folding algorithm is simple in principle but
        # complex in practice.  Lines may be folded any place where "folding
        # white space" appears by inserting a linesep character in front of the
        # FWS.  The complication is that not all spaces or tabs qualify as FWS,
        # and we are also supposed to prefer to break at "higher level
        # syntactic breaks".  We can't do either of these without intimate
        # knowledge of the structure of structured headers, which we don't have
        # here.  So the best we can do here is prefer to break at the specified
        # splitchars, and hope that we don't choose any spaces or tabs that
        # aren't legal FWS.  (This is at least better than the old algorithm,
        # where we would sometimes *introduce* FWS after a splitchar, or the
        # algorithm before that, where we would turn all white space runs into
        # single spaces or tabs.)
        parts = re.split("(["+FWS+"]+)", fws+string)
        if parts[0]:
            # String did not start with whitespace: pair the first part
            # with an empty FWS so the (fws, part) zip below stays aligned.
            parts[:0] = ['']
        else:
            parts.pop(0)
        for fws, part in zip(*[iter(parts)]*2):
            self._append_chunk(fws, part)

    def _append_chunk(self, fws, string):
        """Push a chunk; if the line overflows, fold at the best split point."""
        self._current_line.push(fws, string)
        if len(self._current_line) > self._maxlen:
            # Find the best split point, working backward from the end.
            # There might be none, on a long first line.
            for ch in self._splitchars:
                for i in range(self._current_line.part_count()-1, 0, -1):
                    if ch.isspace():
                        fws = self._current_line[i][0]
                        if fws and fws[0]==ch:
                            break
                    prevpart = self._current_line[i-1][1]
                    if prevpart and prevpart[-1]==ch:
                        break
                else:
                    continue
                break
            else:
                # No split point found for any preferred character.
                fws, part = self._current_line.pop()
                if self._current_line._initial_size > 0:
                    # There will be a header, so leave it on a line by itself.
                    self.newline()
                    if not fws:
                        # We don't use continuation_ws here because the whitespace
                        # after a header should always be a space.
                        fws = ' '
                self._current_line.push(fws, part)
                return
            remainder = self._current_line.pop_from(i)
            self._lines.append(str(self._current_line))
            self._current_line.reset(remainder)
+
+
+class _Accumulator(list):
+
+    def __init__(self, initial_size=0):
+        self._initial_size = initial_size
+        super().__init__()
+
+    def push(self, fws, string):
+        self.append((fws, string))
+
+    def pop_from(self, i=0):
+        popped = self[i:]
+        self[i:] = []
+        return popped
+
+    def pop(self):
+        if self.part_count()==0:
+            return ('', '')
+        return super().pop()
+
+    def __len__(self):
+        return sum((len(fws)+len(part) for fws, part in self),
+                   self._initial_size)
+
+    def __str__(self):
+        return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
+                                for fws, part in self))
+
+    def reset(self, startval=None):
+        if startval is None:
+            startval = []
+        self[:] = startval
+        self._initial_size = 0
+
+    def is_onlyws(self):
+        return self._initial_size==0 and (not self or str(self).isspace())
+
+    def part_count(self):
+        return super().__len__()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/iterators.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/iterators.py
new file mode 100644
index 00000000..e18f7eb3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/iterators.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Various types of useful iterators and generators."""
+
+__all__ = [
+    'body_line_iterator',
+    'typed_subpart_iterator',
+    'walk',
+    # Do not include _structure() since it's part of the debugging API.
+    ]
+
+import sys
+from io import StringIO
+
+
+
+# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order.  This method is a
    generator.
    """
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            # Recurse via walk(): this function is installed on the
            # Message class as its walk() method (there is no visit()),
            # so every subpart supports it.
            for subsubpart in subpart.walk():
                yield subsubpart
+
+
+
+# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    # Traverse with Message.walk() -- messages have no visit() method.
    for subpart in msg.walk():
        payload = subpart.get_payload(decode=decode)
        # Multipart payloads are lists; only string payloads yield lines.
        if isinstance(payload, str):
            for line in StringIO(payload):
                yield line
+
+
def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text".  Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    # Traverse with Message.walk() -- messages have no visit() method.
    for subpart in msg.walk():
        if subpart.get_content_maintype() == maintype:
            if subtype is None or subpart.get_content_subtype() == subtype:
                yield subpart
+
+
+
+def _structure(msg, fp=None, level=0, include_default=False):
+    """A handy debugging aid"""
+    if fp is None:
+        fp = sys.stdout
+    tab = ' ' * (level * 4)
+    print(tab + msg.get_content_type(), end='', file=fp)
+    if include_default:
+        print(' [%s]' % msg.get_default_type(), file=fp)
+    else:
+        print(file=fp)
+    if msg.is_multipart():
+        for subpart in msg.get_payload():
+            _structure(subpart, fp, level+1, include_default)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/message.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/message.py
new file mode 100644
index 00000000..5020a032
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/message.py
@@ -0,0 +1,879 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Basic message object for the email package object model."""
+
+__all__ = ['Message']
+
+import re
+import uu
+import base64
+import binascii
+from io import BytesIO, StringIO
+
+# Intrapackage imports
+from email import utils
+from email import errors
+from email._policybase import compat32
+from email import charset as _charset
+from email._encoded_words import decode_b
+Charset = _charset.Charset
+
+SEMISPACE = '; '
+
+# Regular expression that matches `special' characters in parameters, the
+# existence of which force quoting of the parameter value.
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+
+
+def _splitparam(param):
+    # Split header parameters.  BAW: this may be too simple.  It isn't
+    # strictly RFC 2045 (section 5.1) compliant, but it catches most headers
+    # found in the wild.  We may eventually need a full fledged parser.
+    # RDM: we might have a Header here; for now just stringify it.
+    a, sep, b = str(param).partition(';')
+    if not sep:
+        return a.strip(), None
+    return a.strip(), b.strip()
+
+def _formatparam(param, value=None, quote=True):
+    """Convenience function to format and return a key=value pair.
+
+    This will quote the value if needed or if quote is true.  If value is a
+    three tuple (charset, language, value), it will be encoded according
+    to RFC2231 rules.  If it contains non-ascii characters it will likewise
+    be encoded according to RFC2231 rules, using the utf-8 charset and
+    a null language.
+    """
+    if value is not None and len(value) > 0:
+        # A tuple is used for RFC 2231 encoded parameter values where items
+        # are (charset, language, value).  charset is a string, not a Charset
+        # instance.  RFC 2231 encoded values are never quoted, per RFC.
+        if isinstance(value, tuple):
+            # Encode as per RFC 2231
+            param += '*'
+            value = utils.encode_rfc2231(value[2], value[0], value[1])
+            return '%s=%s' % (param, value)
+        else:
+            try:
+                value.encode('ascii')
+            except UnicodeEncodeError:
+                param += '*'
+                value = utils.encode_rfc2231(value, 'utf-8', '')
+                return '%s=%s' % (param, value)
+        # BAW: Please check this.  I think that if quote is set it should
+        # force quoting even if not necessary.
+        if quote or tspecials.search(value):
+            return '%s="%s"' % (param, utils.quote(value))
+        else:
+            return '%s=%s' % (param, value)
+    else:
+        return param
+
+def _parseparam(s):
+    # RDM This might be a Header, so for now stringify it.
+    s = ';' + str(s)
+    plist = []
+    while s[:1] == ';':
+        s = s[1:]
+        end = s.find(';')
+        while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+            end = s.find(';', end + 1)
+        if end < 0:
+            end = len(s)
+        f = s[:end]
+        if '=' in f:
+            i = f.index('=')
+            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
+        plist.append(f.strip())
+        s = s[end:]
+    return plist
+
+
def _unquotevalue(value):
    """Unquote a parameter value, preserving RFC 2231 tuple structure.

    This is different than utils.collapse_rfc2231_value() because it
    doesn't try to convert the value to a unicode.  Message.get_param()
    and Message.get_params() are both currently defined to return the
    tuple in the face of RFC 2231 parameters.
    """
    if isinstance(value, tuple):
        charset, language, text = value
        return charset, language, utils.unquote(text)
    return utils.unquote(value)
+
+
+
+class Message:
+    """Basic message object.
+
+    A message object is defined as something that has a bunch of RFC 2822
+    headers and a payload.  It may optionally have an envelope header
+    (a.k.a. Unix-From or From_ header).  If the message is a container (i.e. a
+    multipart or a message/rfc822), then the payload is a list of Message
+    objects, otherwise it is a string.
+
+    Message objects implement part of the `mapping' interface, which assumes
+    there is exactly one occurrence of the header per message.  Some headers
+    do in fact appear multiple times (e.g. Received) and for those headers,
+    you must use the explicit API to set or get all the headers.  Not all of
+    the mapping methods are implemented.
+    """
+    def __init__(self, policy=compat32):
+        self.policy = policy
+        self._headers = []
+        self._unixfrom = None
+        self._payload = None
+        self._charset = None
+        # Defaults for multipart messages
+        self.preamble = self.epilogue = None
+        self.defects = []
+        # Default content type
+        self._default_type = 'text/plain'
+
    def __str__(self):
        """Return the entire formatted message as a string.
        This includes the headers, body, and envelope header.
        """
        # Delegates to as_string() with its default arguments.
        return self.as_string()
+
+    def as_string(self, unixfrom=False, maxheaderlen=0):
+        """Return the entire formatted message as a string.
+        Optional `unixfrom' when True, means include the Unix From_ envelope
+        header.
+
+        This is a convenience method and may not generate the message exactly
+        as you intend.  For more flexibility, use the flatten() method of a
+        Generator instance.
+        """
+        from email.generator import Generator
+        fp = StringIO()
+        g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen)
+        g.flatten(self, unixfrom=unixfrom)
+        return fp.getvalue()
+
    def is_multipart(self):
        """Return True if the message consists of multiple parts."""
        # Multipart messages store their subparts as a list in _payload.
        return isinstance(self._payload, list)
+
+    #
+    # Unix From_ line
+    #
    def set_unixfrom(self, unixfrom):
        """Store the Unix From_ envelope header line."""
        self._unixfrom = unixfrom
+
    def get_unixfrom(self):
        """Return the stored Unix From_ envelope line, or None if unset."""
        return self._unixfrom
+
+    #
+    # Payload manipulation.
+    #
+    def attach(self, payload):
+        """Add the given payload to the current payload.
+
+        The current payload will always be a list of objects after this method
+        is called.  If you want to set the payload to a scalar object, use
+        set_payload() instead.
+        """
+        if self._payload is None:
+            self._payload = [payload]
+        else:
+            self._payload.append(payload)
+
    def get_payload(self, i=None, decode=False):
        """Return a reference to the payload.

        The payload will either be a list object or a string.  If you mutate
        the list object, you modify the message's payload in place.  Optional
        i returns that index into the payload.

        Optional decode is a flag indicating whether the payload should be
        decoded or not, according to the Content-Transfer-Encoding header
        (default is False).

        When True and the message is not a multipart, the payload will be
        decoded if this header's value is `quoted-printable' or `base64'.  If
        some other encoding is used, or the header is missing, or if the
        payload has bogus data (i.e. bogus base64 or uuencoded data), the
        payload is returned as-is.

        If the message is a multipart and the decode flag is True, then None
        is returned.
        """
        # Here is the logic table for this code, based on the email5.0.0 code:
        #   i     decode  is_multipart  result
        # ------  ------  ------------  ------------------------------
        #  None   True    True          None
        #   i     True    True          None
        #  None   False   True          _payload (a list)
        #   i     False   True          _payload element i (a Message)
        #   i     False   False         error (not a list)
        #   i     True    False         error (not a list)
        #  None   False   False         _payload
        #  None   True    False         _payload decoded (bytes)
        # Note that Barry planned to factor out the 'decode' case, but that
        # isn't so easy now that we handle the 8 bit data, which needs to be
        # converted in both the decode and non-decode path.
        if self.is_multipart():
            if decode:
                return None
            if i is None:
                return self._payload
            else:
                return self._payload[i]
        # For backward compatibility, Use isinstance and this error message
        # instead of the more logical is_multipart test.
        if i is not None and not isinstance(self._payload, list):
            raise TypeError('Expected list, got %s' % type(self._payload))
        payload = self._payload
        # cte might be a Header, so for now stringify it.
        cte = str(self.get('content-transfer-encoding', '')).lower()
        # payload may be bytes here.
        if isinstance(payload, str):
            if utils._has_surrogates(payload):
                # Payload smuggles raw bytes via surrogateescape; recover
                # them, and for the non-decode path re-decode as text using
                # the declared charset (falling back to ascii).
                bpayload = payload.encode('ascii', 'surrogateescape')
                if not decode:
                    try:
                        payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
                    except LookupError:
                        payload = bpayload.decode('ascii', 'replace')
            elif decode:
                try:
                    bpayload = payload.encode('ascii')
                except UnicodeError:
                    # This won't happen for RFC compliant messages (messages
                    # containing only ASCII codepoints in the unicode input).
                    # If it does happen, turn the string into bytes in a way
                    # guaranteed not to fail.
                    bpayload = payload.encode('raw-unicode-escape')
        if not decode:
            return payload
        # NOTE(review): below this point bpayload is only bound when the
        # payload was a str -- a non-str payload with a qp/base64 CTE would
        # raise NameError here; presumably that case cannot occur in
        # practice.  TODO confirm against the parser's invariants.
        if cte == 'quoted-printable':
            return utils._qdecode(bpayload)
        elif cte == 'base64':
            # XXX: this is a bit of a hack; decode_b should probably be factored
            # out somewhere, but I haven't figured out where yet.
            value, defects = decode_b(b''.join(bpayload.splitlines()))
            for defect in defects:
                self.policy.handle_defect(self, defect)
            return value
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            in_file = BytesIO(bpayload)
            out_file = BytesIO()
            try:
                uu.decode(in_file, out_file, quiet=True)
                return out_file.getvalue()
            except uu.Error:
                # Some decoding problem
                return bpayload
        # Unknown CTE: return the (byte) payload as-is.
        if isinstance(payload, str):
            return bpayload
        return payload
+
+    def set_payload(self, payload, charset=None):
+        """Set the payload to the given value.
+
+        Optional charset sets the message's default character set.  See
+        set_charset() for details.
+        """
+        if isinstance(payload, bytes):
+            payload = payload.decode('ascii', 'surrogateescape')
+        self._payload = payload
+        if charset is not None:
+            self.set_charset(charset)
+
    def set_charset(self, charset):
        """Set the charset of the payload to a given character set.

        charset can be a Charset instance, a string naming a character set, or
        None.  If it is a string it will be converted to a Charset instance.
        If charset is None, the charset parameter will be removed from the
        Content-Type field.  Anything else will generate a TypeError.

        The message will be assumed to be of type text/* encoded with
        charset.input_charset.  It will be converted to charset.output_charset
        and encoded properly, if needed, when generating the plain text
        representation of the message.  MIME headers (MIME-Version,
        Content-Type, Content-Transfer-Encoding) will be added as needed.
        """
        if charset is None:
            # Clearing the charset: drop the parameter and forget it.
            self.del_param('charset')
            self._charset = None
            return
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        if 'MIME-Version' not in self:
            self.add_header('MIME-Version', '1.0')
        if 'Content-Type' not in self:
            self.add_header('Content-Type', 'text/plain',
                            charset=charset.get_output_charset())
        else:
            self.set_param('charset', charset.get_output_charset())
        if charset != charset.get_output_charset():
            self._payload = charset.body_encode(self._payload)
        if 'Content-Transfer-Encoding' not in self:
            cte = charset.get_body_encoding()
            try:
                # NOTE(review): get_body_encoding() apparently returns either
                # a callable (which is given the message and presumably
                # encodes the body and sets the header itself) or a plain
                # string naming the encoding; the TypeError path handles the
                # string case.  Verify against the Charset implementation.
                cte(self)
            except TypeError:
                self._payload = charset.body_encode(self._payload)
                self.add_header('Content-Transfer-Encoding', cte)
+
    def get_charset(self):
        """Return the Charset instance associated with the message's payload.
        """
        # Set by set_charset(); None until a charset has been assigned.
        return self._charset
+
+    #
+    # MAPPING INTERFACE (partial)
+    #
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)
+
    def __getitem__(self, name):
        """Get a header value.

        Return None if the header is missing instead of raising an exception.

        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        # Delegates to get(), whose default failobj is None.
        return self.get(name)
+
+    def __setitem__(self, name, val):
+        """Set the value of a header.
+
+        Note: this does not overwrite an existing header with the same field
+        name.  Use __delitem__() first to delete any existing headers.
+        """
+        max_count = self.policy.header_max_count(name)
+        if max_count:
+            lname = name.lower()
+            found = 0
+            for k, v in self._headers:
+                if k.lower() == lname:
+                    found += 1
+                    if found >= max_count:
+                        raise ValueError("There may be at most {} {} headers "
+                                         "in a message".format(max_count, name))
+        self._headers.append(self.policy.header_store_parse(name, val))
+
+    def __delitem__(self, name):
+        """Delete all occurrences of a header, if present.
+
+        Does not raise an exception if the header is missing.
+        """
+        name = name.lower()
+        newheaders = []
+        for k, v in self._headers:
+            if k.lower() != name:
+                newheaders.append((k, v))
+        self._headers = newheaders
+
+    def __contains__(self, name):
+        return name.lower() in [k.lower() for k, v in self._headers]
+
+    def __iter__(self):
+        for field, value in self._headers:
+            yield field
+
+    def keys(self):
+        """Return a list of all the message's header field names.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [k for k, v in self._headers]
+
+    def values(self):
+        """Return a list of all the message's header values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [self.policy.header_fetch_parse(k, v)
+                for k, v in self._headers]
+
+    def items(self):
+        """Get all the message's header fields and values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [(k, self.policy.header_fetch_parse(k, v))
+                for k, v in self._headers]
+
+    def get(self, name, failobj=None):
+        """Get a header value.
+
+        Like __getitem__() but return failobj instead of None when the field
+        is missing.
+        """
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                return self.policy.header_fetch_parse(k, v)
+        return failobj
+
+    #
+    # "Internal" methods (public API, but only intended for use by a parser
+    # or generator, not normal application code.
+    #
+
    def set_raw(self, name, value):
        """Store name and value in the model without modification.

        This is an "internal" API, intended only for use by a parser.
        """
        # Bypasses the policy's header_store_parse step on purpose.
        self._headers.append((name, value))
+
+    def raw_items(self):
+        """Return the (name, value) header pairs without modification.
+
+        This is an "internal" API, intended only for use by a generator.
+        """
+        return iter(self._headers.copy())
+
+    #
+    # Additional useful stuff
+    #
+
+    def get_all(self, name, failobj=None):
+        """Return a list of all the values for the named field.
+
+        These will be sorted in the order they appeared in the original
+        message, and may contain duplicates.  Any fields deleted and
+        re-inserted are always appended to the header list.
+
+        If no such fields exist, failobj is returned (defaults to None).
+        """
+        values = []
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                values.append(self.policy.header_fetch_parse(k, v))
+        if not values:
+            return failobj
+        return values
+
    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        name is the header field to add.  keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes.  Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.  If a
        parameter value contains non-ASCII characters it can be specified as a
        three-tuple of (charset, language, value), in which case it will be
        encoded according to RFC2231 rules.  Otherwise it will be encoded using
        the utf-8 charset and a language of ''.

        Examples:

        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
        msg.add_header('content-disposition', 'attachment',
                       filename=('utf-8', '', 'Fußballer.ppt'))
        msg.add_header('content-disposition', 'attachment',
                       filename='Fußballer.ppt')
        """
        parts = []
        for k, v in _params.items():
            # Underscores in keyword argument names become dashes in the
            # emitted parameter names.
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        if _value is not None:
            parts.insert(0, _value)
        # Appends via __setitem__; existing headers are not replaced.
        self[_name] = SEMISPACE.join(parts)
+
+    def replace_header(self, _name, _value):
+        """Replace a header.
+
+        Replace the first matching header found in the message, retaining
+        header order and case.  If no matching header was found, a KeyError is
+        raised.
+        """
+        _name = _name.lower()
+        for i, (k, v) in zip(range(len(self._headers)), self._headers):
+            if k.lower() == _name:
+                self._headers[i] = self.policy.header_store_parse(k, _value)
+                break
+        else:
+            raise KeyError(_name)
+
+    #
+    # Use these three methods instead of the three above.
+    #
+
+    def get_content_type(self):
+        """Return the message's content type.
+
+        The returned string is coerced to lower case of the form
+        `maintype/subtype'.  If there was no Content-Type header in the
+        message, the default type as given by get_default_type() will be
+        returned.  Since according to RFC 2045, messages always have a default
+        type this will always return a value.
+
+        RFC 2045 defines a message's default type to be text/plain unless it
+        appears inside a multipart/digest container, in which case it would be
+        message/rfc822.
+        """
+        missing = object()
+        value = self.get('content-type', missing)
+        if value is missing:
+            # This should have no parameters
+            return self.get_default_type()
+        ctype = _splitparam(value)[0].lower()
+        # RFC 2045, section 5.2 says if its invalid, use text/plain
+        if ctype.count('/') != 1:
+            return 'text/plain'
+        return ctype
+
+    def get_content_maintype(self):
+        """Return the message's main content type.
+
+        This is the `maintype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[0]
+
+    def get_content_subtype(self):
+        """Returns the message's sub-content type.
+
+        This is the `subtype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[1]
+
    def get_default_type(self):
        """Return the `default' content type.

        Most messages have a default content type of text/plain, except for
        messages that are subparts of multipart/digest containers.  Such
        subparts have a default content type of message/rfc822.
        """
        # Consulted by get_content_type() when no Content-Type header exists.
        return self._default_type
+
    def set_default_type(self, ctype):
        """Set the `default' content type.

        ctype should be either "text/plain" or "message/rfc822", although this
        is not enforced.  The default content type is not stored in the
        Content-Type header.
        """
        # Only used as a fallback by get_content_type(); no header is written.
        self._default_type = ctype
+
+    def _get_params_preserve(self, failobj, header):
+        # Like get_params() but preserves the quoting of values.  BAW:
+        # should this be part of the public interface?
+        missing = object()
+        value = self.get(header, missing)
+        if value is missing:
+            return failobj
+        params = []
+        for p in _parseparam(value):
+            try:
+                name, val = p.split('=', 1)
+                name = name.strip()
+                val = val.strip()
+            except ValueError:
+                # Must have been a bare attribute
+                name = p.strip()
+                val = ''
+            params.append((name, val))
+        params = utils.decode_params(params)
+        return params
+
+    def get_params(self, failobj=None, header='content-type', unquote=True):
+        """Return the message's Content-Type parameters, as a list.
+
+        The elements of the returned list are 2-tuples of key/value pairs, as
+        split on the `=' sign.  The left hand side of the `=' is the key,
+        while the right hand side is the value.  If there is no `=' sign in
+        the parameter the value is the empty string.  The value is as
+        described in the get_param() method.
+
+        Optional failobj is the object to return if there is no Content-Type
+        header.  Optional header is the header to search instead of
+        Content-Type.  If unquote is True, the value is unquoted.
+        """
+        missing = object()
+        params = self._get_params_preserve(missing, header)
+        if params is missing:
+            return failobj
+        if unquote:
+            return [(k, _unquotevalue(v)) for k, v in params]
+        else:
+            return params
+
+    def get_param(self, param, failobj=None, header='content-type',
+                  unquote=True):
+        """Return the parameter value if found in the Content-Type header.
+
+        Optional failobj is the object to return if there is no Content-Type
+        header, or the Content-Type header has no such parameter.  Optional
+        header is the header to search instead of Content-Type.
+
+        Parameter keys are always compared case insensitively.  The return
+        value can either be a string, or a 3-tuple if the parameter was RFC
+        2231 encoded.  When it's a 3-tuple, the elements of the value are of
+        the form (CHARSET, LANGUAGE, VALUE).  Note that both CHARSET and
+        LANGUAGE can be None, in which case you should consider VALUE to be
+        encoded in the us-ascii charset.  You can usually ignore LANGUAGE.
+        The parameter value (either the returned string, or the VALUE item in
+        the 3-tuple) is always unquoted, unless unquote is set to False.
+
+        If your application doesn't care whether the parameter was RFC 2231
+        encoded, it can turn the return value into a string as follows:
+
+            param = msg.get_param('foo')
+            param = email.utils.collapse_rfc2231_value(rawparam)
+
+        """
+        if header not in self:
+            return failobj
+        for k, v in self._get_params_preserve(failobj, header):
+            if k.lower() == param.lower():
+                if unquote:
+                    return _unquotevalue(v)
+                else:
+                    return v
+        return failobj
+
    def set_param(self, param, value, header='Content-Type', requote=True,
                  charset=None, language=''):
        """Set a parameter in the Content-Type header.

        If the parameter already exists in the header, its value will be
        replaced with the new value.

        If header is Content-Type and has not yet been defined for this
        message, it will be set to "text/plain" and the new parameter and
        value will be appended as per RFC 2045.

        An alternate header can specified in the header argument, and all
        parameters will be quoted as necessary unless requote is False.

        If charset is specified, the parameter will be encoded according to RFC
        2231.  Optional language specifies the RFC 2231 language, defaulting
        to the empty string.  Both charset and language should be strings.
        """
        # An explicit charset promotes the value to an RFC 2231 3-tuple.
        if not isinstance(value, tuple) and charset:
            value = (charset, language, value)

        # Missing Content-Type defaults to text/plain per RFC 2045.
        if header not in self and header.lower() == 'content-type':
            ctype = 'text/plain'
        else:
            ctype = self.get(header)
        if not self.get_param(param, header=header):
            # Parameter not present yet: append it to the current value.
            if not ctype:
                ctype = _formatparam(param, value, requote)
            else:
                ctype = SEMISPACE.join(
                    [ctype, _formatparam(param, value, requote)])
        else:
            # Parameter exists: rebuild the entire header value, swapping in
            # the new value while keeping every other parameter in order.
            ctype = ''
            for old_param, old_value in self.get_params(header=header,
                                                        unquote=requote):
                append_param = ''
                if old_param.lower() == param.lower():
                    append_param = _formatparam(param, value, requote)
                else:
                    append_param = _formatparam(old_param, old_value, requote)
                if not ctype:
                    ctype = append_param
                else:
                    ctype = SEMISPACE.join([ctype, append_param])
        # Only rewrite the header when the value actually changed, so header
        # order is preserved whenever possible.
        if ctype != self.get(header):
            del self[header]
            self[header] = ctype
+
+    def del_param(self, param, header='content-type', requote=True):
+        """Remove the given parameter completely from the Content-Type header.
+
+        The header will be re-written in place without the parameter or its
+        value. All values will be quoted as necessary unless requote is
+        False.  Optional header specifies an alternative to the Content-Type
+        header.
+        """
+        if header not in self:
+            return
+        new_ctype = ''
+        for p, v in self.get_params(header=header, unquote=requote):
+            if p.lower() != param.lower():
+                if not new_ctype:
+                    new_ctype = _formatparam(p, v, requote)
+                else:
+                    new_ctype = SEMISPACE.join([new_ctype,
+                                                _formatparam(p, v, requote)])
+        if new_ctype != self.get(header):
+            del self[header]
+            self[header] = new_ctype
+
    def set_type(self, type, header='Content-Type', requote=True):
        """Set the main type and subtype for the Content-Type header.

        type must be a string in the form "maintype/subtype", otherwise a
        ValueError is raised.

        This method replaces the Content-Type header, keeping all the
        parameters in place.  If requote is False, this leaves the existing
        header's quoting as is.  Otherwise, the parameters will be quoted (the
        default).

        An alternative header can be specified in the header argument.  When
        the Content-Type header is set, we'll always also add a MIME-Version
        header.
        """
        # BAW: should we be strict?
        if not type.count('/') == 1:
            raise ValueError
        # Set the Content-Type, you get a MIME-Version
        if header.lower() == 'content-type':
            del self['mime-version']
            self['MIME-Version'] = '1.0'
        if header not in self:
            self[header] = type
            return
        # Capture the existing parameters before rewriting the header, then
        # re-attach each one to the freshly-set type.
        params = self.get_params(header=header, unquote=requote)
        del self[header]
        self[header] = type
        # Skip the first param; it's the old type.
        for p, v in params[1:]:
            self.set_param(p, v, header, requote)
+
+    def get_filename(self, failobj=None):
+        """Return the filename associated with the payload if present.
+
+        The filename is extracted from the Content-Disposition header's
+        `filename' parameter, and it is unquoted.  If that header is missing
+        the `filename' parameter, this method falls back to looking for the
+        `name' parameter.
+        """
+        missing = object()
+        filename = self.get_param('filename', missing, 'content-disposition')
+        if filename is missing:
+            filename = self.get_param('name', missing, 'content-type')
+        if filename is missing:
+            return failobj
+        return utils.collapse_rfc2231_value(filename).strip()
+
+    def get_boundary(self, failobj=None):
+        """Return the boundary associated with the payload if present.
+
+        The boundary is extracted from the Content-Type header's `boundary'
+        parameter, and it is unquoted.
+        """
+        missing = object()
+        boundary = self.get_param('boundary', missing)
+        if boundary is missing:
+            return failobj
+        # RFC 2046 says that boundaries may begin but not end in w/s
+        return utils.collapse_rfc2231_value(boundary).rstrip()
+
    def set_boundary(self, boundary):
        """Set the boundary parameter in Content-Type to 'boundary'.

        This is subtly different than deleting the Content-Type header and
        adding a new one with a new boundary parameter via add_header().  The
        main difference is that using the set_boundary() method preserves the
        order of the Content-Type header in the original message.

        HeaderParseError is raised if the message has no Content-Type header.
        """
        missing = object()
        params = self._get_params_preserve(missing, 'content-type')
        if params is missing:
            # There was no Content-Type header, and we don't know what type
            # to set it to, so raise an exception.
            raise errors.HeaderParseError('No Content-Type header found')
        # Rebuild the parameter list, swapping in (or appending) the boundary.
        newparams = []
        foundp = False
        for pk, pv in params:
            if pk.lower() == 'boundary':
                newparams.append(('boundary', '"%s"' % boundary))
                foundp = True
            else:
                newparams.append((pk, pv))
        if not foundp:
            # The original Content-Type header had no boundary attribute.
            # Tack one on the end.  BAW: should we raise an exception
            # instead???
            newparams.append(('boundary', '"%s"' % boundary))
        # Replace the existing Content-Type header with the new value
        newheaders = []
        for h, v in self._headers:
            if h.lower() == 'content-type':
                # NOTE(review): the inner loop rebinds `v`, shadowing the
                # outer header value; harmless here because the outer `v` is
                # not used again on this branch.
                parts = []
                for k, v in newparams:
                    if v == '':
                        parts.append(k)
                    else:
                        parts.append('%s=%s' % (k, v))
                val = SEMISPACE.join(parts)
                newheaders.append(self.policy.header_store_parse(h, val))

            else:
                newheaders.append((h, v))
        self._headers = newheaders
+
    def get_content_charset(self, failobj=None):
        """Return the charset parameter of the Content-Type header.

        The returned string is always coerced to lower case.  If there is no
        Content-Type header, or if that header has no charset parameter,
        failobj is returned.
        """
        missing = object()
        charset = self.get_param('charset', missing)
        if charset is missing:
            return failobj
        if isinstance(charset, tuple):
            # RFC 2231 encoded, so decode it, and it better end up as ascii.
            pcharset = charset[0] or 'us-ascii'
            try:
                # LookupError will be raised if the charset isn't known to
                # Python.  UnicodeError will be raised if the encoded text
                # contains a character not in the charset.
                as_bytes = charset[2].encode('raw-unicode-escape')
                charset = str(as_bytes, pcharset)
            except (LookupError, UnicodeError):
                # Fall back to the raw (undecoded) VALUE item.
                charset = charset[2]
        # charset characters must be in us-ascii range
        try:
            charset.encode('us-ascii')
        except UnicodeError:
            return failobj
        # RFC 2046, $4.1.2 says charsets are not case sensitive
        return charset.lower()
+
+    def get_charsets(self, failobj=None):
+        """Return a list containing the charset(s) used in this message.
+
+        The returned list of items describes the Content-Type headers'
+        charset parameter for this message and all the subparts in its
+        payload.
+
+        Each item will either be a string (the value of the charset parameter
+        in the Content-Type header of that part) or the value of the
+        'failobj' parameter (defaults to None), if the part does not have a
+        main MIME type of "text", or the charset is not defined.
+
+        The list will contain one string for each part of the message, plus
+        one for the container message (i.e. self), so that a non-multipart
+        message will still return a list of length 1.
+        """
+        return [part.get_content_charset(failobj) for part in self.walk()]
+
+    # I.e. def walk(self): ...
+    from email.iterators import walk
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/parser.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/parser.py
new file mode 100644
index 00000000..34ee58ac
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/parser.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
+# Contact: email-sig@python.org
+
+"""A parser of RFC 2822 and MIME email messages."""
+
+__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
+
+import warnings
+from io import StringIO, TextIOWrapper
+
+from email.feedparser import FeedParser, BytesFeedParser
+from email.message import Message
+from email._policybase import compat32
+
+
+
class Parser:
    def __init__(self, _class=Message, policy=compat32):
        """Parser of RFC 2822 and MIME email messages.

        Builds an in-memory object tree representing the email message,
        which can then be manipulated or handed to a Generator to recover
        the textual representation of the message.

        The input must be a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header; the
        header block ends at a blank line or at end of input.

        _class is the factory used for new message objects; it must be
        callable with zero arguments.  Default is Message.Message.

        The policy keyword specifies a policy object that controls a number
        of aspects of the parser's operation.  The default policy maintains
        backward compatibility.
        """
        self._class = _class
        self.policy = policy

    def parse(self, fp, headersonly=False):
        """Create a message structure from the data in a file.

        Reads all the data from the file and returns the root of the message
        structure.  When headersonly is true, parsing stops after the header
        block; otherwise (the default) the entire file is consumed.
        """
        feedparser = FeedParser(self._class, policy=self.policy)
        if headersonly:
            feedparser._set_headersonly()
        # Feed the parser in fixed-size chunks to bound memory usage.
        while True:
            chunk = fp.read(8192)
            if not chunk:
                break
            feedparser.feed(chunk)
        return feedparser.close()

    def parsestr(self, text, headersonly=False):
        """Create a message structure from a string.

        Returns the root of the message structure.  When headersonly is
        true, parsing stops after the header block; otherwise (the default)
        the whole string is parsed.
        """
        return self.parse(StringIO(text), headersonly=headersonly)
+
+
+
class HeaderParser(Parser):
    """A Parser that reads only the header block, leaving the body unparsed."""

    def parse(self, fp, headersonly=True):
        # headersonly is forced True regardless of the argument, which is
        # accepted only for signature compatibility with Parser.
        return Parser.parse(self, fp, True)

    def parsestr(self, text, headersonly=True):
        return Parser.parsestr(self, text, True)
+
+
class BytesParser:

    def __init__(self, *args, **kw):
        """Parser of binary RFC 2822 and MIME email messages.

        Creates an in-memory object tree representing the email message, which
        can then be manipulated and turned over to a Generator to return the
        textual representation of the message.

        The input must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header.  The
        header block is terminated either by the end of the input or by a
        blank line.

        _class is the class to instantiate for new message objects when they
        must be created.  This class must have a constructor that can take
        zero arguments.  Default is Message.Message.
        """
        # All real work is delegated to a text-mode Parser.
        self.parser = Parser(*args, **kw)

    def parse(self, fp, headersonly=False):
        """Create a message structure from the data in a binary file.

        Reads all the data from the file and returns the root of the message
        structure.  Optional headersonly is a flag specifying whether to stop
        parsing after reading the headers or not.  The default is False,
        meaning it parses the entire contents of the file.
        """
        fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
        try:
            return self.parser.parse(fp, headersonly)
        finally:
            # Detach rather than close: closing the TextIOWrapper (as the
            # previous `with fp:` did) would also close the caller's
            # underlying binary stream.
            fp.detach()

    def parsebytes(self, text, headersonly=False):
        """Create a message structure from a byte string.

        Returns the root of the message structure.  Optional headersonly is a
        flag specifying whether to stop parsing after reading the headers or
        not.  The default is False, meaning it parses the entire contents of
        the file.
        """
        # Mirror parse(): treat the bytes as ascii with surrogate escapes so
        # arbitrary 8-bit data survives the round trip.
        text = text.decode('ASCII', errors='surrogateescape')
        return self.parser.parsestr(text, headersonly)
+
+
class BytesHeaderParser(BytesParser):
    """A BytesParser that parses only the header block."""

    def parse(self, fp, headersonly=True):
        # Always header-only, mirroring HeaderParser for text input.
        return BytesParser.parse(self, fp, headersonly=True)

    def parsebytes(self, text, headersonly=True):
        return BytesParser.parsebytes(self, text, headersonly=True)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/quoprimime.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/quoprimime.py
new file mode 100644
index 00000000..bc02281b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/quoprimime.py
@@ -0,0 +1,322 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Quoted-printable content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode US ASCII-like 8-bit data called `quoted-printable'.  It is used to
+safely encode text that is in a character set similar to the 7-bit US ASCII
+character set, but that includes some 8-bit characters that are normally not
+allowed in email bodies or headers.
+
+Quoted-printable is very space-inefficient for encoding binary files; use the
+email.base64mime module for that instead.
+
+This module provides an interface to encode and decode both headers and bodies
+with quoted-printable encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:/From:/Cc: etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character
+conversion necessary for proper internationalized headers; it only
+does dumb encoding and decoding.  To deal with the various line
+wrapping issues, use the email.header module.
+"""
+
+__all__ = [
+    'body_decode',
+    'body_encode',
+    'body_length',
+    'decode',
+    'decodestring',
+    'header_decode',
+    'header_encode',
+    'header_length',
+    'quote',
+    'unquote',
+    ]
+
+import re
+import io
+
+from string import ascii_letters, digits, hexdigits
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
# Build a mapping of octets to the expansion of that octet.  Since we're only
# going to have 256 of these things, this isn't terribly inefficient
# space-wise.  Remember that headers and bodies have different sets of safe
# characters.  Initialize both maps with the full expansion, and then override
# the safe bytes with the more compact form.
_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256))
_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy()

# Safe header bytes which need no encoding.
for c in b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'):
    _QUOPRI_HEADER_MAP[c] = chr(c)
# Headers have one other special encoding; spaces become underscores.
_QUOPRI_HEADER_MAP[ord(' ')] = '_'

# Safe body bytes which need no encoding.
# NOTE: '=' is deliberately absent from this set -- it introduces escape
# sequences and must itself always be encoded.
for c in (b' !"#$%&\'()*+,-./0123456789:;<>'
          b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
          b'abcdefghijklmnopqrstuvwxyz{|}~\t'):
    _QUOPRI_BODY_MAP[c] = chr(c)
+
+
+
+# Helpers
def header_check(octet):
    """Return True if the octet should be escaped with header quopri."""
    # Octets that are their own expansion in the header map are safe.
    return _QUOPRI_HEADER_MAP[octet] != chr(octet)
+
+
def body_check(octet):
    """Return True if the octet should be escaped with body quopri."""
    # Octets that are their own expansion in the body map are safe.
    return _QUOPRI_BODY_MAP[octet] != chr(octet)
+
+
def header_length(bytearray):
    """Return a header quoted-printable encoding length.

    Note that this does not include any RFC 2047 chrome added by
    `header_encode()`.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for headers.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_HEADER_MAP[octet])
    return total
+
+
def body_length(bytearray):
    """Return a body quoted-printable encoding length.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for bodies.
    """
    lengths = (len(_QUOPRI_BODY_MAP[octet]) for octet in bytearray)
    return sum(lengths)
+
+
def _max_append(L, s, maxlen, extra=''):
    # Append s (a string, or an octet which is converted to its character)
    # to the last chunk of L when it still fits within maxlen; otherwise
    # start a new chunk with leading whitespace stripped.
    if not isinstance(s, str):
        s = chr(s)
    if L and len(L[-1]) + len(s) <= maxlen:
        L[-1] += extra + s
    else:
        L.append(s.lstrip())
+
+
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    # s[1:3] skips the leading '=' and grabs the two hex digits.
    return chr(int(s[1:3], 16))
+
+
def quote(c):
    """Return the quoted-printable escape ('=XX') for the character c."""
    return '=%02X' % ord(c)
+
+
+
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable,
    but is used specifically for email header fields so that charsets with
    mostly 7-bit (and a few 8-bit) characters remain more or less readable
    in non-RFC 2045 aware mail clients.

    charset names the character set recorded in the RFC 2046 chrome; it
    defaults to iso-8859-1.
    """
    # An empty header encodes to an empty string, with no RFC chrome at all.
    if not header_bytes:
        return ''
    # Expand every octet through the header map, then wrap the result in
    # the =?charset?q?...?= chrome.
    encoded = ''.join(_QUOPRI_HEADER_MAP[octet] for octet in header_bytes)
    return '=?%s?q?%s?=' % (charset, encoded)
+
+
class _body_accumulator(io.StringIO):
    """A StringIO that tracks how much room is left on the current line.

    Helper for body_encode(): inserts soft line breaks ('=' + eol) so that
    no encoded output line exceeds maxlinelen columns.
    """

    def __init__(self, maxlinelen, eol, *args, **kw):
        super().__init__(*args, **kw)
        self.eol = eol
        # room: characters still available on the current output line.
        self.maxlinelen = self.room = maxlinelen

    def write_str(self, s):
        """Add string s to the accumulated body."""
        self.write(s)
        self.room -= len(s)

    def newline(self):
        """Write eol, then start new line."""
        self.write_str(self.eol)
        self.room = self.maxlinelen

    def write_soft_break(self):
        """Write a soft break, then start a new line."""
        self.write_str('=')
        self.newline()

    def write_wrapped(self, s, extra_room=0):
        """Add a soft line break if needed, then write s."""
        if self.room < len(s) + extra_room:
            self.write_soft_break()
        self.write_str(s)

    def write_char(self, c, is_last_char):
        # c is either a literal character or an already-quoted '=XX' triple
        # (the caller quotes unsafe octets before calling us).
        if not is_last_char:
            # Another character follows on this line, so we must leave
            # extra room, either for it or a soft break, and whitespace
            # need not be quoted.
            self.write_wrapped(c, extra_room=1)
        elif c not in ' \t':
            # For this and remaining cases, no more characters follow,
            # so there is no need to reserve extra room (since a hard
            # break will immediately follow).
            self.write_wrapped(c)
        elif self.room >= 3:
            # It's a whitespace character at end-of-line, and we have room
            # for the three-character quoted encoding.
            self.write(quote(c))
        elif self.room == 2:
            # There's room for the whitespace character and a soft break.
            self.write(c)
            self.write_soft_break()
        else:
            # There's room only for a soft break.  The quoted whitespace
            # will be the only content on the subsequent line.
            self.write_soft_break()
            self.write(quote(c))
+
+
def body_encode(body, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters before the
    eol string (maxlinelen defaults to 76 characters, the maximum value
    permitted by RFC 2045).  Long lines will have the 'soft line break'
    quoted-printable character "=" appended to them, so the decoded text will
    be identical to the original text.

    The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
    followed by a soft line break.  Smaller values will generate a
    ValueError.

    """

    if maxlinelen < 4:
        raise ValueError("maxlinelen must be at least 4")
    if not body:
        return body

    # The last line may or may not end in eol, but all other lines do.
    last_has_eol = (body[-1] in '\r\n')

    # This accumulator will make it easier to build the encoded body.
    encoded_body = _body_accumulator(maxlinelen, eol)

    lines = body.splitlines()
    last_line_no = len(lines) - 1
    for line_no, line in enumerate(lines):
        last_char_index = len(line) - 1
        for i, c in enumerate(line):
            # Quote unsafe octets; the accumulator handles line wrapping.
            if body_check(ord(c)):
                c = quote(c)
            encoded_body.write_char(c, i==last_char_index)
        # Add an eol if input line had eol.  All input lines have eol except
        # possibly the last one.
        if line_no < last_line_no or last_has_eol:
            encoded_body.newline()

    return encoded_body.getvalue()
+
+
+
+# BAW: I'm not sure if the intent was for the signature of this function to be
+# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            # A blank input line decodes to a bare line separator.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break (i.e. join this line to the next without eol).
            elif i+1 == n:
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
        # Strip the whole eol sequence -- it may be longer than one
        # character (e.g. '\r\n'); the previous code removed only the
        # final character, leaving a stray '\r' behind for CRLF eols.
        decoded = decoded[:-len(eol)]
    return decoded
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_decode = decode
+decodestring = decode
+
+
+
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    # Delegate the hex decoding to unquote().
    return unquote(match.group(0))
+
+
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In the header variant of quoted-printable, '_' stands for a space.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match,
                  s.replace('_', ' '), flags=re.ASCII)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/utils.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/utils.py
new file mode 100644
index 00000000..93a625c8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/email/utils.py
@@ -0,0 +1,391 @@
+# Copyright (C) 2001-2010 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Miscellaneous utilities."""
+
+__all__ = [
+    'collapse_rfc2231_value',
+    'decode_params',
+    'decode_rfc2231',
+    'encode_rfc2231',
+    'formataddr',
+    'formatdate',
+    'format_datetime',
+    'getaddresses',
+    'make_msgid',
+    'mktime_tz',
+    'parseaddr',
+    'parsedate',
+    'parsedate_tz',
+    'parsedate_to_datetime',
+    'unquote',
+    ]
+
+import os
+import re
+import time
+import base64
+import random
+import socket
+import datetime
+import urllib.parse
+import warnings
+from io import StringIO
+
+from email._parseaddr import quote
+from email._parseaddr import AddressList as _AddressList
+from email._parseaddr import mktime_tz
+
+from email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
+
+from quopri import decodestring as _qdecode
+
+# Intrapackage imports
+from email.encoders import _bencode, _qencode
+from email.charset import Charset
+
+COMMASPACE = ', '
+EMPTYSTRING = ''
+UEMPTYSTRING = ''
+CRLF = '\r\n'
+TICK = "'"
+
+specialsre = re.compile(r'[][\\()<>@,:;".]')
+escapesre = re.compile(r'[\\"]')
+
+# How to figure out if we are processing strings that come from a byte
+# source with undecodable characters.
+_has_surrogates = re.compile(
+    '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search
+
+# How to deal with a string containing bytes before handing it to the
+# application through the 'normal' interface.
+def _sanitize(string):
+    # Turn any escaped bytes into unicode 'unknown' char.
+    original_bytes = string.encode('ascii', 'surrogateescape')
+    return original_bytes.decode('ascii', 'replace')
+
+
+# Helpers
+
def formataddr(pair, charset='utf-8'):
    """The inverse of parseaddr(): turn a (realname, email_address) 2-tuple
    into a string usable in an RFC 2822 From/To/Cc header.

    If the realname is false, the address is returned unmodified.  The
    optional charset (a str codec name or a Charset-like object with a
    header_encode method, default 'utf-8') is used to encode a realname
    that is not ASCII safe.
    """
    name, address = pair
    # The address MUST (per RFC) be ascii; let encode() raise otherwise.
    address.encode('ascii')
    if not name:
        return address
    try:
        name.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII realname: RFC 2047-encode it with the given charset.
        if isinstance(charset, str):
            charset = Charset(charset)
        return "%s <%s>" % (charset.header_encode(name), address)
    quotes = '"' if specialsre.search(name) else ''
    escaped = escapesre.sub(r'\\\g<0>', name)
    return '%s%s%s <%s>' % (quotes, escaped, quotes, address)
+
+
+
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    # Join all field values into one comma-separated address list and
    # let the parser split it back apart.
    return _AddressList(COMMASPACE.join(fieldvalues)).addresslist
+
+
+
+ecre = re.compile(r'''
+  =\?                   # literal =?
+  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
+  \?                    # literal ?
+  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
+  \?                    # literal ?
+  (?P<atom>.*?)         # non-greedy up to the next ?= is the atom
+  \?=                   # literal ?=
+  ''', re.VERBOSE | re.IGNORECASE)
+
+
+def _format_timetuple_and_zone(timetuple, zone):
+    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+        ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
+        timetuple[2],
+        ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+         'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
+        timetuple[0], timetuple[3], timetuple[4], timetuple[5],
+        zone)
+
def formatdate(timeval=None, localtime=False, usegmt=False):
    """Returns a date string as specified by RFC 2822, e.g.:

    Fri, 09 Nov 2001 01:08:47 -0000

    Optional timeval if given is a floating point time value as accepted by
    gmtime() and localtime(), otherwise the current time is used.

    Optional localtime is a flag that when True, interprets timeval, and
    returns a date relative to the local timezone instead of UTC, properly
    taking daylight savings time into account.

    Optional argument usegmt means that the timezone is written out as
    an ascii string, not numeric one (so "GMT" instead of "+0000"). This
    is needed for HTTP, and is only used when localtime==False.
    """
    # Note: we cannot use strftime() because that honors the locale and RFC
    # 2822 requires that day and month names be the English abbreviations.
    if timeval is None:
        timeval = time.time()
    if localtime:
        now = time.localtime(timeval)
        # Calculate timezone offset, based on whether the local zone has
        # daylight savings time, and whether DST is in effect.
        if time.daylight and now[-1]:
            offset = time.altzone
        else:
            offset = time.timezone
        # NOTE: despite the name, 'minutes' holds the *seconds* remainder
        # of the divmod; the '// 60' below converts it to minutes.
        hours, minutes = divmod(abs(offset), 3600)
        # Remember offset is in seconds west of UTC, but the timezone is in
        # minutes east of UTC, so the signs differ.
        if offset > 0:
            sign = '-'
        else:
            sign = '+'
        zone = '%s%02d%02d' % (sign, hours, minutes // 60)
    else:
        now = time.gmtime(timeval)
        # Timezone offset is always -0000
        if usegmt:
            zone = 'GMT'
        else:
            zone = '-0000'
    return _format_timetuple_and_zone(now, zone)
+
def format_datetime(dt, usegmt=False):
    """Turn a datetime into a date string as specified in RFC 2822.

    If usegmt is True, dt must be an aware datetime with an offset of zero.
    In this case 'GMT' will be rendered instead of the normal +0000 required
    by RFC2822.  This is to support HTTP headers involving date stamps.
    """
    if usegmt:
        # Only a genuinely-UTC aware datetime may be labelled 'GMT'.
        if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    elif dt.tzinfo is None:
        zone = '-0000'
    else:
        zone = dt.strftime("%z")
    return _format_timetuple_and_zone(dt.timetuple(), zone)
+
+
def make_msgid(idstring=None, domain=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.  Optional domain if given provides the
    portion of the message id after the '@'.  It defaults to the locally
    defined hostname.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    idstring = '' if idstring is None else '.' + idstring
    if domain is None:
        domain = socket.getfqdn()
    # pid + random suffix keep concurrent senders from colliding.
    return '<%s.%s.%s%s@%s>' % (utcdate, os.getpid(),
                                random.randrange(100000), idstring, domain)
+
+
def parsedate_to_datetime(data):
    """Parse an RFC 2822 date string into a datetime (aware when the
    string carried a numeric timezone, otherwise naive)."""
    parsed = _parsedate_tz(data)
    tz = parsed[-1]
    fields = parsed[:6]
    if tz is None:
        return datetime.datetime(*fields)
    return datetime.datetime(
        *fields, tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
+
+
def parseaddr(addr):
    """Parse addr into its (realname, email_address) parts; returns
    ('', '') when nothing parseable is found."""
    parsed = _AddressList(addr).addresslist
    return parsed[0] if parsed else ('', '')
+
+
+# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
    """Remove surrounding double quotes (un-backslashing the contents) or
    angle brackets from a string; anything else is returned unchanged."""
    if len(str) > 1:
        if str[0] == '"' and str[-1] == '"':
            inner = str[1:-1]
            return inner.replace('\\\\', '\\').replace('\\"', '"')
        if str[0] == '<' and str[-1] == '>':
            return str[1:-1]
    return str
+
+
+
+# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
    """Decode string according to RFC 2231"""
    # A full value looks like charset'language'text; fewer than two ticks
    # means there is nothing to split out.
    parts = s.split(TICK, 2)
    return parts if len(parts) > 2 else (None, None, s)
+
+
def encode_rfc2231(s, charset=None, language=None):
    """Encode string according to RFC 2231.

    If neither charset nor language is given, then s is returned as-is.  If
    charset is given but not language, the string is encoded using the empty
    string for language.
    """
    quoted = urllib.parse.quote(s, safe='', encoding=charset or 'ascii')
    if charset is None and language is None:
        return quoted
    return "%s'%s'%s" % (charset, language or '', quoted)
+
+
+rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
+    re.ASCII)
+
def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).
    Returns a new list where RFC 2231 continuations (name*0, name*1, ...)
    have been merged; extended (charset-tagged) values come back as a
    (charset, language, quoted-value) 3-tuple, plain ones as quoted strings.
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is passed through unchanged (not subject to
    # continuation handling).
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' on the name marks a %-encoded segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # Decode as "latin-1", so the characters in s directly
                    # represent the percent-encoded octet values.
                    # collapse_rfc2231_value treats this as an octet sequence.
                    s = urllib.parse.unquote(s, encoding="latin-1")
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
+
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    """Collapse an RFC 2231 (charset, language, text) triple into a plain
    string; non-triples are simply unquoted."""
    if not isinstance(value, tuple) or len(value) != 3:
        return unquote(value)
    charset, language, text = value
    # Interpret the text's code points as raw octets, then decode those
    # octets with the declared charset.
    octets = text.encode('raw-unicode-escape')
    try:
        return octets.decode(charset, errors)
    except LookupError:
        # charset is not a known codec; fall back to the quoted text.
        return unquote(text)
+
+
+#
+# datetime doesn't provide a localtime function yet, so provide one.  Code
+# adapted from the patch in issue 9527.  This may not be perfect, but it is
+# better than not having it.
+#
+
def localtime(dt=None, isdst=-1):
    """Return local time as an aware datetime object.

    If called without arguments, return current time.  Otherwise *dt*
    argument should be a datetime instance, and it is converted to the
    local time zone according to the system time zone database.  If *dt* is
    naive (that is, dt.tzinfo is None), it is assumed to be in local time.
    In this case, a positive or zero value for *isdst* causes localtime to
    presume initially that summer time (for example, Daylight Saving Time)
    is or is not (respectively) in effect for the specified time.  A
    negative value for *isdst* causes the localtime() function to attempt
    to divine whether summer time is in effect for the specified time.

    """
    if dt is None:
        return datetime.datetime.now(datetime.timezone.utc).astimezone()
    if dt.tzinfo is not None:
        return dt.astimezone()
    # We have a naive datetime.  Convert to a (localtime) timetuple and pass to
    # system mktime together with the isdst hint.  System mktime will return
    # seconds since epoch.
    tm = dt.timetuple()[:-1] + (isdst,)
    seconds = time.mktime(tm)
    localtm = time.localtime(seconds)
    try:
        # Platforms exposing tm_gmtoff/tm_zone give us the offset directly.
        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
        tz = datetime.timezone(delta, localtm.tm_zone)
    except AttributeError:
        # Compute UTC offset and compare with the value implied by tm_isdst.
        # If the values match, use the zone name implied by tm_isdst.
        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
        dst = time.daylight and localtm.tm_isdst > 0
        gmtoff = -(time.altzone if dst else time.timezone)
        if delta == datetime.timedelta(seconds=gmtoff):
            tz = datetime.timezone(delta, time.tzname[dst])
        else:
            # Offset disagrees with tm_isdst; report the offset without a name.
            tz = datetime.timezone(delta)
    return dt.replace(tzinfo=tz)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/errno.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/errno.py
new file mode 100644
index 00000000..7b7935ef
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/errno.py
@@ -0,0 +1,38 @@
# Symbolic errno names (a Linux-compatible subset).
EPERM   = 1   # Operation not permitted
ENOENT  = 2   # No such file or directory
ESRCH   = 3   # No such process
EINTR   = 4   # Interrupted system call
EIO     = 5   # I/O error
ENXIO   = 6   # No such device or address
E2BIG   = 7   # Argument list too long
ENOEXEC = 8   # Exec format error
EBADF   = 9   # Bad file number
ECHILD  = 10  # No child processes
EAGAIN  = 11  # Try again
ENOMEM  = 12  # Out of memory
EACCES  = 13  # Permission denied
EFAULT  = 14  # Bad address
ENOTBLK = 15  # Block device required
EBUSY   = 16  # Device or resource busy
EEXIST  = 17  # File exists
EXDEV   = 18  # Cross-device link
ENODEV  = 19  # No such device
ENOTDIR = 20  # Not a directory
EISDIR  = 21  # Is a directory
EINVAL  = 22  # Invalid argument
ENFILE  = 23  # File table overflow
EMFILE  = 24  # Too many open files
ENOTTY  = 25  # Not a typewriter
ETXTBSY = 26  # Text file busy
EFBIG   = 27  # File too large
ENOSPC  = 28  # No space left on device
ESPIPE  = 29  # Illegal seek
EROFS   = 30  # Read-only file system
EMLINK  = 31  # Too many links
EPIPE   = 32  # Broken pipe
EDOM    = 33  # Math argument out of domain of func
ERANGE  = 34  # Math result not representable
EAFNOSUPPORT = 97 # Address family not supported by protocol
ECONNRESET = 104 # Connection reset by peer
ETIMEDOUT = 110 # Connection timed out
EINPROGRESS = 115 # Operation now in progress
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fcntl.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fcntl.py
new file mode 100644
index 00000000..5917840c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fcntl.py
@@ -0,0 +1,37 @@
+import ffi
+import os
+import ffilib
+
+
libc = ffilib.libc()

# C bindings: int fcntl(int, int, ...) and int ioctl(int, int, ...),
# each bound twice -- once with a long third argument ("iil") and once
# with a pointer/buffer third argument ("iip").
fcntl_l = libc.func("i", "fcntl", "iil")
fcntl_s = libc.func("i", "fcntl", "iip")
ioctl_l = libc.func("i", "ioctl", "iil")
ioctl_s = libc.func("i", "ioctl", "iip")
+
+
def fcntl(fd, op, arg=0):
    """Perform fcntl operation *op* on file descriptor *fd*; *arg* may be
    an int or a buffer.  Returns the C call's result after error check."""
    # Exact type check on purpose: only a plain int takes the long variant.
    if type(arg) is int:
        res = fcntl_l(fd, op, arg)
    else:
        # TODO: Not compliant. CPython says that arg should be immutable,
        # and possibly mutated buffer is returned.
        res = fcntl_s(fd, op, arg)
    os.check_error(res)
    return res
+
+
def ioctl(fd, op, arg=0, mut=False):
    """Perform ioctl *op* on *fd*; *arg* may be an int or (so far only a
    mutable) buffer.  Returns the C call's result after error check."""
    # Exact type check on purpose: only a plain int takes the long variant.
    if type(arg) is int:
        res = ioctl_l(fd, op, arg)
    else:
        # TODO: immutable buffers are not supported yet
        assert mut
        res = ioctl_s(fd, op, arg)
    os.check_error(res)
    return res
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ffilib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ffilib.py
new file mode 100644
index 00000000..dc4d672a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ffilib.py
@@ -0,0 +1,46 @@
+import sys
+try:
+    import ffi
+except ImportError:
+    ffi = None
+
+_cache = {}
+
def open(name, maxver=10, extra=()):
    """Open shared library *name*, trying platform-specific file names
    (versioned .so on Linux, .dylib/.dll elsewhere, then *extra*).
    Results are cached per name; returns None when ffi is unavailable."""
    if not ffi:
        return None
    if name in _cache:
        return _cache[name]
    # Build the candidate file-name list up front.
    if sys.platform == "linux":
        candidates = ['%s.so' % name]
        candidates += ['%s.so.%u' % (name, v) for v in range(maxver, -1, -1)]
    else:
        candidates = ['%s.%s' % (name, ext) for ext in ('dylib', 'dll')]
    candidates += list(extra)
    err = None
    for cand in candidates:
        try:
            lib = ffi.open(cand)
            _cache[name] = lib
            return lib
        except OSError as e:
            err = e
    # Nothing loaded: re-raise the last failure.
    raise err
+
def libc():
    # The standard C library; probe versioned names up to libc.so.6.
    return open("libc", 6)
+
# Find out bitness of the platform, even if long ints are not supported
# TODO: All bitness differences should be removed from micropython-lib, and
# this snippet too.
bitness = 1
v = sys.maxsize
while v:
    # Count the bits of sys.maxsize by shifting it out one at a time
    # (the starting 1 accounts for the sign bit).
    bitness += 1
    v >>= 1
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fnmatch.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fnmatch.py
new file mode 100644
index 00000000..d5f7a43f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fnmatch.py
@@ -0,0 +1,111 @@
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression.  They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN.  (It does not compile it.)
+"""
+import os
+import os.path
+import posixpath
+import re
+#import functools
+
+__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
+
def fnmatch(name, pat):
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    *       matches everything
    ?       matches any single character
    [seq]   matches any character in seq
    [!seq]  matches any char not in seq

    An initial period in FILENAME is not special.  Both FILENAME and
    PATTERN are first case-normalized if the operating system requires
    it; use fnmatchcase(FILENAME, PATTERN) to skip that.
    """
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
+
+#@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
    """Compile shell pattern *pat* (str or bytes) to a regex match function."""
    if isinstance(pat, bytes):
        # translate() works on str; round-trip through latin-1 so every
        # byte survives unchanged.
        regex = bytes(translate(str(pat, 'ISO-8859-1')), 'ISO-8859-1')
    else:
        regex = translate(pat)
    return re.compile(regex).match
+
def filter(names, pat):
    """Return the subset of the list NAMES that match PAT."""
    pat = os.path.normcase(pat)
    match = _compile_pattern(pat)
    if os.path is posixpath:
        # normcase on posix is a no-op, so skip it inside the loop.
        return [name for name in names if match(name)]
    return [name for name in names if match(os.path.normcase(name))]
+
def fnmatchcase(name, pat):
    """Test whether FILENAME matches PATTERN, including case.

    This is a version of fnmatch() which doesn't case-normalize
    its arguments.
    """
    return _compile_pattern(pat)(name) is not None
+
+
def translate(pat):
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    # Fix: regex fragments containing backslashes are written as raw
    # strings -- '\Z' was an invalid escape sequence in a plain literal
    # (DeprecationWarning; SyntaxWarning/error in newer Python versions).
    i, n = 0, len(pat)
    res = ''
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            res = res + '.*'
        elif c == '?':
            res = res + '.'
        elif c == '[':
            # Scan for the closing ']' of the character set; a leading
            # '!' or an immediate ']' is part of the set, not the close.
            j = i
            if j < n and pat[j] == '!':
                j = j+1
            if j < n and pat[j] == ']':
                j = j+1
            while j < n and pat[j] != ']':
                j = j+1
            if j >= n:
                # Unterminated set: treat '[' as a literal character.
                res = res + r'\['
            else:
                stuff = pat[i:j].replace('\\', r'\\')
                i = j+1
                if stuff[0] == '!':
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        else:
            res = res + re.escape(c)
    # (?ms) so '*'/'?' cross newlines too; \Z anchors at end of string.
    # Original patterns is undefined, see http://bugs.python.org/issue21464
    return '(?ms)' + res + r'\Z'
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/formatter.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/formatter.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fractions.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/fractions.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ftplib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ftplib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/functools.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/functools.py
new file mode 100644
index 00000000..97196bef
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/functools.py
@@ -0,0 +1,27 @@
def partial(func, *args, **kwargs):
    """Return a callable that behaves like *func* with *args*/*kwargs*
    pre-applied; call-time keywords override the pre-applied ones."""
    def wrapper(*call_args, **call_kwargs):
        merged = dict(kwargs, **call_kwargs)
        return func(*(args + call_args), **merged)
    return wrapper
+
+
def update_wrapper(wrapper, wrapped, assigned=None, updated=None):
    # Dummy impl: metadata (__name__, __doc__, ...) is NOT copied;
    # the wrapper is returned unchanged for API compatibility.
    return wrapper
+
+
def wraps(wrapped, assigned=None, updated=None):
    # Dummy impl: the returned decorator is the identity function, so the
    # decorated callable keeps its own metadata.
    return lambda x: x
+
+
# Sentinel so that None is usable as an explicit initializer (as in CPython).
_INITIAL_MISSING = object()


def reduce(function, iterable, initializer=_INITIAL_MISSING):
    """Apply *function* of two arguments cumulatively to *iterable*,
    reducing it to a single value; *initializer*, if given, is placed
    before the items (and serves as the result for an empty iterable).

    Fixes vs. the previous version: an explicit ``initializer=None`` is
    honored instead of being treated as "missing", and an empty iterable
    without an initializer raises TypeError (CPython behavior) rather
    than leaking StopIteration.
    """
    it = iter(iterable)
    if initializer is _INITIAL_MISSING:
        try:
            value = next(it)
        except StopIteration:
            raise TypeError(
                'reduce() of empty sequence with no initial value') from None
    else:
        value = initializer
    for element in it:
        value = function(value, element)
    return value
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/getopt.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/getopt.py
new file mode 100644
index 00000000..3d6ecbdd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/getopt.py
@@ -0,0 +1,215 @@
+"""Parser for command line options.
+
+This module helps scripts to parse the command line arguments in
+sys.argv.  It supports the same conventions as the Unix getopt()
+function (including the special meanings of arguments of the form `-'
+and `--').  Long options similar to those supported by GNU software
+may be used as well via an optional third argument.  This module
+provides two functions and an exception:
+
+getopt() -- Parse command line options
+gnu_getopt() -- Like getopt(), but allow option and non-option arguments
+to be intermixed.
+GetoptError -- exception (class) raised with 'opt' attribute, which is the
+option involved with the exception.
+"""
+
+# Long option support added by Lars Wirzenius <liw@iki.fi>.
+#
+# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
+# to class-based exceptions.
+#
+# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
+#
+# TODO for gnu_getopt():
+#
+# - GNU getopt_long_only mechanism
+# - allow the caller to specify ordering
+# - RETURN_IN_ORDER option
+# - GNU extension with '-' as first character of option string
+# - optional arguments, specified by double colons
+# - a option string with a W followed by semicolon should
+#   treat "-W foo" as "--foo"
+
+__all__ = ["GetoptError","error","getopt","gnu_getopt"]
+
+import os
+try:
+    from gettext import gettext as _
+except ImportError:
+    # Bootstrapping Python: gettext's dependencies not built yet
+    def _(s): return s
+
class GetoptError(Exception):
    """Raised when an option or its argument cannot be parsed; the
    offending option name is available on the ``opt`` attribute."""

    # Class-level defaults so the attributes always exist.
    opt = ''
    msg = ''

    def __init__(self, msg, opt=''):
        self.msg = msg
        self.opt = opt
        super().__init__(msg, opt)

    def __str__(self):
        return self.msg
+
+error = GetoptError # backward compatibility
+
def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parses command line options and parameter list.  args is the
    argument list to be parsed, without the leading reference to the
    running program (typically "sys.argv[1:]").  shortopts is the string
    of option letters the script recognizes, with options requiring an
    argument followed by a colon (same format as Unix getopt()).
    longopts, if given, is a list of long option names without the
    leading '--'; names taking an argument end with '='.

    The return value is a pair: a list of (option, value) tuples -- the
    option prefixed with '-' or '--', the value '' when the option takes
    none -- followed by the list of remaining program arguments (a
    trailing slice of the input).  Options appear in the order found, so
    repeats are preserved, and long and short options may be mixed.
    """
    if type(longopts) is str:
        longopts = [longopts]
    else:
        longopts = list(longopts)
    opts = []
    while args and args[0].startswith('-') and args[0] != '-':
        if args[0] == '--':
            # Explicit end-of-options marker.
            args = args[1:]
            break
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
    return opts, args
+
def gnu_getopt(args, shortopts, longopts = []):
    """gnu_getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.

    """

    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)

    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False

    while args:
        if args[0] == '--':
            # Explicit end-of-options marker: everything after is a plain arg.
            prog_args += args[1:]
            break

        if args[0][:2] == '--':
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0][:1] == '-' and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                # POSIX mode: first non-option ends option processing.
                prog_args += args
                break
            else:
                # GNU mode: remember the non-option and keep scanning.
                prog_args.append(args[0])
                args = args[1:]

    return opts, prog_args
+
def do_longs(opts, opt, longopts, args):
    """Consume one long option (text after '--'); append to *opts* and
    return the updated (opts, args) pair."""
    optarg = None
    if '=' in opt:
        # Inline argument form: --name=value
        opt, optarg = opt.split('=', 1)
    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            # Argument must come from the next word: --name value
            if not args:
                raise GetoptError(_('option --%s requires argument') % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError(_('option --%s must not have an argument') % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
+
+# Return:
+#   has_arg?
+#   full option name
def long_has_args(opt, longopts):
    """Return (takes_argument, full_option_name) for a long option,
    accepting any unambiguous prefix of a declared name."""
    possibilities = [o for o in longopts if o.startswith(opt)]
    if not possibilities:
        raise GetoptError(_('option --%s not recognized') % opt, opt)
    # Exact matches win over prefix matches.
    if opt in possibilities:
        return False, opt
    if opt + '=' in possibilities:
        return True, opt
    # No exact match, so the prefix must be unique.
    if len(possibilities) > 1:
        # XXX since possibilities contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
    unique_match = possibilities[0]
    if unique_match.endswith('='):
        return True, unique_match[:-1]
    return False, unique_match
+
def do_shorts(opts, optstring, shortopts, args):
    """Consume a cluster of short options (text after '-'); append each to
    *opts* and return the updated (opts, args) pair."""
    while optstring:
        opt, optstring = optstring[0], optstring[1:]
        if not short_has_arg(opt, shortopts):
            optarg = ''
        elif optstring:
            # Rest of the cluster is the argument: -ovalue
            optarg, optstring = optstring, ''
        else:
            # Argument is the next word: -o value
            if not args:
                raise GetoptError(_('option -%s requires argument') % opt,
                                  opt)
            optarg, args = args[0], args[1:]
        opts.append(('-' + opt, optarg))
    return opts, args
+
def short_has_arg(opt, shortopts):
    """True when short option *opt* requires an argument (i.e. the option
    letter is followed by ':' in *shortopts*)."""
    for i, letter in enumerate(shortopts):
        if letter != ':' and opt == letter:
            return shortopts.startswith(':', i + 1)
    raise GetoptError(_('option -%s not recognized') % opt, opt)
+
if __name__ == '__main__':
    # Ad-hoc smoke test: parse this script's own argv with sample specs.
    import sys
    print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/getpass.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/getpass.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gettext.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gettext.py
new file mode 100644
index 00000000..5e02ae6b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gettext.py
@@ -0,0 +1,14 @@
+import ffilib
+
+# Bind the host C library's gettext()/ngettext() via FFI.
+# func() arguments are (return type, symbol name, argument types);
+# "s" = C string, "L" = unsigned long (presumably matching ffilib's
+# type codes -- confirm against the ffilib module).
+libc = ffilib.libc()
+
+gettext_ = libc.func("s", "gettext", "s")
+ngettext_ = libc.func("s", "ngettext", "ssL")
+
+
+def gettext(message):
+    """Return the localized translation of *message* via libc gettext()."""
+    return gettext_(message)
+
+
+def ngettext(singular, plural, n):
+    """Return *singular* or *plural* translated, chosen by count *n*
+    according to libc ngettext() plural-forms rules."""
+    return ngettext_(singular, plural, n)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/glob.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/glob.py
new file mode 100644
index 00000000..3baf745a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/glob.py
@@ -0,0 +1,95 @@
+"""Filename globbing utility."""
+
+import os
+import os.path
+import re
+import fnmatch
+
+__all__ = ["glob", "iglob"]
+
+def glob(pathname):
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. However, unlike fnmatch, filenames starting with a
+    dot are special cases that are not matched by '*' and '?'
+    patterns.
+
+    """
+    # Eagerly drain the lazy iglob() iterator.
+    return list(iglob(pathname))
+
+def iglob(pathname):
+    """Return an iterator which yields the paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. However, unlike fnmatch, filenames starting with a
+    dot are special cases that are not matched by '*' and '?'
+    patterns.
+
+    """
+    if not has_magic(pathname):
+        # Literal path: yield it only if it exists (lexists keeps
+        # dangling symlinks visible).
+        if os.path.lexists(pathname):
+            yield pathname
+        return
+    dirname, basename = os.path.split(pathname)
+    if not dirname:
+        # Pattern is a bare basename: glob in the current directory.
+        for name in glob1(None, basename):
+            yield name
+        return
+    # `os.path.split()` returns the argument itself as a dirname if it is a
+    # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
+    # contains magic characters (i.e. r'\\?\C:').
+    if dirname != pathname and has_magic(dirname):
+        # Recurse to expand wildcards in the directory part first.
+        dirs = iglob(dirname)
+    else:
+        dirs = [dirname]
+    if has_magic(basename):
+        glob_in_dir = glob1
+    else:
+        glob_in_dir = glob0
+    for dirname in dirs:
+        for name in glob_in_dir(dirname, basename):
+            yield os.path.join(dirname, name)
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
+
+def glob1(dirname, pattern):
+    # Glob *pattern* (which contains magic) against the entries of the
+    # literal directory *dirname*; returns a list of matching basenames.
+    if not dirname:
+        # Empty dirname means the current directory; keep the str/bytes
+        # flavor consistent with the pattern.
+        if isinstance(pattern, bytes):
+            dirname = bytes(os.curdir, 'ASCII')
+        else:
+            dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except os.error:
+        # Unreadable or nonexistent directory: no matches.
+        return []
+    if not _ishidden(pattern):
+        # Dotfiles only match patterns that are themselves "hidden".
+        names = [x for x in names if not _ishidden(x)]
+    return fnmatch.filter(names, pattern)
+
+def glob0(dirname, basename):
+    # Literal (non-magic) basename: just report whether it exists under
+    # *dirname*; returns [basename] or [].
+    if not basename:
+        # `os.path.split()` returns an empty basename for paths ending with a
+        # directory separator.  'q*x/' should match only directories.
+        if os.path.isdir(dirname):
+            return [basename]
+    else:
+        if os.path.lexists(os.path.join(dirname, basename)):
+            return [basename]
+    return []
+
+
+# Characters that make a pattern "magic" (i.e. require real globbing).
+magic_check = re.compile('[*?[]')
+magic_check_bytes = re.compile(b'[*?[]')
+
+def has_magic(s):
+    # True if *s* (str or bytes) contains any glob wildcard character.
+    if isinstance(s, bytes):
+        match = magic_check_bytes.search(s)
+    else:
+        match = magic_check.search(s)
+    return match is not None
+
+def _ishidden(path):
+    # True if the name starts with a dot; b'.'[0] covers the bytes case,
+    # where indexing yields an int.  Assumes *path* is non-empty.
+    return path[0] in ('.', b'.'[0])
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gzip.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gzip.py
new file mode 100644
index 00000000..be4e8f4a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/gzip.py
@@ -0,0 +1,28 @@
+#import zlib
+import uzlib as zlib
+
+FTEXT    = 1
+FHCRC    = 2
+FEXTRA   = 4
+FNAME    = 8
+FCOMMENT = 16
+
+def decompress(data):
+    assert data[0] == 0x1f and data[1] == 0x8b
+    assert data[2] == 8
+    flg = data[3]
+    assert flg & 0xe0 == 0
+    i = 10
+    if flg & FEXTRA:
+        i += data[11] << 8 + data[10] + 2
+    if flg & FNAME:
+        while data[i]:
+            i += 1
+        i += 1
+    if flg & FCOMMENT:
+        while data[i]:
+            i += 1
+        i += 1
+    if flg & FHCRC:
+        i += 2
+    return zlib.decompress(memoryview(data)[i:], -15)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/__init__.py
new file mode 100644
index 00000000..cfbf355f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/__init__.py
@@ -0,0 +1,22 @@
+try:
+    import uhashlib
+except ImportError:
+    uhashlib = None
+
+def init():
+    # Publish sha1/sha224/sha256/sha384/sha512 constructors in this
+    # module's namespace: prefer the native uhashlib implementation,
+    # fall back to the pure-Python _shaXXX modules in this package.
+    for i in ("sha1", "sha224", "sha256", "sha384", "sha512"):
+        c = getattr(uhashlib, i, None)
+        if not c:
+            # Relative import of _shaXXX; grab the class of the same name.
+            c = __import__("_" + i, None, None, (), 1)
+            c = getattr(c, i)
+        globals()[i] = c
+
+# Populate the module namespace at import time.
+init()
+
+
+def new(algo, data=b""):
+    """hashlib.new() compatibility shim: look up the constructor named
+    *algo* (registered by init()) and seed it with *data*.
+    Raises ValueError for an unknown algorithm name."""
+    try:
+        c = globals()[algo]
+        return c(data)
+    except KeyError:
+        raise ValueError(algo)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha224.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha224.py
new file mode 100644
index 00000000..634343b5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha224.py
@@ -0,0 +1 @@
+from ._sha256 import sha224
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha256.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha256.py
new file mode 100644
index 00000000..8c013b5f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha256.py
@@ -0,0 +1,264 @@
+SHA_BLOCKSIZE = 64
+SHA_DIGESTSIZE = 32
+
+
+def new_shaobject():
+    # Fresh hash state: 8 32-bit digest words, a 64-bit message bit
+    # counter split into (count_lo, count_hi), a 64-byte block buffer,
+    # the number of bytes currently buffered ('local'), and the
+    # output size in bytes.
+    return {
+        'digest': [0]*8,
+        'count_lo': 0,
+        'count_hi': 0,
+        'data': [0]* SHA_BLOCKSIZE,
+        'local': 0,
+        'digestsize': 0
+    }
+
+# FIPS 180-4 SHA-256 primitive functions on 32-bit words:
+# ROR = rotate right, R = shift right, Ch/Maj = choice/majority,
+# Sigma/Gamma = the fixed rotate-xor combinations from the standard.
+ROR = lambda x, y: (((x & 0xffffffff) >> (y & 31)) | (x << (32 - (y & 31)))) & 0xffffffff
+Ch = lambda x, y, z: (z ^ (x & (y ^ z)))
+Maj = lambda x, y, z: (((x | y) & z) | (x & y))
+S = lambda x, n: ROR(x, n)
+R = lambda x, n: (x & 0xffffffff) >> n
+Sigma0 = lambda x: (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+Sigma1 = lambda x: (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+Gamma0 = lambda x: (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+Gamma1 = lambda x: (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+
+def sha_transform(sha_info):
+    W = []
+    
+    d = sha_info['data']
+    for i in range(0,16):
+        W.append( (d[4*i]<<24) + (d[4*i+1]<<16) + (d[4*i+2]<<8) + d[4*i+3])
+    
+    for i in range(16,64):
+        W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffff )
+    
+    ss = sha_info['digest'][:]
+    
+    def RND(a,b,c,d,e,f,g,h,i,ki):
+        t0 = h + Sigma1(e) + Ch(e, f, g) + ki + W[i];
+        t1 = Sigma0(a) + Maj(a, b, c);
+        d += t0;
+        h  = t0 + t1;
+        return d & 0xffffffff, h & 0xffffffff
+    
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x71374491);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcf);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba5);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25b);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b01);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a7);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c1);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc6);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dc);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c8);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf3);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x14292967);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a85);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b2138);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d13);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a7354);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c85);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a1);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664b);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a3);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd6990624);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e3585);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa070);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c08);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774c);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4a);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3);
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee);
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f);
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814);
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc70208);
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa);
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506ceb);
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7);
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2);
+    
+    dig = []
+    for i, x in enumerate(sha_info['digest']):
+        dig.append( (x + ss[i]) & 0xffffffff )
+    sha_info['digest'] = dig
+
+def sha_init():
+    """Return fresh SHA-256 state (FIPS 180-4 initial hash values H0..H7)."""
+    sha_info = new_shaobject()
+    sha_info['digest'] = [0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19]
+    sha_info['count_lo'] = 0
+    sha_info['count_hi'] = 0
+    sha_info['local'] = 0
+    sha_info['digestsize'] = 32
+    return sha_info
+
+def sha224_init():
+    """Return fresh SHA-224 state: same machinery as SHA-256 but with the
+    SHA-224 initial values and the digest truncated to 28 bytes."""
+    sha_info = new_shaobject()
+    sha_info['digest'] = [0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4]
+    sha_info['count_lo'] = 0
+    sha_info['count_hi'] = 0
+    sha_info['local'] = 0
+    sha_info['digestsize'] = 28
+    return sha_info
+
+def getbuf(s):
+    # Coerce input to bytes: ASCII-encode str, copy anything buffer-like.
+    if isinstance(s, str):
+        return s.encode('ascii')
+    else:
+        return bytes(s)
+
+def sha_update(sha_info, buffer):
+    """Absorb *buffer* (bytes-like) into the running SHA-256 state."""
+    if isinstance(buffer, str):
+        raise TypeError("Unicode strings must be encoded before hashing")
+    count = len(buffer)
+    buffer_idx = 0
+    # Update the 64-bit message bit counter (count_hi:count_lo).
+    clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
+    if clo < sha_info['count_lo']:
+        sha_info['count_hi'] += 1
+    sha_info['count_lo'] = clo
+    
+    sha_info['count_hi'] += (count >> 29)
+    
+    if sha_info['local']:
+        # Top up the partially filled block buffer first.
+        i = SHA_BLOCKSIZE - sha_info['local']
+        if i > count:
+            i = count
+        
+        # copy buffer
+        for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
+            sha_info['data'][sha_info['local']+x[0]] = x[1]
+        
+        count -= i
+        buffer_idx += i
+        
+        sha_info['local'] += i
+        if sha_info['local'] == SHA_BLOCKSIZE:
+            sha_transform(sha_info)
+            sha_info['local'] = 0
+        else:
+            # Still less than a full block buffered; wait for more data.
+            return
+    
+    # Process as many whole 64-byte blocks as remain.
+    while count >= SHA_BLOCKSIZE:
+        # copy buffer
+        sha_info['data'] = list(buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE])
+        count -= SHA_BLOCKSIZE
+        buffer_idx += SHA_BLOCKSIZE
+        sha_transform(sha_info)
+        
+    
+    # copy buffer -- stash the trailing partial block.
+    pos = sha_info['local']
+    sha_info['data'][pos:pos+count] = list(buffer[buffer_idx:buffer_idx + count])
+    sha_info['local'] = count
+
+def sha_final(sha_info):
+    """Apply SHA padding and return the full 32-byte big-endian digest.
+    Mutates *sha_info*; callers pass a copy to keep the state usable."""
+    lo_bit_count = sha_info['count_lo']
+    hi_bit_count = sha_info['count_hi']
+    count = (lo_bit_count >> 3) & 0x3f
+    # Padding starts with a single 0x80 byte after the message.
+    sha_info['data'][count] = 0x80;
+    count += 1
+    if count > SHA_BLOCKSIZE - 8:
+        # No room left for the 8-byte length field: pad out this block,
+        # transform, then use an extra all-zero block.
+        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+        sha_transform(sha_info)
+        # zero bytes in data
+        sha_info['data'] = [0] * SHA_BLOCKSIZE
+    else:
+        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+    
+    # 64-bit big-endian message length in bits fills the last 8 bytes.
+    sha_info['data'][56] = (hi_bit_count >> 24) & 0xff
+    sha_info['data'][57] = (hi_bit_count >> 16) & 0xff
+    sha_info['data'][58] = (hi_bit_count >>  8) & 0xff
+    sha_info['data'][59] = (hi_bit_count >>  0) & 0xff
+    sha_info['data'][60] = (lo_bit_count >> 24) & 0xff
+    sha_info['data'][61] = (lo_bit_count >> 16) & 0xff
+    sha_info['data'][62] = (lo_bit_count >>  8) & 0xff
+    sha_info['data'][63] = (lo_bit_count >>  0) & 0xff
+    
+    sha_transform(sha_info)
+    
+    # Serialize the eight 32-bit digest words big-endian.
+    dig = []
+    for i in sha_info['digest']:
+        dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
+    return bytes(dig)
+
+class sha256(object):
+    """Pure-Python SHA-256 exposing the hashlib object interface
+    (update / digest / hexdigest / copy)."""
+    digest_size = digestsize = SHA_DIGESTSIZE
+    block_size = SHA_BLOCKSIZE
+
+    def __init__(self, s=None):
+        self._sha = sha_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+    
+    def update(self, s):
+        """Absorb more data (str inputs are ASCII-encoded)."""
+        sha_update(self._sha, getbuf(s))
+    
+    def digest(self):
+        # Finalize a *copy* so the running state remains usable.
+        return sha_final(self._sha.copy())[:self._sha['digestsize']]
+    
+    def hexdigest(self):
+        """Return the digest as a lowercase hex string."""
+        return ''.join(['%.2x' % i for i in self.digest()])
+
+    def copy(self):
+        """Return an independent clone of the current hash state."""
+        new = sha256()
+        new._sha = self._sha.copy()
+        return new
+
+class sha224(sha256):
+    """SHA-224: SHA-256 machinery with different initial values and a
+    28-byte digest."""
+    digest_size = digestsize = 28
+
+    def __init__(self, s=None):
+        self._sha = sha224_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+
+    def copy(self):
+        """Return an independent clone of the current hash state."""
+        new = sha224()
+        new._sha = self._sha.copy()
+        return new
+
+def test():
+    """Known-answer self-tests (empty input, short input, multi-block
+    input, and incremental update)."""
+    a_str = "just a test string"
+    
+    assert b"\xe3\xb0\xc4B\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99o\xb9$'\xaeA\xe4d\x9b\x93L\xa4\x95\x99\x1bxR\xb8U" == sha256().digest()
+    assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest()
+    assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest()
+    assert '8113ebf33c97daa9998762aacafe750c7cefc2b2f173c90c59663a57fe626f21' == sha256(a_str*7).hexdigest()
+    
+    s = sha256(a_str)
+    s.update(a_str)
+    assert '03d9963e05a094593190b6fc794cb1a3e1ac7d7883f0b5855268afeccc70d461' == s.hexdigest()
+
+if __name__ == "__main__":
+    # Run the known-answer self-tests when executed directly.
+    test()
+
+
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha384.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha384.py
new file mode 100644
index 00000000..20f09ff0
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha384.py
@@ -0,0 +1 @@
+from ._sha512 import sha384
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha512.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha512.py
new file mode 100644
index 00000000..8875db2e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hashlib/_sha512.py
@@ -0,0 +1,290 @@
+"""
+This code was Ported from CPython's sha512module.c
+"""
+
+SHA_BLOCKSIZE = 128
+SHA_DIGESTSIZE = 64
+
+
+def new_shaobject():
+    # Fresh hash state: 8 64-bit digest words, a message bit counter
+    # split into (count_lo, count_hi), a 128-byte block buffer, the
+    # number of bytes currently buffered ('local'), and the output size.
+    return {
+        'digest': [0]*8,
+        'count_lo': 0,
+        'count_hi': 0,
+        'data': [0]* SHA_BLOCKSIZE,
+        'local': 0,
+        'digestsize': 0
+    }
+
+# FIPS 180-4 SHA-512 primitive functions on 64-bit words:
+# ROR64 = rotate right, R = shift right, Ch/Maj = choice/majority,
+# Sigma/Gamma = the fixed rotate-xor combinations from the standard.
+ROR64 = lambda x, y: (((x & 0xffffffffffffffff) >> (y & 63)) | (x << (64 - (y & 63)))) & 0xffffffffffffffff
+Ch = lambda x, y, z: (z ^ (x & (y ^ z)))
+Maj = lambda x, y, z: (((x | y) & z) | (x & y))
+S = lambda x, n: ROR64(x, n)
+R = lambda x, n: (x & 0xffffffffffffffff) >> n
+Sigma0 = lambda x: (S(x, 28) ^ S(x, 34) ^ S(x, 39))
+Sigma1 = lambda x: (S(x, 14) ^ S(x, 18) ^ S(x, 41))
+Gamma0 = lambda x: (S(x, 1) ^ S(x, 8) ^ R(x, 7))
+Gamma1 = lambda x: (S(x, 19) ^ S(x, 61) ^ R(x, 6))
+
+def sha_transform(sha_info):
+    W = []
+
+    d = sha_info['data']
+    for i in range(0,16):
+        W.append( (d[8*i]<<56) + (d[8*i+1]<<48) + (d[8*i+2]<<40) + (d[8*i+3]<<32) + (d[8*i+4]<<24) + (d[8*i+5]<<16) + (d[8*i+6]<<8) + d[8*i+7])
+
+    for i in range(16,80):
+        W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffffffffffff )
+
+    ss = sha_info['digest'][:]
+
+    def RND(a,b,c,d,e,f,g,h,i,ki):
+        t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xffffffffffffffff
+        t1 = (Sigma0(a) + Maj(a, b, c)) & 0xffffffffffffffff
+        d = (d + t0) & 0xffffffffffffffff
+        h = (t0 + t1) & 0xffffffffffffffff
+        return d & 0xffffffffffffffff, h & 0xffffffffffffffff
+
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98d728ae22)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x7137449123ef65cd)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcfec4d3b2f)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba58189dbbc)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25bf348b538)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1b605d019)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4af194f9b)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5da6d8118)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98a3030242)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b0145706fbe)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be4ee4b28c)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3d5ffb4e2)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74f27b896f)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe3b1696b1)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a725c71235)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174cf692694)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c19ef14ad2)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786384f25e3)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc68b8cd5b5)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc77ac9c65)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f592b0275)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa6ea6e483)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dcbd41fbd4)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da831153b5)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152ee66dfab)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d2db43210)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c898fb213f)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7beef0ee4)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf33da88fc2)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147930aa725)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351e003826f)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x142929670a0e6e70)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a8546d22ffc)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b21385c26c926)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc5ac42aed)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d139d95b3df)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a73548baf63de)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb3c77b2a8)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e47edaee6)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c851482353b)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a14cf10364)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664bbc423001)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70d0f89791)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a30654be30)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819d6ef5218)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd69906245565a910)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e35855771202a)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa07032bbd1b8)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116b8d2d0c8)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c085141ab53)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774cdf8eeb99)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5e19b48a8)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3c5c95a63)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4ae3418acb)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f7763e373)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3d6b2b8a3)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee5defb2fc)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f43172f60)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814a1f0ab72)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc702081a6439ec)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa23631e28)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506cebde82bde9)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7b2c67915)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2e372532b)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],64,0xca273eceea26619c)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],65,0xd186b8c721c0c207)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],66,0xeada7dd6cde0eb1e)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],67,0xf57d4f7fee6ed178)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],68,0x06f067aa72176fba)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],69,0x0a637dc5a2c898a6)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],70,0x113f9804bef90dae)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],71,0x1b710b35131c471b)
+    ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],72,0x28db77f523047d84)
+    ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],73,0x32caab7b40c72493)
+    ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],74,0x3c9ebe0a15c9bebc)
+    ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],75,0x431d67c49c100d4c)
+    ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],76,0x4cc5d4becb3e42b6)
+    ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],77,0x597f299cfc657e2a)
+    ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],78,0x5fcb6fab3ad6faec)
+    ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],79,0x6c44198c4a475817)
+
+    dig = []
+    for i, x in enumerate(sha_info['digest']):
+        dig.append( (x + ss[i]) & 0xffffffffffffffff )
+    sha_info['digest'] = dig
+
+def sha_init():
+    """Return fresh SHA-512 state (FIPS 180-4 initial hash values H0..H7)."""
+    sha_info = new_shaobject()
+    sha_info['digest'] = [ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
+    sha_info['count_lo'] = 0
+    sha_info['count_hi'] = 0
+    sha_info['local'] = 0
+    sha_info['digestsize'] = 64
+    return sha_info
+
+def sha384_init():
+    """Return fresh SHA-384 state: same machinery as SHA-512 but with the
+    SHA-384 initial values and the digest truncated to 48 bytes."""
+    sha_info = new_shaobject()
+    sha_info['digest'] = [ 0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939, 0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4]
+    sha_info['count_lo'] = 0
+    sha_info['count_hi'] = 0
+    sha_info['local'] = 0
+    sha_info['digestsize'] = 48
+    return sha_info
+
+def getbuf(s):
+    # Coerce input to bytes: ASCII-encode str, copy anything buffer-like.
+    if isinstance(s, str):
+        return s.encode('ascii')
+    else:
+        return bytes(s)
+
+def sha_update(sha_info, buffer):
+    """Absorb *buffer* (bytes-like) into the running SHA-512 state.
+
+    NOTE(review): the bit counter uses the same 32-bit count_lo split as
+    the SHA-256 code (mask 0xffffffff, `count >> 29`), so only a 64-bit
+    total length is tracked even though SHA-512 allows 128 bits.
+    """
+    if isinstance(buffer, str):
+        raise TypeError("Unicode strings must be encoded before hashing")
+    count = len(buffer)
+    buffer_idx = 0
+    clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
+    if clo < sha_info['count_lo']:
+        sha_info['count_hi'] += 1
+    sha_info['count_lo'] = clo
+
+    sha_info['count_hi'] += (count >> 29)
+
+    if sha_info['local']:
+        # Top up the partially filled block buffer first.
+        i = SHA_BLOCKSIZE - sha_info['local']
+        if i > count:
+            i = count
+
+        # copy buffer
+        for x in enumerate(buffer[buffer_idx:buffer_idx+i]):
+            sha_info['data'][sha_info['local']+x[0]] = x[1]
+
+        count -= i
+        buffer_idx += i
+
+        sha_info['local'] += i
+        if sha_info['local'] == SHA_BLOCKSIZE:
+            sha_transform(sha_info)
+            sha_info['local'] = 0
+        else:
+            # Still less than a full block buffered; wait for more data.
+            return
+
+    # Process as many whole 128-byte blocks as remain.
+    while count >= SHA_BLOCKSIZE:
+        # copy buffer
+        sha_info['data'] = list(buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE])
+        count -= SHA_BLOCKSIZE
+        buffer_idx += SHA_BLOCKSIZE
+        sha_transform(sha_info)
+
+    # copy buffer -- stash the trailing partial block.
+    pos = sha_info['local']
+    sha_info['data'][pos:pos+count] = list(buffer[buffer_idx:buffer_idx + count])
+    sha_info['local'] = count
+
+def sha_final(sha_info):
+    """Apply SHA padding and return the full 64-byte big-endian digest.
+    Mutates *sha_info*; callers pass a copy to keep the state usable."""
+    lo_bit_count = sha_info['count_lo']
+    hi_bit_count = sha_info['count_hi']
+    count = (lo_bit_count >> 3) & 0x7f
+    # Padding starts with a single 0x80 byte after the message.
+    sha_info['data'][count] = 0x80;
+    count += 1
+    if count > SHA_BLOCKSIZE - 16:
+        # No room left for the 16-byte length field: pad out this block,
+        # transform, then use an extra all-zero block.
+        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+        sha_transform(sha_info)
+        # zero bytes in data
+        sha_info['data'] = [0] * SHA_BLOCKSIZE
+    else:
+        sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+
+    # Upper half of the 128-bit length field is always zero here, since
+    # only a 64-bit bit count is tracked (see sha_update).
+    sha_info['data'][112] = 0;
+    sha_info['data'][113] = 0;
+    sha_info['data'][114] = 0;
+    sha_info['data'][115] = 0;
+    sha_info['data'][116] = 0;
+    sha_info['data'][117] = 0;
+    sha_info['data'][118] = 0;
+    sha_info['data'][119] = 0;
+
+    # 64-bit big-endian message length in bits fills the last 8 bytes.
+    sha_info['data'][120] = (hi_bit_count >> 24) & 0xff
+    sha_info['data'][121] = (hi_bit_count >> 16) & 0xff
+    sha_info['data'][122] = (hi_bit_count >>  8) & 0xff
+    sha_info['data'][123] = (hi_bit_count >>  0) & 0xff
+    sha_info['data'][124] = (lo_bit_count >> 24) & 0xff
+    sha_info['data'][125] = (lo_bit_count >> 16) & 0xff
+    sha_info['data'][126] = (lo_bit_count >>  8) & 0xff
+    sha_info['data'][127] = (lo_bit_count >>  0) & 0xff
+
+    sha_transform(sha_info)
+
+    # Serialize the eight 64-bit digest words big-endian.
+    dig = []
+    for i in sha_info['digest']:
+        dig.extend([ ((i>>56) & 0xff), ((i>>48) & 0xff), ((i>>40) & 0xff), ((i>>32) & 0xff), ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
+    return bytes(dig)
+
+class sha512(object):
+    """Pure-Python SHA-512 exposing the hashlib object interface
+    (update / digest / hexdigest / copy)."""
+    digest_size = digestsize = SHA_DIGESTSIZE
+    block_size = SHA_BLOCKSIZE
+
+    def __init__(self, s=None):
+        self._sha = sha_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+
+    def update(self, s):
+        """Absorb more data (str inputs are ASCII-encoded)."""
+        sha_update(self._sha, getbuf(s))
+
+    def digest(self):
+        # Finalize a *copy* so the running state remains usable.
+        return sha_final(self._sha.copy())[:self._sha['digestsize']]
+
+    def hexdigest(self):
+        """Return the digest as a lowercase hex string."""
+        return ''.join(['%.2x' % i for i in self.digest()])
+
+    def copy(self):
+        """Return an independent clone of the current hash state."""
+        new = sha512()
+        new._sha = self._sha.copy()
+        return new
+
+class sha384(sha512):
+    """SHA-384: SHA-512 machinery with different initial values and a
+    48-byte digest."""
+    digest_size = digestsize = 48
+
+    def __init__(self, s=None):
+        self._sha = sha384_init()
+        if s:
+            sha_update(self._sha, getbuf(s))
+
+    def copy(self):
+        """Return an independent clone of the current hash state."""
+        new = sha384()
+        new._sha = self._sha.copy()
+        return new
+
+def test():
+    """Known-answer self-tests (empty input, short input, multi-block
+    input, and incremental update)."""
+    a_str = "just a test string"
+
+    assert sha512().digest() == b"\xcf\x83\xe15~\xef\xb8\xbd\xf1T(P\xd6m\x80\x07\xd6 \xe4\x05\x0bW\x15\xdc\x83\xf4\xa9!\xd3l\xe9\xceG\xd0\xd1<]\x85\xf2\xb0\xff\x83\x18\xd2\x87~\xec/c\xb91\xbdGAz\x81\xa582z\xf9'\xda>"
+    assert sha512().hexdigest() == 'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'
+    assert sha512(a_str).hexdigest() == '68be4c6664af867dd1d01c8d77e963d87d77b702400c8fabae355a41b8927a5a5533a7f1c28509bbd65c5f3ac716f33be271fbda0ca018b71a84708c9fae8a53'
+    assert sha512(a_str*7).hexdigest() == '3233acdbfcfff9bff9fc72401d31dbffa62bd24e9ec846f0578d647da73258d9f0879f7fde01fe2cc6516af3f343807fdef79e23d696c923d79931db46bf1819'
+
+    s = sha512(a_str)
+    s.update(a_str)
+    assert s.hexdigest() == '341aeb668730bbb48127d5531115f3c39d12cb9586a6ca770898398aff2411087cfe0b570689adf328cddeb1f00803acce6737a19f310b53bbdb0320828f75bb'
+
+if __name__ == "__main__":
+    # Run the known-answer self-tests when executed directly.
+    test()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/heapq.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/heapq.py
new file mode 100644
index 00000000..8b278fb8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/heapq.py
@@ -0,0 +1,480 @@
+"""Heap queue algorithm (a.k.a. priority queue).
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0.  For the sake of comparison,
+non-existing elements are considered to be infinite.  The interesting
+property of a heap is that a[0] is always its smallest element.
+
+Usage:
+
+heap = []            # creates an empty heap
+heappush(heap, item) # pushes a new item on the heap
+item = heappop(heap) # pops the smallest item from the heap
+item = heap[0]       # smallest item on the heap without popping it
+heapify(x)           # transforms list into a heap, in-place, in linear time
+item = heapreplace(heap, item) # pops and returns smallest item, and adds
+                               # new item; the heap size is unchanged
+
+Our API differs from textbook heap algorithms as follows:
+
+- We use 0-based indexing.  This makes the relationship between the
+  index for a node and the indexes for its children slightly less
+  obvious, but is more suitable since Python uses 0-based indexing.
+
+- Our heappop() method returns the smallest item, not the largest.
+
+These two make it possible to view the heap as a regular Python list
+without surprises: heap[0] is the smallest item, and heap.sort()
+maintains the heap invariant!
+"""
+
+# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
+
+"""Heap queues
+
+[explanation by François Pinard]
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0.  For the sake of comparison,
+non-existing elements are considered to be infinite.  The interesting
+property of a heap is that a[0] is always its smallest element.
+
+The strange invariant above is meant to be an efficient memory
+representation for a tournament.  The numbers below are `k', not a[k]:
+
+                                   0
+
+                  1                                 2
+
+          3               4                5               6
+
+      7       8       9       10      11      12      13      14
+
+    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30
+
+
+In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'.  In
+an usual binary tournament we see in sports, each cell is the winner
+over the two cells it tops, and we can trace the winner down the tree
+to see all opponents s/he had.  However, in many computer applications
+of such tournaments, we do not need to trace the history of a winner.
+To be more memory efficient, when a winner is promoted, we try to
+replace it by something else at a lower level, and the rule becomes
+that a cell and the two cells it tops contain three different items,
+but the top cell "wins" over the two topped cells.
+
+If this heap invariant is protected at all time, index 0 is clearly
+the overall winner.  The simplest algorithmic way to remove it and
+find the "next" winner is to move some loser (let's say cell 30 in the
+diagram above) into the 0 position, and then percolate this new 0 down
+the tree, exchanging values, until the invariant is re-established.
+This is clearly logarithmic on the total number of items in the tree.
+By iterating over all items, you get an O(n ln n) sort.
+
+A nice feature of this sort is that you can efficiently insert new
+items while the sort is going on, provided that the inserted items are
+not "better" than the last 0'th element you extracted.  This is
+especially useful in simulation contexts, where the tree holds all
+incoming events, and the "win" condition means the smallest scheduled
+time.  When an event schedules other events for execution, they are
+scheduled into the future, so they can easily go into the heap.  So, a
+heap is a good structure for implementing schedulers (this is what I
+used for my MIDI sequencer :-).
+
+Various structures for implementing schedulers have been extensively
+studied, and heaps are good for this, as they are reasonably speedy,
+the speed is almost constant, and the worst case is not much different
+than the average case.  However, there are other representations which
+are more efficient overall, yet the worst cases might be terrible.
+
+Heaps are also very useful in big disk sorts.  You most probably all
+know that a big sort implies producing "runs" (which are pre-sorted
+sequences, which size is usually related to the amount of CPU memory),
+followed by a merging passes for these runs, which merging is often
+very cleverly organised[1].  It is very important that the initial
+sort produces the longest runs possible.  Tournaments are a good way
+to that.  If, using all the memory available to hold a tournament, you
+replace and percolate items that happen to fit the current run, you'll
+produce runs which are twice the size of the memory for random input,
+and much better for input fuzzily ordered.
+
+Moreover, if you output the 0'th item on disk and get an input which
+may not fit in the current tournament (because the value "wins" over
+the last output value), it cannot fit in the heap, so the size of the
+heap decreases.  The freed memory could be cleverly reused immediately
+for progressively building a second heap, which grows at exactly the
+same rate the first heap is melting.  When the first heap completely
+vanishes, you switch heaps and start a new run.  Clever and quite
+effective!
+
+In a word, heaps are useful memory structures to know.  I use them in
+a few applications, and I think it is good to keep a `heap' module
+around. :-)
+
+--------------------
+[1] The disk balancing algorithms which are current, nowadays, are
+more annoying than clever, and this is a consequence of the seeking
+capabilities of the disks.  On devices which cannot seek, like big
+tape drives, the story was quite different, and one had to be very
+clever to ensure (far in advance) that each tape movement will be the
+most effective possible (that is, will best participate at
+"progressing" the merge).  Some tapes were even able to read
+backwards, and this was also used to avoid the rewinding time.
+Believe me, real good tape sorts were quite spectacular to watch!
+From all times, sorting has always been a Great Art! :-)
+"""
+
+__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
+           'nlargest', 'nsmallest', 'heappushpop']
+
+#from itertools import count, tee, chain
+
def heappush(heap, item):
    """Add *item* to *heap*, keeping the heap invariant intact."""
    heap.append(item)
    # The new leaf may beat its ancestors; bubble it toward the root.
    _siftdown(heap, 0, len(heap) - 1)
+
def heappop(heap):
    """Remove and return the smallest item, preserving the heap invariant."""
    # Popping from an empty list raises IndexError, matching list semantics.
    tail = heap.pop()
    if not heap:
        return tail
    smallest = heap[0]
    # Move the detached tail element to the root and sink it into place.
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
+
def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap.  Note that the value
    returned may be larger than item!  That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]    # IndexError on an empty heap, by design
    heap[0] = item
    _siftup(heap, 0)
    return smallest
+
def heappushpop(heap, item):
    """Push *item* on the heap, then pop and return the smallest — in one
    sift instead of two."""
    if not heap:
        return item
    if heap[0] < item:
        # The incoming item is not the minimum: swap it with the root.
        item, heap[0] = heap[0], item
        _siftup(heap, 0)
    return item
+
def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    # Leaves (indices >= len(x)//2) are one-element heaps already; repair
    # the internal nodes bottom-up so every subtree is a heap before its
    # parent is processed.
    for i in range(len(x) // 2 - 1, -1, -1):
        _siftup(x, i)
+
+def _heappushpop_max(heap, item):
+    """Maxheap version of a heappush followed by a heappop."""
+    if heap and item < heap[0]:
+        item, heap[0] = heap[0], item
+        _siftup_max(heap, 0)
+    return item
+
+def _heapify_max(x):
+    """Transform list into a maxheap, in-place, in O(len(x)) time."""
+    n = len(x)
+    for i in reversed(range(n//2)):
+        _siftup_max(x, i)
+
def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, reverse=True)[:n]
    """
    from itertools import islice
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a min-heap with the first n items; its root is the weakest
    # of the current candidates.
    candidates = list(islice(it, n))
    if not candidates:
        return candidates
    heapify(candidates)
    # Every further item evicts the root iff it is larger.
    for elem in it:
        heappushpop(candidates, elem)
    candidates.sort(reverse=True)
    return candidates
+
+def nsmallest(n, iterable):
+    """Find the n smallest elements in a dataset.
+
+    Equivalent to:  sorted(iterable)[:n]
+    """
+    from itertools import islice, count, tee, chain
+    if n < 0:
+        return []
+    it = iter(iterable)
+    result = list(islice(it, n))
+    if not result:
+        return result
+    _heapify_max(result)
+    _heappushpop = _heappushpop_max
+    for elem in it:
+        _heappushpop(result, elem)
+    result.sort()
+    return result
+
+# 'heap' is a heap at all indices >= startpos, except possibly for pos.  pos
+# is the index of a leaf with a possibly out-of-order value.  Restore the
+# heap invariant.
+def _siftdown(heap, startpos, pos):
+    newitem = heap[pos]
+    # Follow the path to the root, moving parents down until finding a place
+    # newitem fits.
+    while pos > startpos:
+        parentpos = (pos - 1) >> 1
+        parent = heap[parentpos]
+        if newitem < parent:
+            heap[pos] = parent
+            pos = parentpos
+            continue
+        break
+    heap[pos] = newitem
+
+# The child indices of heap index pos are already heaps, and we want to make
+# a heap at index pos too.  We do this by bubbling the smaller child of
+# pos up (and so on with that child's children, etc) until hitting a leaf,
+# then using _siftdown to move the oddball originally at index pos into place.
+#
+# We *could* break out of the loop as soon as we find a pos where newitem <=
+# both its children, but turns out that's not a good idea, and despite that
+# many books write the algorithm that way.  During a heap pop, the last array
+# element is sifted in, and that tends to be large, so that comparing it
+# against values starting from the root usually doesn't pay (= usually doesn't
+# get us out of the loop early).  See Knuth, Volume 3, where this is
+# explained and quantified in an exercise.
+#
+# Cutting the # of comparisons is important, since these routines have no
+# way to extract "the priority" from an array element, so that intelligence
+# is likely to be hiding in custom comparison methods, or in array elements
+# storing (priority, record) tuples.  Comparisons are thus potentially
+# expensive.
+#
+# On random arrays of length 1000, making this change cut the number of
+# comparisons made by heapify() a little, and those made by exhaustive
+# heappop() a lot, in accord with theory.  Here are typical results from 3
+# runs (3 just to demonstrate how small the variance is):
+#
+# Compares needed by heapify     Compares needed by 1000 heappops
+# --------------------------     --------------------------------
+# 1837 cut to 1663               14996 cut to 8680
+# 1855 cut to 1659               14966 cut to 8678
+# 1847 cut to 1660               15024 cut to 8703
+#
+# Building the heap by using heappush() 1000 times instead required
+# 2198, 2148, and 2219 compares:  heapify() is more efficient, when
+# you can use it.
+#
+# The total compares needed by list.sort() on the same lists were 8627,
+# 8627, and 8632 (this should be compared to the sum of heapify() and
+# heappop() compares):  list.sort() is (unsurprisingly!) more efficient
+# for sorting.
+
+def _siftup(heap, pos):
+    endpos = len(heap)
+    startpos = pos
+    newitem = heap[pos]
+    # Bubble up the smaller child until hitting a leaf.
+    childpos = 2*pos + 1    # leftmost child position
+    while childpos < endpos:
+        # Set childpos to index of smaller child.
+        rightpos = childpos + 1
+        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
+            childpos = rightpos
+        # Move the smaller child up.
+        heap[pos] = heap[childpos]
+        pos = childpos
+        childpos = 2*pos + 1
+    # The leaf at pos is empty now.  Put newitem there, and bubble it up
+    # to its final resting place (by sifting its parents down).
+    heap[pos] = newitem
+    _siftdown(heap, startpos, pos)
+
+def _siftdown_max(heap, startpos, pos):
+    'Maxheap variant of _siftdown'
+    newitem = heap[pos]
+    # Follow the path to the root, moving parents down until finding a place
+    # newitem fits.
+    while pos > startpos:
+        parentpos = (pos - 1) >> 1
+        parent = heap[parentpos]
+        if parent < newitem:
+            heap[pos] = parent
+            pos = parentpos
+            continue
+        break
+    heap[pos] = newitem
+
+def _siftup_max(heap, pos):
+    'Maxheap variant of _siftup'
+    endpos = len(heap)
+    startpos = pos
+    newitem = heap[pos]
+    # Bubble up the larger child until hitting a leaf.
+    childpos = 2*pos + 1    # leftmost child position
+    while childpos < endpos:
+        # Set childpos to index of larger child.
+        rightpos = childpos + 1
+        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
+            childpos = rightpos
+        # Move the larger child up.
+        heap[pos] = heap[childpos]
+        pos = childpos
+        childpos = 2*pos + 1
+    # The leaf at pos is empty now.  Put newitem there, and bubble it up
+    # to its final resting place (by sifting its parents down).
+    heap[pos] = newitem
+    _siftdown_max(heap, startpos, pos)
+
# If available, use C implementation.  The star-import replaces the
# pure-Python heappush/heappop/heapreplace/heappushpop/heapify defined
# above; merge() and the n{smallest,largest} wrappers defined below stay
# in Python (same layering as CPython's own heapq module).
try:
    from _heapq import *
except ImportError:
    pass
+
def merge(*iterables):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    '''
    # Bind frequently-used globals/builtins to locals: faster lookup in
    # the inner loop.
    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
    _len = len

    # Heap entries are *mutable* [value, itnum, next] lists; itnum breaks
    # ties so two entries never fall through to comparing the bound
    # __next__ methods, which are unorderable.
    h = []
    h_append = h.append
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            next = it.__next__
            h_append([next(), itnum, next])
        except _StopIteration:
            pass                            # empty input stream contributes nothing
    heapify(h)

    while _len(h) > 1:
        try:
            while True:
                # Peek at the root entry, emit its value, then refill it
                # in place from its own iterator.
                v, itnum, next = s = h[0]
                yield v
                s[0] = next()               # raises StopIteration when exhausted
                _heapreplace(h, s)          # restore heap condition
        except _StopIteration:
            _heappop(h)                     # remove empty iterator
    if h:
        # fast case when only a single iterator remains
        v, itnum, next = h[0]
        yield v
        yield from next.__self__
+
+# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    from itertools import islice, count, tee, chain
    # Asking for a single element: one min() scan is cheapest.
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [min(chain(head, it))]
        return [min(chain(head, it), key=key)]

    # If the full length is known and n covers it, a plain sort wins.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    # No key: pair each value with a serial number so ties are settled by
    # arrival order and the heap never needs a second comparison field.
    if key is None:
        decorated = zip(iterable, count())
        return [pair[0] for pair in _nsmallest(n, decorated)]

    # General case: (key, serial, value) triples keep the result stable
    # and call key() exactly once per element.
    keyed, plain = tee(iterable)
    decorated = zip(map(key, keyed), count(), plain)
    return [triple[2] for triple in _nsmallest(n, decorated)]
+
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
    """

    from itertools import islice, count, tee, chain
    # Asking for a single element: one max() scan is cheapest.
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [max(chain(head, it))]
        return [max(chain(head, it), key=key)]

    # If the full length is known and n covers it, a plain sort wins.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]

    # No key: pair each value with a *decreasing* serial so that, under
    # reverse ordering, earlier elements still win ties (stability).
    if key is None:
        decorated = zip(iterable, count(0, -1))
        return [pair[0] for pair in _nlargest(n, decorated)]

    # General case: (key, serial, value) triples keep the result stable
    # and call key() exactly once per element.
    keyed, plain = tee(iterable)
    decorated = zip(map(key, keyed), count(0, -1), plain)
    return [triple[2] for triple in _nlargest(n, decorated)]
+
if __name__ == "__main__":
    # Simple sanity test: push everything, then pop repeatedly.  heappop
    # always yields the current minimum, so the collected output must come
    # out sorted.
    heap = []
    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
    for item in data:
        heappush(heap, item)
    sort = []
    while heap:
        sort.append(heappop(heap))
    print(sort)

    import doctest
    doctest.testmod()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hmac.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hmac.py
new file mode 100644
index 00000000..af1feade
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/hmac.py
@@ -0,0 +1,149 @@
+"""HMAC (Keyed-Hashing for Message Authentication) Python module.
+
+Implements the HMAC algorithm as described by RFC 2104.
+"""
+
+import warnings as _warnings
+#from _operator import _compare_digest as compare_digest
+import hashlib as _hashlib
# NOTE(review): these shadow the builtin warning categories with None —
# presumably because this MicroPython port's warnings module accepts a
# placeholder category.  Confirm before reusing this file under CPython,
# where real categories would be expected by _warnings.warn().
PendingDeprecationWarning = None
RuntimeWarning = None
+
# 256-entry translation tables for the RFC 2104 pads: every possible key
# byte XORed with 0x5C (outer pad) and 0x36 (inner pad) respectively.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
+
def translate(d, t):
    """Map every byte of *d* through the 256-entry table *t*, returning bytes."""
    return bytes(map(t.__getitem__, d))
+
+# The size of the digests returned by HMAC depends on the underlying
+# hashing module used.  Use digest_size from the instance of HMAC instead.
+digest_size = None
+
+
+
class HMAC:
    """RFC 2104 HMAC class.  Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key:       key for the keyed hash object.
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247.  *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A hash name suitable for hashlib.new().
                   Defaults to hashlib.md5.
                   Implicit default to hashlib.md5 is deprecated and will be
                   removed in Python 3.6.

        Note: key and msg must be a bytes or bytearray objects.
        """

        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if digestmod is None:
            _warnings.warn("HMAC() without an explicit digestmod argument "
                           "is deprecated.", PendingDeprecationWarning, 2)
            digestmod = _hashlib.md5

        # Normalize digestmod into a constructor taking optional initial data.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.new(d)

        # Two independent hash states per RFC 2104: inner hashes
        # (key^ipad || msg); outer hashes (key^opad || inner digest).
        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size

        # Determine the effective block size, falling back to the class
        # default (with a warning) when the digest doesn't report one or
        # reports an implausibly small one.
        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # RFC 2104: a key longer than one block is first hashed down.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()

        # Zero-pad the key to exactly one block, then feed the XORed pads.
        key = key + bytes(blocksize - len(key))
        self.outer.update(translate(key, trans_5C))
        self.inner.update(translate(key, trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        # e.g. "hmac-md5" when the inner hash reports name "md5".
        return "hmac-" + self.inner.name

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        # Finalize on a copy of the outer state so digest()/hexdigest()
        # leave this object usable for further update() calls.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
+
def new(key, msg=None, digestmod=None):
    """Return a fresh HMAC object keyed with *key*.

    If *msg* is given, it is immediately hashed into the object's starting
    state.  *digestmod* selects the underlying hash exactly as described in
    HMAC.__init__.  The returned object supports update(), digest(),
    hexdigest() and copy().
    """
    return HMAC(key, msg, digestmod)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/__init__.py
new file mode 100644
index 00000000..0422cb51
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/__init__.py
@@ -0,0 +1,22 @@
+"""
+General functions for HTML manipulation.
+"""
+
+
# Codepoint -> entity replacement maps, passed to string.translate() by
# escape() below; the "full" variant also covers both quote characters.
_escape_map = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;'}
_escape_map_full = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;',
                    ord('"'): '&quot;', ord('\''): '&#x27;'}
+
+# NB: this is a candidate for a bytes/string polymorphic interface
+
def escape(s, quote=True):
    """
    Replace special characters "&", "<" and ">" to HTML-safe sequences.
    If the optional flag quote is true (the default), the quotation mark
    characters, both double quote (") and single quote (') characters are also
    translated.
    """
    # NOTE(review): this calls micropython-lib's string.translate() helper;
    # CPython 3's string module has no translate() — a CPython port would
    # use s.translate(...) directly.  Confirm the target runtime before
    # reusing this module elsewhere.
    import string
    if quote:
        return string.translate(s, _escape_map_full)
    return string.translate(s, _escape_map)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/entities.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/entities.py
new file mode 100644
index 00000000..e891ad65
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/entities.py
@@ -0,0 +1,2506 @@
+"""HTML character entity references."""
+
# maps the HTML 4.01 entity name (without the '&' / ';' delimiters)
# to its Unicode codepoint
name2codepoint = {
    'AElig':    0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
    'Aacute':   0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
    'Acirc':    0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
    'Agrave':   0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
    'Alpha':    0x0391, # greek capital letter alpha, U+0391
    'Aring':    0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
    'Atilde':   0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
    'Auml':     0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
    'Beta':     0x0392, # greek capital letter beta, U+0392
    'Ccedil':   0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
    'Chi':      0x03a7, # greek capital letter chi, U+03A7
    'Dagger':   0x2021, # double dagger, U+2021 ISOpub
    'Delta':    0x0394, # greek capital letter delta, U+0394 ISOgrk3
    'ETH':      0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
    'Eacute':   0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
    'Ecirc':    0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
    'Egrave':   0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
    'Epsilon':  0x0395, # greek capital letter epsilon, U+0395
    'Eta':      0x0397, # greek capital letter eta, U+0397
    'Euml':     0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
    'Gamma':    0x0393, # greek capital letter gamma, U+0393 ISOgrk3
    'Iacute':   0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
    'Icirc':    0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
    'Igrave':   0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
    'Iota':     0x0399, # greek capital letter iota, U+0399
    'Iuml':     0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
    'Kappa':    0x039a, # greek capital letter kappa, U+039A
    'Lambda':   0x039b, # greek capital letter lambda, U+039B ISOgrk3
    'Mu':       0x039c, # greek capital letter mu, U+039C
    'Ntilde':   0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
    'Nu':       0x039d, # greek capital letter nu, U+039D
    'OElig':    0x0152, # latin capital ligature OE, U+0152 ISOlat2
    'Oacute':   0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
    'Ocirc':    0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
    'Ograve':   0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
    'Omega':    0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
    'Omicron':  0x039f, # greek capital letter omicron, U+039F
    'Oslash':   0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
    'Otilde':   0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
    'Ouml':     0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
    'Phi':      0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
    'Pi':       0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
    'Prime':    0x2033, # double prime = seconds = inches, U+2033 ISOtech
    'Psi':      0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
    'Rho':      0x03a1, # greek capital letter rho, U+03A1
    'Scaron':   0x0160, # latin capital letter S with caron, U+0160 ISOlat2
    'Sigma':    0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
    'THORN':    0x00de, # latin capital letter THORN, U+00DE ISOlat1
    'Tau':      0x03a4, # greek capital letter tau, U+03A4
    'Theta':    0x0398, # greek capital letter theta, U+0398 ISOgrk3
    'Uacute':   0x00da, # latin capital letter U with acute, U+00DA ISOlat1
    'Ucirc':    0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
    'Ugrave':   0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
    'Upsilon':  0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
    'Uuml':     0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
    'Xi':       0x039e, # greek capital letter xi, U+039E ISOgrk3
    'Yacute':   0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
    'Yuml':     0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
    'Zeta':     0x0396, # greek capital letter zeta, U+0396
    'aacute':   0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
    'acirc':    0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
    'acute':    0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
    'aelig':    0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
    'agrave':   0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
    'alefsym':  0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
    'alpha':    0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
    'amp':      0x0026, # ampersand, U+0026 ISOnum
    'and':      0x2227, # logical and = wedge, U+2227 ISOtech
    'ang':      0x2220, # angle, U+2220 ISOamso
    'aring':    0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
    'asymp':    0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
    'atilde':   0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
    'auml':     0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
    'bdquo':    0x201e, # double low-9 quotation mark, U+201E NEW
    'beta':     0x03b2, # greek small letter beta, U+03B2 ISOgrk3
    'brvbar':   0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
    'bull':     0x2022, # bullet = black small circle, U+2022 ISOpub
    'cap':      0x2229, # intersection = cap, U+2229 ISOtech
    'ccedil':   0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
    'cedil':    0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
    'cent':     0x00a2, # cent sign, U+00A2 ISOnum
    'chi':      0x03c7, # greek small letter chi, U+03C7 ISOgrk3
    'circ':     0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
    'clubs':    0x2663, # black club suit = shamrock, U+2663 ISOpub
    'cong':     0x2245, # approximately equal to, U+2245 ISOtech
    'copy':     0x00a9, # copyright sign, U+00A9 ISOnum
    'crarr':    0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
    'cup':      0x222a, # union = cup, U+222A ISOtech
    'curren':   0x00a4, # currency sign, U+00A4 ISOnum
    'dArr':     0x21d3, # downwards double arrow, U+21D3 ISOamsa
    'dagger':   0x2020, # dagger, U+2020 ISOpub
    'darr':     0x2193, # downwards arrow, U+2193 ISOnum
    'deg':      0x00b0, # degree sign, U+00B0 ISOnum
    'delta':    0x03b4, # greek small letter delta, U+03B4 ISOgrk3
    'diams':    0x2666, # black diamond suit, U+2666 ISOpub
    'divide':   0x00f7, # division sign, U+00F7 ISOnum
    'eacute':   0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
    'ecirc':    0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
    'egrave':   0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
    'empty':    0x2205, # empty set = null set = diameter, U+2205 ISOamso
    'emsp':     0x2003, # em space, U+2003 ISOpub
    'ensp':     0x2002, # en space, U+2002 ISOpub
    'epsilon':  0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
    'equiv':    0x2261, # identical to, U+2261 ISOtech
    'eta':      0x03b7, # greek small letter eta, U+03B7 ISOgrk3
    'eth':      0x00f0, # latin small letter eth, U+00F0 ISOlat1
    'euml':     0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
    'euro':     0x20ac, # euro sign, U+20AC NEW
    'exist':    0x2203, # there exists, U+2203 ISOtech
    'fnof':     0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
    'forall':   0x2200, # for all, U+2200 ISOtech
    'frac12':   0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
    'frac14':   0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
    'frac34':   0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
    'frasl':    0x2044, # fraction slash, U+2044 NEW
    'gamma':    0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
    'ge':       0x2265, # greater-than or equal to, U+2265 ISOtech
    'gt':       0x003e, # greater-than sign, U+003E ISOnum
    'hArr':     0x21d4, # left right double arrow, U+21D4 ISOamsa
    'harr':     0x2194, # left right arrow, U+2194 ISOamsa
    'hearts':   0x2665, # black heart suit = valentine, U+2665 ISOpub
    'hellip':   0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
    'iacute':   0x00ed, # latin small letter i with acute, U+00ED ISOlat1
    'icirc':    0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
    'iexcl':    0x00a1, # inverted exclamation mark, U+00A1 ISOnum
    'igrave':   0x00ec, # latin small letter i with grave, U+00EC ISOlat1
    'image':    0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
    'infin':    0x221e, # infinity, U+221E ISOtech
    'int':      0x222b, # integral, U+222B ISOtech
    'iota':     0x03b9, # greek small letter iota, U+03B9 ISOgrk3
    'iquest':   0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
    'isin':     0x2208, # element of, U+2208 ISOtech
    'iuml':     0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
    'kappa':    0x03ba, # greek small letter kappa, U+03BA ISOgrk3
    'lArr':     0x21d0, # leftwards double arrow, U+21D0 ISOtech
    'lambda':   0x03bb, # greek small letter lambda, U+03BB ISOgrk3
    'lang':     0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
    'laquo':    0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
    'larr':     0x2190, # leftwards arrow, U+2190 ISOnum
    'lceil':    0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
    'ldquo':    0x201c, # left double quotation mark, U+201C ISOnum
    'le':       0x2264, # less-than or equal to, U+2264 ISOtech
    'lfloor':   0x230a, # left floor = apl downstile, U+230A ISOamsc
    'lowast':   0x2217, # asterisk operator, U+2217 ISOtech
    'loz':      0x25ca, # lozenge, U+25CA ISOpub
    'lrm':      0x200e, # left-to-right mark, U+200E NEW RFC 2070
    'lsaquo':   0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
    'lsquo':    0x2018, # left single quotation mark, U+2018 ISOnum
    'lt':       0x003c, # less-than sign, U+003C ISOnum
    'macr':     0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
    'mdash':    0x2014, # em dash, U+2014 ISOpub
    'micro':    0x00b5, # micro sign, U+00B5 ISOnum
    'middot':   0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
    'minus':    0x2212, # minus sign, U+2212 ISOtech
    'mu':       0x03bc, # greek small letter mu, U+03BC ISOgrk3
    'nabla':    0x2207, # nabla = backward difference, U+2207 ISOtech
    'nbsp':     0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
    'ndash':    0x2013, # en dash, U+2013 ISOpub
    'ne':       0x2260, # not equal to, U+2260 ISOtech
    'ni':       0x220b, # contains as member, U+220B ISOtech
    'not':      0x00ac, # not sign, U+00AC ISOnum
    'notin':    0x2209, # not an element of, U+2209 ISOtech
    'nsub':     0x2284, # not a subset of, U+2284 ISOamsn
    'ntilde':   0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
    'nu':       0x03bd, # greek small letter nu, U+03BD ISOgrk3
    'oacute':   0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
    'ocirc':    0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
    'oelig':    0x0153, # latin small ligature oe, U+0153 ISOlat2
    'ograve':   0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
    'oline':    0x203e, # overline = spacing overscore, U+203E NEW
    'omega':    0x03c9, # greek small letter omega, U+03C9 ISOgrk3
    'omicron':  0x03bf, # greek small letter omicron, U+03BF NEW
    'oplus':    0x2295, # circled plus = direct sum, U+2295 ISOamsb
    'or':       0x2228, # logical or = vee, U+2228 ISOtech
    'ordf':     0x00aa, # feminine ordinal indicator, U+00AA ISOnum
    'ordm':     0x00ba, # masculine ordinal indicator, U+00BA ISOnum
    'oslash':   0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
    'otilde':   0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
    'otimes':   0x2297, # circled times = vector product, U+2297 ISOamsb
    'ouml':     0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
    'para':     0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
    'part':     0x2202, # partial differential, U+2202 ISOtech
    'permil':   0x2030, # per mille sign, U+2030 ISOtech
    'perp':     0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
    'phi':      0x03c6, # greek small letter phi, U+03C6 ISOgrk3
    'pi':       0x03c0, # greek small letter pi, U+03C0 ISOgrk3
    'piv':      0x03d6, # greek pi symbol, U+03D6 ISOgrk3
    'plusmn':   0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
    'pound':    0x00a3, # pound sign, U+00A3 ISOnum
    'prime':    0x2032, # prime = minutes = feet, U+2032 ISOtech
    'prod':     0x220f, # n-ary product = product sign, U+220F ISOamsb
    'prop':     0x221d, # proportional to, U+221D ISOtech
    'psi':      0x03c8, # greek small letter psi, U+03C8 ISOgrk3
    'quot':     0x0022, # quotation mark = APL quote, U+0022 ISOnum
    'rArr':     0x21d2, # rightwards double arrow, U+21D2 ISOtech
    'radic':    0x221a, # square root = radical sign, U+221A ISOtech
    'rang':     0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
    'raquo':    0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
    'rarr':     0x2192, # rightwards arrow, U+2192 ISOnum
    'rceil':    0x2309, # right ceiling, U+2309 ISOamsc
    'rdquo':    0x201d, # right double quotation mark, U+201D ISOnum
    'real':     0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
    'reg':      0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
    'rfloor':   0x230b, # right floor, U+230B ISOamsc
    'rho':      0x03c1, # greek small letter rho, U+03C1 ISOgrk3
    'rlm':      0x200f, # right-to-left mark, U+200F NEW RFC 2070
    'rsaquo':   0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
    'rsquo':    0x2019, # right single quotation mark, U+2019 ISOnum
    'sbquo':    0x201a, # single low-9 quotation mark, U+201A NEW
    'scaron':   0x0161, # latin small letter s with caron, U+0161 ISOlat2
    'sdot':     0x22c5, # dot operator, U+22C5 ISOamsb
    'sect':     0x00a7, # section sign, U+00A7 ISOnum
    'shy':      0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
    'sigma':    0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
    'sigmaf':   0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
    'sim':      0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
    'spades':   0x2660, # black spade suit, U+2660 ISOpub
    'sub':      0x2282, # subset of, U+2282 ISOtech
    'sube':     0x2286, # subset of or equal to, U+2286 ISOtech
    'sum':      0x2211, # n-ary sumation, U+2211 ISOamsb
    'sup':      0x2283, # superset of, U+2283 ISOtech
    'sup1':     0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
    'sup2':     0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
    'sup3':     0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
    'supe':     0x2287, # superset of or equal to, U+2287 ISOtech
    'szlig':    0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
    'tau':      0x03c4, # greek small letter tau, U+03C4 ISOgrk3
    'there4':   0x2234, # therefore, U+2234 ISOtech
    'theta':    0x03b8, # greek small letter theta, U+03B8 ISOgrk3
    'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
    'thinsp':   0x2009, # thin space, U+2009 ISOpub
    'thorn':    0x00fe, # latin small letter thorn with, U+00FE ISOlat1
    'tilde':    0x02dc, # small tilde, U+02DC ISOdia
    'times':    0x00d7, # multiplication sign, U+00D7 ISOnum
    'trade':    0x2122, # trade mark sign, U+2122 ISOnum
    'uArr':     0x21d1, # upwards double arrow, U+21D1 ISOamsa
    'uacute':   0x00fa, # latin small letter u with acute, U+00FA ISOlat1
    'uarr':     0x2191, # upwards arrow, U+2191 ISOnum
    'ucirc':    0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
    'ugrave':   0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
    'uml':      0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
    'upsih':    0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
    'upsilon':  0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
    'uuml':     0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
    'weierp':   0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
    'xi':       0x03be, # greek small letter xi, U+03BE ISOgrk3
    'yacute':   0x00fd, # latin small letter y with acute, U+00FD ISOlat1
    'yen':      0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
    'yuml':     0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
    'zeta':     0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
    'zwj':      0x200d, # zero width joiner, U+200D NEW RFC 2070
    'zwnj':     0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
+
+
+# maps the HTML5 named character references to the equivalent Unicode character(s)
+html5 = {
+    'Aacute': '\xc1',
+    'aacute': '\xe1',
+    'Aacute;': '\xc1',
+    'aacute;': '\xe1',
+    'Abreve;': '\u0102',
+    'abreve;': '\u0103',
+    'ac;': '\u223e',
+    'acd;': '\u223f',
+    'acE;': '\u223e\u0333',
+    'Acirc': '\xc2',
+    'acirc': '\xe2',
+    'Acirc;': '\xc2',
+    'acirc;': '\xe2',
+    'acute': '\xb4',
+    'acute;': '\xb4',
+    'Acy;': '\u0410',
+    'acy;': '\u0430',
+    'AElig': '\xc6',
+    'aelig': '\xe6',
+    'AElig;': '\xc6',
+    'aelig;': '\xe6',
+    'af;': '\u2061',
+    'Afr;': '\U0001d504',
+    'afr;': '\U0001d51e',
+    'Agrave': '\xc0',
+    'agrave': '\xe0',
+    'Agrave;': '\xc0',
+    'agrave;': '\xe0',
+    'alefsym;': '\u2135',
+    'aleph;': '\u2135',
+    'Alpha;': '\u0391',
+    'alpha;': '\u03b1',
+    'Amacr;': '\u0100',
+    'amacr;': '\u0101',
+    'amalg;': '\u2a3f',
+    'AMP': '&',
+    'amp': '&',
+    'AMP;': '&',
+    'amp;': '&',
+    'And;': '\u2a53',
+    'and;': '\u2227',
+    'andand;': '\u2a55',
+    'andd;': '\u2a5c',
+    'andslope;': '\u2a58',
+    'andv;': '\u2a5a',
+    'ang;': '\u2220',
+    'ange;': '\u29a4',
+    'angle;': '\u2220',
+    'angmsd;': '\u2221',
+    'angmsdaa;': '\u29a8',
+    'angmsdab;': '\u29a9',
+    'angmsdac;': '\u29aa',
+    'angmsdad;': '\u29ab',
+    'angmsdae;': '\u29ac',
+    'angmsdaf;': '\u29ad',
+    'angmsdag;': '\u29ae',
+    'angmsdah;': '\u29af',
+    'angrt;': '\u221f',
+    'angrtvb;': '\u22be',
+    'angrtvbd;': '\u299d',
+    'angsph;': '\u2222',
+    'angst;': '\xc5',
+    'angzarr;': '\u237c',
+    'Aogon;': '\u0104',
+    'aogon;': '\u0105',
+    'Aopf;': '\U0001d538',
+    'aopf;': '\U0001d552',
+    'ap;': '\u2248',
+    'apacir;': '\u2a6f',
+    'apE;': '\u2a70',
+    'ape;': '\u224a',
+    'apid;': '\u224b',
+    'apos;': "'",
+    'ApplyFunction;': '\u2061',
+    'approx;': '\u2248',
+    'approxeq;': '\u224a',
+    'Aring': '\xc5',
+    'aring': '\xe5',
+    'Aring;': '\xc5',
+    'aring;': '\xe5',
+    'Ascr;': '\U0001d49c',
+    'ascr;': '\U0001d4b6',
+    'Assign;': '\u2254',
+    'ast;': '*',
+    'asymp;': '\u2248',
+    'asympeq;': '\u224d',
+    'Atilde': '\xc3',
+    'atilde': '\xe3',
+    'Atilde;': '\xc3',
+    'atilde;': '\xe3',
+    'Auml': '\xc4',
+    'auml': '\xe4',
+    'Auml;': '\xc4',
+    'auml;': '\xe4',
+    'awconint;': '\u2233',
+    'awint;': '\u2a11',
+    'backcong;': '\u224c',
+    'backepsilon;': '\u03f6',
+    'backprime;': '\u2035',
+    'backsim;': '\u223d',
+    'backsimeq;': '\u22cd',
+    'Backslash;': '\u2216',
+    'Barv;': '\u2ae7',
+    'barvee;': '\u22bd',
+    'Barwed;': '\u2306',
+    'barwed;': '\u2305',
+    'barwedge;': '\u2305',
+    'bbrk;': '\u23b5',
+    'bbrktbrk;': '\u23b6',
+    'bcong;': '\u224c',
+    'Bcy;': '\u0411',
+    'bcy;': '\u0431',
+    'bdquo;': '\u201e',
+    'becaus;': '\u2235',
+    'Because;': '\u2235',
+    'because;': '\u2235',
+    'bemptyv;': '\u29b0',
+    'bepsi;': '\u03f6',
+    'bernou;': '\u212c',
+    'Bernoullis;': '\u212c',
+    'Beta;': '\u0392',
+    'beta;': '\u03b2',
+    'beth;': '\u2136',
+    'between;': '\u226c',
+    'Bfr;': '\U0001d505',
+    'bfr;': '\U0001d51f',
+    'bigcap;': '\u22c2',
+    'bigcirc;': '\u25ef',
+    'bigcup;': '\u22c3',
+    'bigodot;': '\u2a00',
+    'bigoplus;': '\u2a01',
+    'bigotimes;': '\u2a02',
+    'bigsqcup;': '\u2a06',
+    'bigstar;': '\u2605',
+    'bigtriangledown;': '\u25bd',
+    'bigtriangleup;': '\u25b3',
+    'biguplus;': '\u2a04',
+    'bigvee;': '\u22c1',
+    'bigwedge;': '\u22c0',
+    'bkarow;': '\u290d',
+    'blacklozenge;': '\u29eb',
+    'blacksquare;': '\u25aa',
+    'blacktriangle;': '\u25b4',
+    'blacktriangledown;': '\u25be',
+    'blacktriangleleft;': '\u25c2',
+    'blacktriangleright;': '\u25b8',
+    'blank;': '\u2423',
+    'blk12;': '\u2592',
+    'blk14;': '\u2591',
+    'blk34;': '\u2593',
+    'block;': '\u2588',
+    'bne;': '=\u20e5',
+    'bnequiv;': '\u2261\u20e5',
+    'bNot;': '\u2aed',
+    'bnot;': '\u2310',
+    'Bopf;': '\U0001d539',
+    'bopf;': '\U0001d553',
+    'bot;': '\u22a5',
+    'bottom;': '\u22a5',
+    'bowtie;': '\u22c8',
+    'boxbox;': '\u29c9',
+    'boxDL;': '\u2557',
+    'boxDl;': '\u2556',
+    'boxdL;': '\u2555',
+    'boxdl;': '\u2510',
+    'boxDR;': '\u2554',
+    'boxDr;': '\u2553',
+    'boxdR;': '\u2552',
+    'boxdr;': '\u250c',
+    'boxH;': '\u2550',
+    'boxh;': '\u2500',
+    'boxHD;': '\u2566',
+    'boxHd;': '\u2564',
+    'boxhD;': '\u2565',
+    'boxhd;': '\u252c',
+    'boxHU;': '\u2569',
+    'boxHu;': '\u2567',
+    'boxhU;': '\u2568',
+    'boxhu;': '\u2534',
+    'boxminus;': '\u229f',
+    'boxplus;': '\u229e',
+    'boxtimes;': '\u22a0',
+    'boxUL;': '\u255d',
+    'boxUl;': '\u255c',
+    'boxuL;': '\u255b',
+    'boxul;': '\u2518',
+    'boxUR;': '\u255a',
+    'boxUr;': '\u2559',
+    'boxuR;': '\u2558',
+    'boxur;': '\u2514',
+    'boxV;': '\u2551',
+    'boxv;': '\u2502',
+    'boxVH;': '\u256c',
+    'boxVh;': '\u256b',
+    'boxvH;': '\u256a',
+    'boxvh;': '\u253c',
+    'boxVL;': '\u2563',
+    'boxVl;': '\u2562',
+    'boxvL;': '\u2561',
+    'boxvl;': '\u2524',
+    'boxVR;': '\u2560',
+    'boxVr;': '\u255f',
+    'boxvR;': '\u255e',
+    'boxvr;': '\u251c',
+    'bprime;': '\u2035',
+    'Breve;': '\u02d8',
+    'breve;': '\u02d8',
+    'brvbar': '\xa6',
+    'brvbar;': '\xa6',
+    'Bscr;': '\u212c',
+    'bscr;': '\U0001d4b7',
+    'bsemi;': '\u204f',
+    'bsim;': '\u223d',
+    'bsime;': '\u22cd',
+    'bsol;': '\\',
+    'bsolb;': '\u29c5',
+    'bsolhsub;': '\u27c8',
+    'bull;': '\u2022',
+    'bullet;': '\u2022',
+    'bump;': '\u224e',
+    'bumpE;': '\u2aae',
+    'bumpe;': '\u224f',
+    'Bumpeq;': '\u224e',
+    'bumpeq;': '\u224f',
+    'Cacute;': '\u0106',
+    'cacute;': '\u0107',
+    'Cap;': '\u22d2',
+    'cap;': '\u2229',
+    'capand;': '\u2a44',
+    'capbrcup;': '\u2a49',
+    'capcap;': '\u2a4b',
+    'capcup;': '\u2a47',
+    'capdot;': '\u2a40',
+    'CapitalDifferentialD;': '\u2145',
+    'caps;': '\u2229\ufe00',
+    'caret;': '\u2041',
+    'caron;': '\u02c7',
+    'Cayleys;': '\u212d',
+    'ccaps;': '\u2a4d',
+    'Ccaron;': '\u010c',
+    'ccaron;': '\u010d',
+    'Ccedil': '\xc7',
+    'ccedil': '\xe7',
+    'Ccedil;': '\xc7',
+    'ccedil;': '\xe7',
+    'Ccirc;': '\u0108',
+    'ccirc;': '\u0109',
+    'Cconint;': '\u2230',
+    'ccups;': '\u2a4c',
+    'ccupssm;': '\u2a50',
+    'Cdot;': '\u010a',
+    'cdot;': '\u010b',
+    'cedil': '\xb8',
+    'cedil;': '\xb8',
+    'Cedilla;': '\xb8',
+    'cemptyv;': '\u29b2',
+    'cent': '\xa2',
+    'cent;': '\xa2',
+    'CenterDot;': '\xb7',
+    'centerdot;': '\xb7',
+    'Cfr;': '\u212d',
+    'cfr;': '\U0001d520',
+    'CHcy;': '\u0427',
+    'chcy;': '\u0447',
+    'check;': '\u2713',
+    'checkmark;': '\u2713',
+    'Chi;': '\u03a7',
+    'chi;': '\u03c7',
+    'cir;': '\u25cb',
+    'circ;': '\u02c6',
+    'circeq;': '\u2257',
+    'circlearrowleft;': '\u21ba',
+    'circlearrowright;': '\u21bb',
+    'circledast;': '\u229b',
+    'circledcirc;': '\u229a',
+    'circleddash;': '\u229d',
+    'CircleDot;': '\u2299',
+    'circledR;': '\xae',
+    'circledS;': '\u24c8',
+    'CircleMinus;': '\u2296',
+    'CirclePlus;': '\u2295',
+    'CircleTimes;': '\u2297',
+    'cirE;': '\u29c3',
+    'cire;': '\u2257',
+    'cirfnint;': '\u2a10',
+    'cirmid;': '\u2aef',
+    'cirscir;': '\u29c2',
+    'ClockwiseContourIntegral;': '\u2232',
+    'CloseCurlyDoubleQuote;': '\u201d',
+    'CloseCurlyQuote;': '\u2019',
+    'clubs;': '\u2663',
+    'clubsuit;': '\u2663',
+    'Colon;': '\u2237',
+    'colon;': ':',
+    'Colone;': '\u2a74',
+    'colone;': '\u2254',
+    'coloneq;': '\u2254',
+    'comma;': ',',
+    'commat;': '@',
+    'comp;': '\u2201',
+    'compfn;': '\u2218',
+    'complement;': '\u2201',
+    'complexes;': '\u2102',
+    'cong;': '\u2245',
+    'congdot;': '\u2a6d',
+    'Congruent;': '\u2261',
+    'Conint;': '\u222f',
+    'conint;': '\u222e',
+    'ContourIntegral;': '\u222e',
+    'Copf;': '\u2102',
+    'copf;': '\U0001d554',
+    'coprod;': '\u2210',
+    'Coproduct;': '\u2210',
+    'COPY': '\xa9',
+    'copy': '\xa9',
+    'COPY;': '\xa9',
+    'copy;': '\xa9',
+    'copysr;': '\u2117',
+    'CounterClockwiseContourIntegral;': '\u2233',
+    'crarr;': '\u21b5',
+    'Cross;': '\u2a2f',
+    'cross;': '\u2717',
+    'Cscr;': '\U0001d49e',
+    'cscr;': '\U0001d4b8',
+    'csub;': '\u2acf',
+    'csube;': '\u2ad1',
+    'csup;': '\u2ad0',
+    'csupe;': '\u2ad2',
+    'ctdot;': '\u22ef',
+    'cudarrl;': '\u2938',
+    'cudarrr;': '\u2935',
+    'cuepr;': '\u22de',
+    'cuesc;': '\u22df',
+    'cularr;': '\u21b6',
+    'cularrp;': '\u293d',
+    'Cup;': '\u22d3',
+    'cup;': '\u222a',
+    'cupbrcap;': '\u2a48',
+    'CupCap;': '\u224d',
+    'cupcap;': '\u2a46',
+    'cupcup;': '\u2a4a',
+    'cupdot;': '\u228d',
+    'cupor;': '\u2a45',
+    'cups;': '\u222a\ufe00',
+    'curarr;': '\u21b7',
+    'curarrm;': '\u293c',
+    'curlyeqprec;': '\u22de',
+    'curlyeqsucc;': '\u22df',
+    'curlyvee;': '\u22ce',
+    'curlywedge;': '\u22cf',
+    'curren': '\xa4',
+    'curren;': '\xa4',
+    'curvearrowleft;': '\u21b6',
+    'curvearrowright;': '\u21b7',
+    'cuvee;': '\u22ce',
+    'cuwed;': '\u22cf',
+    'cwconint;': '\u2232',
+    'cwint;': '\u2231',
+    'cylcty;': '\u232d',
+    'Dagger;': '\u2021',
+    'dagger;': '\u2020',
+    'daleth;': '\u2138',
+    'Darr;': '\u21a1',
+    'dArr;': '\u21d3',
+    'darr;': '\u2193',
+    'dash;': '\u2010',
+    'Dashv;': '\u2ae4',
+    'dashv;': '\u22a3',
+    'dbkarow;': '\u290f',
+    'dblac;': '\u02dd',
+    'Dcaron;': '\u010e',
+    'dcaron;': '\u010f',
+    'Dcy;': '\u0414',
+    'dcy;': '\u0434',
+    'DD;': '\u2145',
+    'dd;': '\u2146',
+    'ddagger;': '\u2021',
+    'ddarr;': '\u21ca',
+    'DDotrahd;': '\u2911',
+    'ddotseq;': '\u2a77',
+    'deg': '\xb0',
+    'deg;': '\xb0',
+    'Del;': '\u2207',
+    'Delta;': '\u0394',
+    'delta;': '\u03b4',
+    'demptyv;': '\u29b1',
+    'dfisht;': '\u297f',
+    'Dfr;': '\U0001d507',
+    'dfr;': '\U0001d521',
+    'dHar;': '\u2965',
+    'dharl;': '\u21c3',
+    'dharr;': '\u21c2',
+    'DiacriticalAcute;': '\xb4',
+    'DiacriticalDot;': '\u02d9',
+    'DiacriticalDoubleAcute;': '\u02dd',
+    'DiacriticalGrave;': '`',
+    'DiacriticalTilde;': '\u02dc',
+    'diam;': '\u22c4',
+    'Diamond;': '\u22c4',
+    'diamond;': '\u22c4',
+    'diamondsuit;': '\u2666',
+    'diams;': '\u2666',
+    'die;': '\xa8',
+    'DifferentialD;': '\u2146',
+    'digamma;': '\u03dd',
+    'disin;': '\u22f2',
+    'div;': '\xf7',
+    'divide': '\xf7',
+    'divide;': '\xf7',
+    'divideontimes;': '\u22c7',
+    'divonx;': '\u22c7',
+    'DJcy;': '\u0402',
+    'djcy;': '\u0452',
+    'dlcorn;': '\u231e',
+    'dlcrop;': '\u230d',
+    'dollar;': '$',
+    'Dopf;': '\U0001d53b',
+    'dopf;': '\U0001d555',
+    'Dot;': '\xa8',
+    'dot;': '\u02d9',
+    'DotDot;': '\u20dc',
+    'doteq;': '\u2250',
+    'doteqdot;': '\u2251',
+    'DotEqual;': '\u2250',
+    'dotminus;': '\u2238',
+    'dotplus;': '\u2214',
+    'dotsquare;': '\u22a1',
+    'doublebarwedge;': '\u2306',
+    'DoubleContourIntegral;': '\u222f',
+    'DoubleDot;': '\xa8',
+    'DoubleDownArrow;': '\u21d3',
+    'DoubleLeftArrow;': '\u21d0',
+    'DoubleLeftRightArrow;': '\u21d4',
+    'DoubleLeftTee;': '\u2ae4',
+    'DoubleLongLeftArrow;': '\u27f8',
+    'DoubleLongLeftRightArrow;': '\u27fa',
+    'DoubleLongRightArrow;': '\u27f9',
+    'DoubleRightArrow;': '\u21d2',
+    'DoubleRightTee;': '\u22a8',
+    'DoubleUpArrow;': '\u21d1',
+    'DoubleUpDownArrow;': '\u21d5',
+    'DoubleVerticalBar;': '\u2225',
+    'DownArrow;': '\u2193',
+    'Downarrow;': '\u21d3',
+    'downarrow;': '\u2193',
+    'DownArrowBar;': '\u2913',
+    'DownArrowUpArrow;': '\u21f5',
+    'DownBreve;': '\u0311',
+    'downdownarrows;': '\u21ca',
+    'downharpoonleft;': '\u21c3',
+    'downharpoonright;': '\u21c2',
+    'DownLeftRightVector;': '\u2950',
+    'DownLeftTeeVector;': '\u295e',
+    'DownLeftVector;': '\u21bd',
+    'DownLeftVectorBar;': '\u2956',
+    'DownRightTeeVector;': '\u295f',
+    'DownRightVector;': '\u21c1',
+    'DownRightVectorBar;': '\u2957',
+    'DownTee;': '\u22a4',
+    'DownTeeArrow;': '\u21a7',
+    'drbkarow;': '\u2910',
+    'drcorn;': '\u231f',
+    'drcrop;': '\u230c',
+    'Dscr;': '\U0001d49f',
+    'dscr;': '\U0001d4b9',
+    'DScy;': '\u0405',
+    'dscy;': '\u0455',
+    'dsol;': '\u29f6',
+    'Dstrok;': '\u0110',
+    'dstrok;': '\u0111',
+    'dtdot;': '\u22f1',
+    'dtri;': '\u25bf',
+    'dtrif;': '\u25be',
+    'duarr;': '\u21f5',
+    'duhar;': '\u296f',
+    'dwangle;': '\u29a6',
+    'DZcy;': '\u040f',
+    'dzcy;': '\u045f',
+    'dzigrarr;': '\u27ff',
+    'Eacute': '\xc9',
+    'eacute': '\xe9',
+    'Eacute;': '\xc9',
+    'eacute;': '\xe9',
+    'easter;': '\u2a6e',
+    'Ecaron;': '\u011a',
+    'ecaron;': '\u011b',
+    'ecir;': '\u2256',
+    'Ecirc': '\xca',
+    'ecirc': '\xea',
+    'Ecirc;': '\xca',
+    'ecirc;': '\xea',
+    'ecolon;': '\u2255',
+    'Ecy;': '\u042d',
+    'ecy;': '\u044d',
+    'eDDot;': '\u2a77',
+    'Edot;': '\u0116',
+    'eDot;': '\u2251',
+    'edot;': '\u0117',
+    'ee;': '\u2147',
+    'efDot;': '\u2252',
+    'Efr;': '\U0001d508',
+    'efr;': '\U0001d522',
+    'eg;': '\u2a9a',
+    'Egrave': '\xc8',
+    'egrave': '\xe8',
+    'Egrave;': '\xc8',
+    'egrave;': '\xe8',
+    'egs;': '\u2a96',
+    'egsdot;': '\u2a98',
+    'el;': '\u2a99',
+    'Element;': '\u2208',
+    'elinters;': '\u23e7',
+    'ell;': '\u2113',
+    'els;': '\u2a95',
+    'elsdot;': '\u2a97',
+    'Emacr;': '\u0112',
+    'emacr;': '\u0113',
+    'empty;': '\u2205',
+    'emptyset;': '\u2205',
+    'EmptySmallSquare;': '\u25fb',
+    'emptyv;': '\u2205',
+    'EmptyVerySmallSquare;': '\u25ab',
+    'emsp13;': '\u2004',
+    'emsp14;': '\u2005',
+    'emsp;': '\u2003',
+    'ENG;': '\u014a',
+    'eng;': '\u014b',
+    'ensp;': '\u2002',
+    'Eogon;': '\u0118',
+    'eogon;': '\u0119',
+    'Eopf;': '\U0001d53c',
+    'eopf;': '\U0001d556',
+    'epar;': '\u22d5',
+    'eparsl;': '\u29e3',
+    'eplus;': '\u2a71',
+    'epsi;': '\u03b5',
+    'Epsilon;': '\u0395',
+    'epsilon;': '\u03b5',
+    'epsiv;': '\u03f5',
+    'eqcirc;': '\u2256',
+    'eqcolon;': '\u2255',
+    'eqsim;': '\u2242',
+    'eqslantgtr;': '\u2a96',
+    'eqslantless;': '\u2a95',
+    'Equal;': '\u2a75',
+    'equals;': '=',
+    'EqualTilde;': '\u2242',
+    'equest;': '\u225f',
+    'Equilibrium;': '\u21cc',
+    'equiv;': '\u2261',
+    'equivDD;': '\u2a78',
+    'eqvparsl;': '\u29e5',
+    'erarr;': '\u2971',
+    'erDot;': '\u2253',
+    'Escr;': '\u2130',
+    'escr;': '\u212f',
+    'esdot;': '\u2250',
+    'Esim;': '\u2a73',
+    'esim;': '\u2242',
+    'Eta;': '\u0397',
+    'eta;': '\u03b7',
+    'ETH': '\xd0',
+    'eth': '\xf0',
+    'ETH;': '\xd0',
+    'eth;': '\xf0',
+    'Euml': '\xcb',
+    'euml': '\xeb',
+    'Euml;': '\xcb',
+    'euml;': '\xeb',
+    'euro;': '\u20ac',
+    'excl;': '!',
+    'exist;': '\u2203',
+    'Exists;': '\u2203',
+    'expectation;': '\u2130',
+    'ExponentialE;': '\u2147',
+    'exponentiale;': '\u2147',
+    'fallingdotseq;': '\u2252',
+    'Fcy;': '\u0424',
+    'fcy;': '\u0444',
+    'female;': '\u2640',
+    'ffilig;': '\ufb03',
+    'fflig;': '\ufb00',
+    'ffllig;': '\ufb04',
+    'Ffr;': '\U0001d509',
+    'ffr;': '\U0001d523',
+    'filig;': '\ufb01',
+    'FilledSmallSquare;': '\u25fc',
+    'FilledVerySmallSquare;': '\u25aa',
+    'fjlig;': 'fj',
+    'flat;': '\u266d',
+    'fllig;': '\ufb02',
+    'fltns;': '\u25b1',
+    'fnof;': '\u0192',
+    'Fopf;': '\U0001d53d',
+    'fopf;': '\U0001d557',
+    'ForAll;': '\u2200',
+    'forall;': '\u2200',
+    'fork;': '\u22d4',
+    'forkv;': '\u2ad9',
+    'Fouriertrf;': '\u2131',
+    'fpartint;': '\u2a0d',
+    'frac12': '\xbd',
+    'frac12;': '\xbd',
+    'frac13;': '\u2153',
+    'frac14': '\xbc',
+    'frac14;': '\xbc',
+    'frac15;': '\u2155',
+    'frac16;': '\u2159',
+    'frac18;': '\u215b',
+    'frac23;': '\u2154',
+    'frac25;': '\u2156',
+    'frac34': '\xbe',
+    'frac34;': '\xbe',
+    'frac35;': '\u2157',
+    'frac38;': '\u215c',
+    'frac45;': '\u2158',
+    'frac56;': '\u215a',
+    'frac58;': '\u215d',
+    'frac78;': '\u215e',
+    'frasl;': '\u2044',
+    'frown;': '\u2322',
+    'Fscr;': '\u2131',
+    'fscr;': '\U0001d4bb',
+    'gacute;': '\u01f5',
+    'Gamma;': '\u0393',
+    'gamma;': '\u03b3',
+    'Gammad;': '\u03dc',
+    'gammad;': '\u03dd',
+    'gap;': '\u2a86',
+    'Gbreve;': '\u011e',
+    'gbreve;': '\u011f',
+    'Gcedil;': '\u0122',
+    'Gcirc;': '\u011c',
+    'gcirc;': '\u011d',
+    'Gcy;': '\u0413',
+    'gcy;': '\u0433',
+    'Gdot;': '\u0120',
+    'gdot;': '\u0121',
+    'gE;': '\u2267',
+    'ge;': '\u2265',
+    'gEl;': '\u2a8c',
+    'gel;': '\u22db',
+    'geq;': '\u2265',
+    'geqq;': '\u2267',
+    'geqslant;': '\u2a7e',
+    'ges;': '\u2a7e',
+    'gescc;': '\u2aa9',
+    'gesdot;': '\u2a80',
+    'gesdoto;': '\u2a82',
+    'gesdotol;': '\u2a84',
+    'gesl;': '\u22db\ufe00',
+    'gesles;': '\u2a94',
+    'Gfr;': '\U0001d50a',
+    'gfr;': '\U0001d524',
+    'Gg;': '\u22d9',
+    'gg;': '\u226b',
+    'ggg;': '\u22d9',
+    'gimel;': '\u2137',
+    'GJcy;': '\u0403',
+    'gjcy;': '\u0453',
+    'gl;': '\u2277',
+    'gla;': '\u2aa5',
+    'glE;': '\u2a92',
+    'glj;': '\u2aa4',
+    'gnap;': '\u2a8a',
+    'gnapprox;': '\u2a8a',
+    'gnE;': '\u2269',
+    'gne;': '\u2a88',
+    'gneq;': '\u2a88',
+    'gneqq;': '\u2269',
+    'gnsim;': '\u22e7',
+    'Gopf;': '\U0001d53e',
+    'gopf;': '\U0001d558',
+    'grave;': '`',
+    'GreaterEqual;': '\u2265',
+    'GreaterEqualLess;': '\u22db',
+    'GreaterFullEqual;': '\u2267',
+    'GreaterGreater;': '\u2aa2',
+    'GreaterLess;': '\u2277',
+    'GreaterSlantEqual;': '\u2a7e',
+    'GreaterTilde;': '\u2273',
+    'Gscr;': '\U0001d4a2',
+    'gscr;': '\u210a',
+    'gsim;': '\u2273',
+    'gsime;': '\u2a8e',
+    'gsiml;': '\u2a90',
+    'GT': '>',
+    'gt': '>',
+    'GT;': '>',
+    'Gt;': '\u226b',
+    'gt;': '>',
+    'gtcc;': '\u2aa7',
+    'gtcir;': '\u2a7a',
+    'gtdot;': '\u22d7',
+    'gtlPar;': '\u2995',
+    'gtquest;': '\u2a7c',
+    'gtrapprox;': '\u2a86',
+    'gtrarr;': '\u2978',
+    'gtrdot;': '\u22d7',
+    'gtreqless;': '\u22db',
+    'gtreqqless;': '\u2a8c',
+    'gtrless;': '\u2277',
+    'gtrsim;': '\u2273',
+    'gvertneqq;': '\u2269\ufe00',
+    'gvnE;': '\u2269\ufe00',
+    'Hacek;': '\u02c7',
+    'hairsp;': '\u200a',
+    'half;': '\xbd',
+    'hamilt;': '\u210b',
+    'HARDcy;': '\u042a',
+    'hardcy;': '\u044a',
+    'hArr;': '\u21d4',
+    'harr;': '\u2194',
+    'harrcir;': '\u2948',
+    'harrw;': '\u21ad',
+    'Hat;': '^',
+    'hbar;': '\u210f',
+    'Hcirc;': '\u0124',
+    'hcirc;': '\u0125',
+    'hearts;': '\u2665',
+    'heartsuit;': '\u2665',
+    'hellip;': '\u2026',
+    'hercon;': '\u22b9',
+    'Hfr;': '\u210c',
+    'hfr;': '\U0001d525',
+    'HilbertSpace;': '\u210b',
+    'hksearow;': '\u2925',
+    'hkswarow;': '\u2926',
+    'hoarr;': '\u21ff',
+    'homtht;': '\u223b',
+    'hookleftarrow;': '\u21a9',
+    'hookrightarrow;': '\u21aa',
+    'Hopf;': '\u210d',
+    'hopf;': '\U0001d559',
+    'horbar;': '\u2015',
+    'HorizontalLine;': '\u2500',
+    'Hscr;': '\u210b',
+    'hscr;': '\U0001d4bd',
+    'hslash;': '\u210f',
+    'Hstrok;': '\u0126',
+    'hstrok;': '\u0127',
+    'HumpDownHump;': '\u224e',
+    'HumpEqual;': '\u224f',
+    'hybull;': '\u2043',
+    'hyphen;': '\u2010',
+    'Iacute': '\xcd',
+    'iacute': '\xed',
+    'Iacute;': '\xcd',
+    'iacute;': '\xed',
+    'ic;': '\u2063',
+    'Icirc': '\xce',
+    'icirc': '\xee',
+    'Icirc;': '\xce',
+    'icirc;': '\xee',
+    'Icy;': '\u0418',
+    'icy;': '\u0438',
+    'Idot;': '\u0130',
+    'IEcy;': '\u0415',
+    'iecy;': '\u0435',
+    'iexcl': '\xa1',
+    'iexcl;': '\xa1',
+    'iff;': '\u21d4',
+    'Ifr;': '\u2111',
+    'ifr;': '\U0001d526',
+    'Igrave': '\xcc',
+    'igrave': '\xec',
+    'Igrave;': '\xcc',
+    'igrave;': '\xec',
+    'ii;': '\u2148',
+    'iiiint;': '\u2a0c',
+    'iiint;': '\u222d',
+    'iinfin;': '\u29dc',
+    'iiota;': '\u2129',
+    'IJlig;': '\u0132',
+    'ijlig;': '\u0133',
+    'Im;': '\u2111',
+    'Imacr;': '\u012a',
+    'imacr;': '\u012b',
+    'image;': '\u2111',
+    'ImaginaryI;': '\u2148',
+    'imagline;': '\u2110',
+    'imagpart;': '\u2111',
+    'imath;': '\u0131',
+    'imof;': '\u22b7',
+    'imped;': '\u01b5',
+    'Implies;': '\u21d2',
+    'in;': '\u2208',
+    'incare;': '\u2105',
+    'infin;': '\u221e',
+    'infintie;': '\u29dd',
+    'inodot;': '\u0131',
+    'Int;': '\u222c',
+    'int;': '\u222b',
+    'intcal;': '\u22ba',
+    'integers;': '\u2124',
+    'Integral;': '\u222b',
+    'intercal;': '\u22ba',
+    'Intersection;': '\u22c2',
+    'intlarhk;': '\u2a17',
+    'intprod;': '\u2a3c',
+    'InvisibleComma;': '\u2063',
+    'InvisibleTimes;': '\u2062',
+    'IOcy;': '\u0401',
+    'iocy;': '\u0451',
+    'Iogon;': '\u012e',
+    'iogon;': '\u012f',
+    'Iopf;': '\U0001d540',
+    'iopf;': '\U0001d55a',
+    'Iota;': '\u0399',
+    'iota;': '\u03b9',
+    'iprod;': '\u2a3c',
+    'iquest': '\xbf',
+    'iquest;': '\xbf',
+    'Iscr;': '\u2110',
+    'iscr;': '\U0001d4be',
+    'isin;': '\u2208',
+    'isindot;': '\u22f5',
+    'isinE;': '\u22f9',
+    'isins;': '\u22f4',
+    'isinsv;': '\u22f3',
+    'isinv;': '\u2208',
+    'it;': '\u2062',
+    'Itilde;': '\u0128',
+    'itilde;': '\u0129',
+    'Iukcy;': '\u0406',
+    'iukcy;': '\u0456',
+    'Iuml': '\xcf',
+    'iuml': '\xef',
+    'Iuml;': '\xcf',
+    'iuml;': '\xef',
+    'Jcirc;': '\u0134',
+    'jcirc;': '\u0135',
+    'Jcy;': '\u0419',
+    'jcy;': '\u0439',
+    'Jfr;': '\U0001d50d',
+    'jfr;': '\U0001d527',
+    'jmath;': '\u0237',
+    'Jopf;': '\U0001d541',
+    'jopf;': '\U0001d55b',
+    'Jscr;': '\U0001d4a5',
+    'jscr;': '\U0001d4bf',
+    'Jsercy;': '\u0408',
+    'jsercy;': '\u0458',
+    'Jukcy;': '\u0404',
+    'jukcy;': '\u0454',
+    'Kappa;': '\u039a',
+    'kappa;': '\u03ba',
+    'kappav;': '\u03f0',
+    'Kcedil;': '\u0136',
+    'kcedil;': '\u0137',
+    'Kcy;': '\u041a',
+    'kcy;': '\u043a',
+    'Kfr;': '\U0001d50e',
+    'kfr;': '\U0001d528',
+    'kgreen;': '\u0138',
+    'KHcy;': '\u0425',
+    'khcy;': '\u0445',
+    'KJcy;': '\u040c',
+    'kjcy;': '\u045c',
+    'Kopf;': '\U0001d542',
+    'kopf;': '\U0001d55c',
+    'Kscr;': '\U0001d4a6',
+    'kscr;': '\U0001d4c0',
+    'lAarr;': '\u21da',
+    'Lacute;': '\u0139',
+    'lacute;': '\u013a',
+    'laemptyv;': '\u29b4',
+    'lagran;': '\u2112',
+    'Lambda;': '\u039b',
+    'lambda;': '\u03bb',
+    'Lang;': '\u27ea',
+    'lang;': '\u27e8',
+    'langd;': '\u2991',
+    'langle;': '\u27e8',
+    'lap;': '\u2a85',
+    'Laplacetrf;': '\u2112',
+    'laquo': '\xab',
+    'laquo;': '\xab',
+    'Larr;': '\u219e',
+    'lArr;': '\u21d0',
+    'larr;': '\u2190',
+    'larrb;': '\u21e4',
+    'larrbfs;': '\u291f',
+    'larrfs;': '\u291d',
+    'larrhk;': '\u21a9',
+    'larrlp;': '\u21ab',
+    'larrpl;': '\u2939',
+    'larrsim;': '\u2973',
+    'larrtl;': '\u21a2',
+    'lat;': '\u2aab',
+    'lAtail;': '\u291b',
+    'latail;': '\u2919',
+    'late;': '\u2aad',
+    'lates;': '\u2aad\ufe00',
+    'lBarr;': '\u290e',
+    'lbarr;': '\u290c',
+    'lbbrk;': '\u2772',
+    'lbrace;': '{',
+    'lbrack;': '[',
+    'lbrke;': '\u298b',
+    'lbrksld;': '\u298f',
+    'lbrkslu;': '\u298d',
+    'Lcaron;': '\u013d',
+    'lcaron;': '\u013e',
+    'Lcedil;': '\u013b',
+    'lcedil;': '\u013c',
+    'lceil;': '\u2308',
+    'lcub;': '{',
+    'Lcy;': '\u041b',
+    'lcy;': '\u043b',
+    'ldca;': '\u2936',
+    'ldquo;': '\u201c',
+    'ldquor;': '\u201e',
+    'ldrdhar;': '\u2967',
+    'ldrushar;': '\u294b',
+    'ldsh;': '\u21b2',
+    'lE;': '\u2266',
+    'le;': '\u2264',
+    'LeftAngleBracket;': '\u27e8',
+    'LeftArrow;': '\u2190',
+    'Leftarrow;': '\u21d0',
+    'leftarrow;': '\u2190',
+    'LeftArrowBar;': '\u21e4',
+    'LeftArrowRightArrow;': '\u21c6',
+    'leftarrowtail;': '\u21a2',
+    'LeftCeiling;': '\u2308',
+    'LeftDoubleBracket;': '\u27e6',
+    'LeftDownTeeVector;': '\u2961',
+    'LeftDownVector;': '\u21c3',
+    'LeftDownVectorBar;': '\u2959',
+    'LeftFloor;': '\u230a',
+    'leftharpoondown;': '\u21bd',
+    'leftharpoonup;': '\u21bc',
+    'leftleftarrows;': '\u21c7',
+    'LeftRightArrow;': '\u2194',
+    'Leftrightarrow;': '\u21d4',
+    'leftrightarrow;': '\u2194',
+    'leftrightarrows;': '\u21c6',
+    'leftrightharpoons;': '\u21cb',
+    'leftrightsquigarrow;': '\u21ad',
+    'LeftRightVector;': '\u294e',
+    'LeftTee;': '\u22a3',
+    'LeftTeeArrow;': '\u21a4',
+    'LeftTeeVector;': '\u295a',
+    'leftthreetimes;': '\u22cb',
+    'LeftTriangle;': '\u22b2',
+    'LeftTriangleBar;': '\u29cf',
+    'LeftTriangleEqual;': '\u22b4',
+    'LeftUpDownVector;': '\u2951',
+    'LeftUpTeeVector;': '\u2960',
+    'LeftUpVector;': '\u21bf',
+    'LeftUpVectorBar;': '\u2958',
+    'LeftVector;': '\u21bc',
+    'LeftVectorBar;': '\u2952',
+    'lEg;': '\u2a8b',
+    'leg;': '\u22da',
+    'leq;': '\u2264',
+    'leqq;': '\u2266',
+    'leqslant;': '\u2a7d',
+    'les;': '\u2a7d',
+    'lescc;': '\u2aa8',
+    'lesdot;': '\u2a7f',
+    'lesdoto;': '\u2a81',
+    'lesdotor;': '\u2a83',
+    'lesg;': '\u22da\ufe00',
+    'lesges;': '\u2a93',
+    'lessapprox;': '\u2a85',
+    'lessdot;': '\u22d6',
+    'lesseqgtr;': '\u22da',
+    'lesseqqgtr;': '\u2a8b',
+    'LessEqualGreater;': '\u22da',
+    'LessFullEqual;': '\u2266',
+    'LessGreater;': '\u2276',
+    'lessgtr;': '\u2276',
+    'LessLess;': '\u2aa1',
+    'lesssim;': '\u2272',
+    'LessSlantEqual;': '\u2a7d',
+    'LessTilde;': '\u2272',
+    'lfisht;': '\u297c',
+    'lfloor;': '\u230a',
+    'Lfr;': '\U0001d50f',
+    'lfr;': '\U0001d529',
+    'lg;': '\u2276',
+    'lgE;': '\u2a91',
+    'lHar;': '\u2962',
+    'lhard;': '\u21bd',
+    'lharu;': '\u21bc',
+    'lharul;': '\u296a',
+    'lhblk;': '\u2584',
+    'LJcy;': '\u0409',
+    'ljcy;': '\u0459',
+    'Ll;': '\u22d8',
+    'll;': '\u226a',
+    'llarr;': '\u21c7',
+    'llcorner;': '\u231e',
+    'Lleftarrow;': '\u21da',
+    'llhard;': '\u296b',
+    'lltri;': '\u25fa',
+    'Lmidot;': '\u013f',
+    'lmidot;': '\u0140',
+    'lmoust;': '\u23b0',
+    'lmoustache;': '\u23b0',
+    'lnap;': '\u2a89',
+    'lnapprox;': '\u2a89',
+    'lnE;': '\u2268',
+    'lne;': '\u2a87',
+    'lneq;': '\u2a87',
+    'lneqq;': '\u2268',
+    'lnsim;': '\u22e6',
+    'loang;': '\u27ec',
+    'loarr;': '\u21fd',
+    'lobrk;': '\u27e6',
+    'LongLeftArrow;': '\u27f5',
+    'Longleftarrow;': '\u27f8',
+    'longleftarrow;': '\u27f5',
+    'LongLeftRightArrow;': '\u27f7',
+    'Longleftrightarrow;': '\u27fa',
+    'longleftrightarrow;': '\u27f7',
+    'longmapsto;': '\u27fc',
+    'LongRightArrow;': '\u27f6',
+    'Longrightarrow;': '\u27f9',
+    'longrightarrow;': '\u27f6',
+    'looparrowleft;': '\u21ab',
+    'looparrowright;': '\u21ac',
+    'lopar;': '\u2985',
+    'Lopf;': '\U0001d543',
+    'lopf;': '\U0001d55d',
+    'loplus;': '\u2a2d',
+    'lotimes;': '\u2a34',
+    'lowast;': '\u2217',
+    'lowbar;': '_',
+    'LowerLeftArrow;': '\u2199',
+    'LowerRightArrow;': '\u2198',
+    'loz;': '\u25ca',
+    'lozenge;': '\u25ca',
+    'lozf;': '\u29eb',
+    'lpar;': '(',
+    'lparlt;': '\u2993',
+    'lrarr;': '\u21c6',
+    'lrcorner;': '\u231f',
+    'lrhar;': '\u21cb',
+    'lrhard;': '\u296d',
+    'lrm;': '\u200e',
+    'lrtri;': '\u22bf',
+    'lsaquo;': '\u2039',
+    'Lscr;': '\u2112',
+    'lscr;': '\U0001d4c1',
+    'Lsh;': '\u21b0',
+    'lsh;': '\u21b0',
+    'lsim;': '\u2272',
+    'lsime;': '\u2a8d',
+    'lsimg;': '\u2a8f',
+    'lsqb;': '[',
+    'lsquo;': '\u2018',
+    'lsquor;': '\u201a',
+    'Lstrok;': '\u0141',
+    'lstrok;': '\u0142',
+    'LT': '<',
+    'lt': '<',
+    'LT;': '<',
+    'Lt;': '\u226a',
+    'lt;': '<',
+    'ltcc;': '\u2aa6',
+    'ltcir;': '\u2a79',
+    'ltdot;': '\u22d6',
+    'lthree;': '\u22cb',
+    'ltimes;': '\u22c9',
+    'ltlarr;': '\u2976',
+    'ltquest;': '\u2a7b',
+    'ltri;': '\u25c3',
+    'ltrie;': '\u22b4',
+    'ltrif;': '\u25c2',
+    'ltrPar;': '\u2996',
+    'lurdshar;': '\u294a',
+    'luruhar;': '\u2966',
+    'lvertneqq;': '\u2268\ufe00',
+    'lvnE;': '\u2268\ufe00',
+    'macr': '\xaf',
+    'macr;': '\xaf',
+    'male;': '\u2642',
+    'malt;': '\u2720',
+    'maltese;': '\u2720',
+    'Map;': '\u2905',
+    'map;': '\u21a6',
+    'mapsto;': '\u21a6',
+    'mapstodown;': '\u21a7',
+    'mapstoleft;': '\u21a4',
+    'mapstoup;': '\u21a5',
+    'marker;': '\u25ae',
+    'mcomma;': '\u2a29',
+    'Mcy;': '\u041c',
+    'mcy;': '\u043c',
+    'mdash;': '\u2014',
+    'mDDot;': '\u223a',
+    'measuredangle;': '\u2221',
+    'MediumSpace;': '\u205f',
+    'Mellintrf;': '\u2133',
+    'Mfr;': '\U0001d510',
+    'mfr;': '\U0001d52a',
+    'mho;': '\u2127',
+    'micro': '\xb5',
+    'micro;': '\xb5',
+    'mid;': '\u2223',
+    'midast;': '*',
+    'midcir;': '\u2af0',
+    'middot': '\xb7',
+    'middot;': '\xb7',
+    'minus;': '\u2212',
+    'minusb;': '\u229f',
+    'minusd;': '\u2238',
+    'minusdu;': '\u2a2a',
+    'MinusPlus;': '\u2213',
+    'mlcp;': '\u2adb',
+    'mldr;': '\u2026',
+    'mnplus;': '\u2213',
+    'models;': '\u22a7',
+    'Mopf;': '\U0001d544',
+    'mopf;': '\U0001d55e',
+    'mp;': '\u2213',
+    'Mscr;': '\u2133',
+    'mscr;': '\U0001d4c2',
+    'mstpos;': '\u223e',
+    'Mu;': '\u039c',
+    'mu;': '\u03bc',
+    'multimap;': '\u22b8',
+    'mumap;': '\u22b8',
+    'nabla;': '\u2207',
+    'Nacute;': '\u0143',
+    'nacute;': '\u0144',
+    'nang;': '\u2220\u20d2',
+    'nap;': '\u2249',
+    'napE;': '\u2a70\u0338',
+    'napid;': '\u224b\u0338',
+    'napos;': '\u0149',
+    'napprox;': '\u2249',
+    'natur;': '\u266e',
+    'natural;': '\u266e',
+    'naturals;': '\u2115',
+    'nbsp': '\xa0',
+    'nbsp;': '\xa0',
+    'nbump;': '\u224e\u0338',
+    'nbumpe;': '\u224f\u0338',
+    'ncap;': '\u2a43',
+    'Ncaron;': '\u0147',
+    'ncaron;': '\u0148',
+    'Ncedil;': '\u0145',
+    'ncedil;': '\u0146',
+    'ncong;': '\u2247',
+    'ncongdot;': '\u2a6d\u0338',
+    'ncup;': '\u2a42',
+    'Ncy;': '\u041d',
+    'ncy;': '\u043d',
+    'ndash;': '\u2013',
+    'ne;': '\u2260',
+    'nearhk;': '\u2924',
+    'neArr;': '\u21d7',
+    'nearr;': '\u2197',
+    'nearrow;': '\u2197',
+    'nedot;': '\u2250\u0338',
+    'NegativeMediumSpace;': '\u200b',
+    'NegativeThickSpace;': '\u200b',
+    'NegativeThinSpace;': '\u200b',
+    'NegativeVeryThinSpace;': '\u200b',
+    'nequiv;': '\u2262',
+    'nesear;': '\u2928',
+    'nesim;': '\u2242\u0338',
+    'NestedGreaterGreater;': '\u226b',
+    'NestedLessLess;': '\u226a',
+    'NewLine;': '\n',
+    'nexist;': '\u2204',
+    'nexists;': '\u2204',
+    'Nfr;': '\U0001d511',
+    'nfr;': '\U0001d52b',
+    'ngE;': '\u2267\u0338',
+    'nge;': '\u2271',
+    'ngeq;': '\u2271',
+    'ngeqq;': '\u2267\u0338',
+    'ngeqslant;': '\u2a7e\u0338',
+    'nges;': '\u2a7e\u0338',
+    'nGg;': '\u22d9\u0338',
+    'ngsim;': '\u2275',
+    'nGt;': '\u226b\u20d2',
+    'ngt;': '\u226f',
+    'ngtr;': '\u226f',
+    'nGtv;': '\u226b\u0338',
+    'nhArr;': '\u21ce',
+    'nharr;': '\u21ae',
+    'nhpar;': '\u2af2',
+    'ni;': '\u220b',
+    'nis;': '\u22fc',
+    'nisd;': '\u22fa',
+    'niv;': '\u220b',
+    'NJcy;': '\u040a',
+    'njcy;': '\u045a',
+    'nlArr;': '\u21cd',
+    'nlarr;': '\u219a',
+    'nldr;': '\u2025',
+    'nlE;': '\u2266\u0338',
+    'nle;': '\u2270',
+    'nLeftarrow;': '\u21cd',
+    'nleftarrow;': '\u219a',
+    'nLeftrightarrow;': '\u21ce',
+    'nleftrightarrow;': '\u21ae',
+    'nleq;': '\u2270',
+    'nleqq;': '\u2266\u0338',
+    'nleqslant;': '\u2a7d\u0338',
+    'nles;': '\u2a7d\u0338',
+    'nless;': '\u226e',
+    'nLl;': '\u22d8\u0338',
+    'nlsim;': '\u2274',
+    'nLt;': '\u226a\u20d2',
+    'nlt;': '\u226e',
+    'nltri;': '\u22ea',
+    'nltrie;': '\u22ec',
+    'nLtv;': '\u226a\u0338',
+    'nmid;': '\u2224',
+    'NoBreak;': '\u2060',
+    'NonBreakingSpace;': '\xa0',
+    'Nopf;': '\u2115',
+    'nopf;': '\U0001d55f',
+    'not': '\xac',
+    'Not;': '\u2aec',
+    'not;': '\xac',
+    'NotCongruent;': '\u2262',
+    'NotCupCap;': '\u226d',
+    'NotDoubleVerticalBar;': '\u2226',
+    'NotElement;': '\u2209',
+    'NotEqual;': '\u2260',
+    'NotEqualTilde;': '\u2242\u0338',
+    'NotExists;': '\u2204',
+    'NotGreater;': '\u226f',
+    'NotGreaterEqual;': '\u2271',
+    'NotGreaterFullEqual;': '\u2267\u0338',
+    'NotGreaterGreater;': '\u226b\u0338',
+    'NotGreaterLess;': '\u2279',
+    'NotGreaterSlantEqual;': '\u2a7e\u0338',
+    'NotGreaterTilde;': '\u2275',
+    'NotHumpDownHump;': '\u224e\u0338',
+    'NotHumpEqual;': '\u224f\u0338',
+    'notin;': '\u2209',
+    'notindot;': '\u22f5\u0338',
+    'notinE;': '\u22f9\u0338',
+    'notinva;': '\u2209',
+    'notinvb;': '\u22f7',
+    'notinvc;': '\u22f6',
+    'NotLeftTriangle;': '\u22ea',
+    'NotLeftTriangleBar;': '\u29cf\u0338',
+    'NotLeftTriangleEqual;': '\u22ec',
+    'NotLess;': '\u226e',
+    'NotLessEqual;': '\u2270',
+    'NotLessGreater;': '\u2278',
+    'NotLessLess;': '\u226a\u0338',
+    'NotLessSlantEqual;': '\u2a7d\u0338',
+    'NotLessTilde;': '\u2274',
+    'NotNestedGreaterGreater;': '\u2aa2\u0338',
+    'NotNestedLessLess;': '\u2aa1\u0338',
+    'notni;': '\u220c',
+    'notniva;': '\u220c',
+    'notnivb;': '\u22fe',
+    'notnivc;': '\u22fd',
+    'NotPrecedes;': '\u2280',
+    'NotPrecedesEqual;': '\u2aaf\u0338',
+    'NotPrecedesSlantEqual;': '\u22e0',
+    'NotReverseElement;': '\u220c',
+    'NotRightTriangle;': '\u22eb',
+    'NotRightTriangleBar;': '\u29d0\u0338',
+    'NotRightTriangleEqual;': '\u22ed',
+    'NotSquareSubset;': '\u228f\u0338',
+    'NotSquareSubsetEqual;': '\u22e2',
+    'NotSquareSuperset;': '\u2290\u0338',
+    'NotSquareSupersetEqual;': '\u22e3',
+    'NotSubset;': '\u2282\u20d2',
+    'NotSubsetEqual;': '\u2288',
+    'NotSucceeds;': '\u2281',
+    'NotSucceedsEqual;': '\u2ab0\u0338',
+    'NotSucceedsSlantEqual;': '\u22e1',
+    'NotSucceedsTilde;': '\u227f\u0338',
+    'NotSuperset;': '\u2283\u20d2',
+    'NotSupersetEqual;': '\u2289',
+    'NotTilde;': '\u2241',
+    'NotTildeEqual;': '\u2244',
+    'NotTildeFullEqual;': '\u2247',
+    'NotTildeTilde;': '\u2249',
+    'NotVerticalBar;': '\u2224',
+    'npar;': '\u2226',
+    'nparallel;': '\u2226',
+    'nparsl;': '\u2afd\u20e5',
+    'npart;': '\u2202\u0338',
+    'npolint;': '\u2a14',
+    'npr;': '\u2280',
+    'nprcue;': '\u22e0',
+    'npre;': '\u2aaf\u0338',
+    'nprec;': '\u2280',
+    'npreceq;': '\u2aaf\u0338',
+    'nrArr;': '\u21cf',
+    'nrarr;': '\u219b',
+    'nrarrc;': '\u2933\u0338',
+    'nrarrw;': '\u219d\u0338',
+    'nRightarrow;': '\u21cf',
+    'nrightarrow;': '\u219b',
+    'nrtri;': '\u22eb',
+    'nrtrie;': '\u22ed',
+    'nsc;': '\u2281',
+    'nsccue;': '\u22e1',
+    'nsce;': '\u2ab0\u0338',
+    'Nscr;': '\U0001d4a9',
+    'nscr;': '\U0001d4c3',
+    'nshortmid;': '\u2224',
+    'nshortparallel;': '\u2226',
+    'nsim;': '\u2241',
+    'nsime;': '\u2244',
+    'nsimeq;': '\u2244',
+    'nsmid;': '\u2224',
+    'nspar;': '\u2226',
+    'nsqsube;': '\u22e2',
+    'nsqsupe;': '\u22e3',
+    'nsub;': '\u2284',
+    'nsubE;': '\u2ac5\u0338',
+    'nsube;': '\u2288',
+    'nsubset;': '\u2282\u20d2',
+    'nsubseteq;': '\u2288',
+    'nsubseteqq;': '\u2ac5\u0338',
+    'nsucc;': '\u2281',
+    'nsucceq;': '\u2ab0\u0338',
+    'nsup;': '\u2285',
+    'nsupE;': '\u2ac6\u0338',
+    'nsupe;': '\u2289',
+    'nsupset;': '\u2283\u20d2',
+    'nsupseteq;': '\u2289',
+    'nsupseteqq;': '\u2ac6\u0338',
+    'ntgl;': '\u2279',
+    'Ntilde': '\xd1',
+    'ntilde': '\xf1',
+    'Ntilde;': '\xd1',
+    'ntilde;': '\xf1',
+    'ntlg;': '\u2278',
+    'ntriangleleft;': '\u22ea',
+    'ntrianglelefteq;': '\u22ec',
+    'ntriangleright;': '\u22eb',
+    'ntrianglerighteq;': '\u22ed',
+    'Nu;': '\u039d',
+    'nu;': '\u03bd',
+    'num;': '#',
+    'numero;': '\u2116',
+    'numsp;': '\u2007',
+    'nvap;': '\u224d\u20d2',
+    'nVDash;': '\u22af',
+    'nVdash;': '\u22ae',
+    'nvDash;': '\u22ad',
+    'nvdash;': '\u22ac',
+    'nvge;': '\u2265\u20d2',
+    'nvgt;': '>\u20d2',
+    'nvHarr;': '\u2904',
+    'nvinfin;': '\u29de',
+    'nvlArr;': '\u2902',
+    'nvle;': '\u2264\u20d2',
+    'nvlt;': '<\u20d2',
+    'nvltrie;': '\u22b4\u20d2',
+    'nvrArr;': '\u2903',
+    'nvrtrie;': '\u22b5\u20d2',
+    'nvsim;': '\u223c\u20d2',
+    'nwarhk;': '\u2923',
+    'nwArr;': '\u21d6',
+    'nwarr;': '\u2196',
+    'nwarrow;': '\u2196',
+    'nwnear;': '\u2927',
+    'Oacute': '\xd3',
+    'oacute': '\xf3',
+    'Oacute;': '\xd3',
+    'oacute;': '\xf3',
+    'oast;': '\u229b',
+    'ocir;': '\u229a',
+    'Ocirc': '\xd4',
+    'ocirc': '\xf4',
+    'Ocirc;': '\xd4',
+    'ocirc;': '\xf4',
+    'Ocy;': '\u041e',
+    'ocy;': '\u043e',
+    'odash;': '\u229d',
+    'Odblac;': '\u0150',
+    'odblac;': '\u0151',
+    'odiv;': '\u2a38',
+    'odot;': '\u2299',
+    'odsold;': '\u29bc',
+    'OElig;': '\u0152',
+    'oelig;': '\u0153',
+    'ofcir;': '\u29bf',
+    'Ofr;': '\U0001d512',
+    'ofr;': '\U0001d52c',
+    'ogon;': '\u02db',
+    'Ograve': '\xd2',
+    'ograve': '\xf2',
+    'Ograve;': '\xd2',
+    'ograve;': '\xf2',
+    'ogt;': '\u29c1',
+    'ohbar;': '\u29b5',
+    'ohm;': '\u03a9',
+    'oint;': '\u222e',
+    'olarr;': '\u21ba',
+    'olcir;': '\u29be',
+    'olcross;': '\u29bb',
+    'oline;': '\u203e',
+    'olt;': '\u29c0',
+    'Omacr;': '\u014c',
+    'omacr;': '\u014d',
+    'Omega;': '\u03a9',
+    'omega;': '\u03c9',
+    'Omicron;': '\u039f',
+    'omicron;': '\u03bf',
+    'omid;': '\u29b6',
+    'ominus;': '\u2296',
+    'Oopf;': '\U0001d546',
+    'oopf;': '\U0001d560',
+    'opar;': '\u29b7',
+    'OpenCurlyDoubleQuote;': '\u201c',
+    'OpenCurlyQuote;': '\u2018',
+    'operp;': '\u29b9',
+    'oplus;': '\u2295',
+    'Or;': '\u2a54',
+    'or;': '\u2228',
+    'orarr;': '\u21bb',
+    'ord;': '\u2a5d',
+    'order;': '\u2134',
+    'orderof;': '\u2134',
+    'ordf': '\xaa',
+    'ordf;': '\xaa',
+    'ordm': '\xba',
+    'ordm;': '\xba',
+    'origof;': '\u22b6',
+    'oror;': '\u2a56',
+    'orslope;': '\u2a57',
+    'orv;': '\u2a5b',
+    'oS;': '\u24c8',
+    'Oscr;': '\U0001d4aa',
+    'oscr;': '\u2134',
+    'Oslash': '\xd8',
+    'oslash': '\xf8',
+    'Oslash;': '\xd8',
+    'oslash;': '\xf8',
+    'osol;': '\u2298',
+    'Otilde': '\xd5',
+    'otilde': '\xf5',
+    'Otilde;': '\xd5',
+    'otilde;': '\xf5',
+    'Otimes;': '\u2a37',
+    'otimes;': '\u2297',
+    'otimesas;': '\u2a36',
+    'Ouml': '\xd6',
+    'ouml': '\xf6',
+    'Ouml;': '\xd6',
+    'ouml;': '\xf6',
+    'ovbar;': '\u233d',
+    'OverBar;': '\u203e',
+    'OverBrace;': '\u23de',
+    'OverBracket;': '\u23b4',
+    'OverParenthesis;': '\u23dc',
+    'par;': '\u2225',
+    'para': '\xb6',
+    'para;': '\xb6',
+    'parallel;': '\u2225',
+    'parsim;': '\u2af3',
+    'parsl;': '\u2afd',
+    'part;': '\u2202',
+    'PartialD;': '\u2202',
+    'Pcy;': '\u041f',
+    'pcy;': '\u043f',
+    'percnt;': '%',
+    'period;': '.',
+    'permil;': '\u2030',
+    'perp;': '\u22a5',
+    'pertenk;': '\u2031',
+    'Pfr;': '\U0001d513',
+    'pfr;': '\U0001d52d',
+    'Phi;': '\u03a6',
+    'phi;': '\u03c6',
+    'phiv;': '\u03d5',
+    'phmmat;': '\u2133',
+    'phone;': '\u260e',
+    'Pi;': '\u03a0',
+    'pi;': '\u03c0',
+    'pitchfork;': '\u22d4',
+    'piv;': '\u03d6',
+    'planck;': '\u210f',
+    'planckh;': '\u210e',
+    'plankv;': '\u210f',
+    'plus;': '+',
+    'plusacir;': '\u2a23',
+    'plusb;': '\u229e',
+    'pluscir;': '\u2a22',
+    'plusdo;': '\u2214',
+    'plusdu;': '\u2a25',
+    'pluse;': '\u2a72',
+    'PlusMinus;': '\xb1',
+    'plusmn': '\xb1',
+    'plusmn;': '\xb1',
+    'plussim;': '\u2a26',
+    'plustwo;': '\u2a27',
+    'pm;': '\xb1',
+    'Poincareplane;': '\u210c',
+    'pointint;': '\u2a15',
+    'Popf;': '\u2119',
+    'popf;': '\U0001d561',
+    'pound': '\xa3',
+    'pound;': '\xa3',
+    'Pr;': '\u2abb',
+    'pr;': '\u227a',
+    'prap;': '\u2ab7',
+    'prcue;': '\u227c',
+    'prE;': '\u2ab3',
+    'pre;': '\u2aaf',
+    'prec;': '\u227a',
+    'precapprox;': '\u2ab7',
+    'preccurlyeq;': '\u227c',
+    'Precedes;': '\u227a',
+    'PrecedesEqual;': '\u2aaf',
+    'PrecedesSlantEqual;': '\u227c',
+    'PrecedesTilde;': '\u227e',
+    'preceq;': '\u2aaf',
+    'precnapprox;': '\u2ab9',
+    'precneqq;': '\u2ab5',
+    'precnsim;': '\u22e8',
+    'precsim;': '\u227e',
+    'Prime;': '\u2033',
+    'prime;': '\u2032',
+    'primes;': '\u2119',
+    'prnap;': '\u2ab9',
+    'prnE;': '\u2ab5',
+    'prnsim;': '\u22e8',
+    'prod;': '\u220f',
+    'Product;': '\u220f',
+    'profalar;': '\u232e',
+    'profline;': '\u2312',
+    'profsurf;': '\u2313',
+    'prop;': '\u221d',
+    'Proportion;': '\u2237',
+    'Proportional;': '\u221d',
+    'propto;': '\u221d',
+    'prsim;': '\u227e',
+    'prurel;': '\u22b0',
+    'Pscr;': '\U0001d4ab',
+    'pscr;': '\U0001d4c5',
+    'Psi;': '\u03a8',
+    'psi;': '\u03c8',
+    'puncsp;': '\u2008',
+    'Qfr;': '\U0001d514',
+    'qfr;': '\U0001d52e',
+    'qint;': '\u2a0c',
+    'Qopf;': '\u211a',
+    'qopf;': '\U0001d562',
+    'qprime;': '\u2057',
+    'Qscr;': '\U0001d4ac',
+    'qscr;': '\U0001d4c6',
+    'quaternions;': '\u210d',
+    'quatint;': '\u2a16',
+    'quest;': '?',
+    'questeq;': '\u225f',
+    'QUOT': '"',
+    'quot': '"',
+    'QUOT;': '"',
+    'quot;': '"',
+    'rAarr;': '\u21db',
+    'race;': '\u223d\u0331',
+    'Racute;': '\u0154',
+    'racute;': '\u0155',
+    'radic;': '\u221a',
+    'raemptyv;': '\u29b3',
+    'Rang;': '\u27eb',
+    'rang;': '\u27e9',
+    'rangd;': '\u2992',
+    'range;': '\u29a5',
+    'rangle;': '\u27e9',
+    'raquo': '\xbb',
+    'raquo;': '\xbb',
+    'Rarr;': '\u21a0',
+    'rArr;': '\u21d2',
+    'rarr;': '\u2192',
+    'rarrap;': '\u2975',
+    'rarrb;': '\u21e5',
+    'rarrbfs;': '\u2920',
+    'rarrc;': '\u2933',
+    'rarrfs;': '\u291e',
+    'rarrhk;': '\u21aa',
+    'rarrlp;': '\u21ac',
+    'rarrpl;': '\u2945',
+    'rarrsim;': '\u2974',
+    'Rarrtl;': '\u2916',
+    'rarrtl;': '\u21a3',
+    'rarrw;': '\u219d',
+    'rAtail;': '\u291c',
+    'ratail;': '\u291a',
+    'ratio;': '\u2236',
+    'rationals;': '\u211a',
+    'RBarr;': '\u2910',
+    'rBarr;': '\u290f',
+    'rbarr;': '\u290d',
+    'rbbrk;': '\u2773',
+    'rbrace;': '}',
+    'rbrack;': ']',
+    'rbrke;': '\u298c',
+    'rbrksld;': '\u298e',
+    'rbrkslu;': '\u2990',
+    'Rcaron;': '\u0158',
+    'rcaron;': '\u0159',
+    'Rcedil;': '\u0156',
+    'rcedil;': '\u0157',
+    'rceil;': '\u2309',
+    'rcub;': '}',
+    'Rcy;': '\u0420',
+    'rcy;': '\u0440',
+    'rdca;': '\u2937',
+    'rdldhar;': '\u2969',
+    'rdquo;': '\u201d',
+    'rdquor;': '\u201d',
+    'rdsh;': '\u21b3',
+    'Re;': '\u211c',
+    'real;': '\u211c',
+    'realine;': '\u211b',
+    'realpart;': '\u211c',
+    'reals;': '\u211d',
+    'rect;': '\u25ad',
+    'REG': '\xae',
+    'reg': '\xae',
+    'REG;': '\xae',
+    'reg;': '\xae',
+    'ReverseElement;': '\u220b',
+    'ReverseEquilibrium;': '\u21cb',
+    'ReverseUpEquilibrium;': '\u296f',
+    'rfisht;': '\u297d',
+    'rfloor;': '\u230b',
+    'Rfr;': '\u211c',
+    'rfr;': '\U0001d52f',
+    'rHar;': '\u2964',
+    'rhard;': '\u21c1',
+    'rharu;': '\u21c0',
+    'rharul;': '\u296c',
+    'Rho;': '\u03a1',
+    'rho;': '\u03c1',
+    'rhov;': '\u03f1',
+    'RightAngleBracket;': '\u27e9',
+    'RightArrow;': '\u2192',
+    'Rightarrow;': '\u21d2',
+    'rightarrow;': '\u2192',
+    'RightArrowBar;': '\u21e5',
+    'RightArrowLeftArrow;': '\u21c4',
+    'rightarrowtail;': '\u21a3',
+    'RightCeiling;': '\u2309',
+    'RightDoubleBracket;': '\u27e7',
+    'RightDownTeeVector;': '\u295d',
+    'RightDownVector;': '\u21c2',
+    'RightDownVectorBar;': '\u2955',
+    'RightFloor;': '\u230b',
+    'rightharpoondown;': '\u21c1',
+    'rightharpoonup;': '\u21c0',
+    'rightleftarrows;': '\u21c4',
+    'rightleftharpoons;': '\u21cc',
+    'rightrightarrows;': '\u21c9',
+    'rightsquigarrow;': '\u219d',
+    'RightTee;': '\u22a2',
+    'RightTeeArrow;': '\u21a6',
+    'RightTeeVector;': '\u295b',
+    'rightthreetimes;': '\u22cc',
+    'RightTriangle;': '\u22b3',
+    'RightTriangleBar;': '\u29d0',
+    'RightTriangleEqual;': '\u22b5',
+    'RightUpDownVector;': '\u294f',
+    'RightUpTeeVector;': '\u295c',
+    'RightUpVector;': '\u21be',
+    'RightUpVectorBar;': '\u2954',
+    'RightVector;': '\u21c0',
+    'RightVectorBar;': '\u2953',
+    'ring;': '\u02da',
+    'risingdotseq;': '\u2253',
+    'rlarr;': '\u21c4',
+    'rlhar;': '\u21cc',
+    'rlm;': '\u200f',
+    'rmoust;': '\u23b1',
+    'rmoustache;': '\u23b1',
+    'rnmid;': '\u2aee',
+    'roang;': '\u27ed',
+    'roarr;': '\u21fe',
+    'robrk;': '\u27e7',
+    'ropar;': '\u2986',
+    'Ropf;': '\u211d',
+    'ropf;': '\U0001d563',
+    'roplus;': '\u2a2e',
+    'rotimes;': '\u2a35',
+    'RoundImplies;': '\u2970',
+    'rpar;': ')',
+    'rpargt;': '\u2994',
+    'rppolint;': '\u2a12',
+    'rrarr;': '\u21c9',
+    'Rrightarrow;': '\u21db',
+    'rsaquo;': '\u203a',
+    'Rscr;': '\u211b',
+    'rscr;': '\U0001d4c7',
+    'Rsh;': '\u21b1',
+    'rsh;': '\u21b1',
+    'rsqb;': ']',
+    'rsquo;': '\u2019',
+    'rsquor;': '\u2019',
+    'rthree;': '\u22cc',
+    'rtimes;': '\u22ca',
+    'rtri;': '\u25b9',
+    'rtrie;': '\u22b5',
+    'rtrif;': '\u25b8',
+    'rtriltri;': '\u29ce',
+    'RuleDelayed;': '\u29f4',
+    'ruluhar;': '\u2968',
+    'rx;': '\u211e',
+    'Sacute;': '\u015a',
+    'sacute;': '\u015b',
+    'sbquo;': '\u201a',
+    'Sc;': '\u2abc',
+    'sc;': '\u227b',
+    'scap;': '\u2ab8',
+    'Scaron;': '\u0160',
+    'scaron;': '\u0161',
+    'sccue;': '\u227d',
+    'scE;': '\u2ab4',
+    'sce;': '\u2ab0',
+    'Scedil;': '\u015e',
+    'scedil;': '\u015f',
+    'Scirc;': '\u015c',
+    'scirc;': '\u015d',
+    'scnap;': '\u2aba',
+    'scnE;': '\u2ab6',
+    'scnsim;': '\u22e9',
+    'scpolint;': '\u2a13',
+    'scsim;': '\u227f',
+    'Scy;': '\u0421',
+    'scy;': '\u0441',
+    'sdot;': '\u22c5',
+    'sdotb;': '\u22a1',
+    'sdote;': '\u2a66',
+    'searhk;': '\u2925',
+    'seArr;': '\u21d8',
+    'searr;': '\u2198',
+    'searrow;': '\u2198',
+    'sect': '\xa7',
+    'sect;': '\xa7',
+    'semi;': ';',
+    'seswar;': '\u2929',
+    'setminus;': '\u2216',
+    'setmn;': '\u2216',
+    'sext;': '\u2736',
+    'Sfr;': '\U0001d516',
+    'sfr;': '\U0001d530',
+    'sfrown;': '\u2322',
+    'sharp;': '\u266f',
+    'SHCHcy;': '\u0429',
+    'shchcy;': '\u0449',
+    'SHcy;': '\u0428',
+    'shcy;': '\u0448',
+    'ShortDownArrow;': '\u2193',
+    'ShortLeftArrow;': '\u2190',
+    'shortmid;': '\u2223',
+    'shortparallel;': '\u2225',
+    'ShortRightArrow;': '\u2192',
+    'ShortUpArrow;': '\u2191',
+    'shy': '\xad',
+    'shy;': '\xad',
+    'Sigma;': '\u03a3',
+    'sigma;': '\u03c3',
+    'sigmaf;': '\u03c2',
+    'sigmav;': '\u03c2',
+    'sim;': '\u223c',
+    'simdot;': '\u2a6a',
+    'sime;': '\u2243',
+    'simeq;': '\u2243',
+    'simg;': '\u2a9e',
+    'simgE;': '\u2aa0',
+    'siml;': '\u2a9d',
+    'simlE;': '\u2a9f',
+    'simne;': '\u2246',
+    'simplus;': '\u2a24',
+    'simrarr;': '\u2972',
+    'slarr;': '\u2190',
+    'SmallCircle;': '\u2218',
+    'smallsetminus;': '\u2216',
+    'smashp;': '\u2a33',
+    'smeparsl;': '\u29e4',
+    'smid;': '\u2223',
+    'smile;': '\u2323',
+    'smt;': '\u2aaa',
+    'smte;': '\u2aac',
+    'smtes;': '\u2aac\ufe00',
+    'SOFTcy;': '\u042c',
+    'softcy;': '\u044c',
+    'sol;': '/',
+    'solb;': '\u29c4',
+    'solbar;': '\u233f',
+    'Sopf;': '\U0001d54a',
+    'sopf;': '\U0001d564',
+    'spades;': '\u2660',
+    'spadesuit;': '\u2660',
+    'spar;': '\u2225',
+    'sqcap;': '\u2293',
+    'sqcaps;': '\u2293\ufe00',
+    'sqcup;': '\u2294',
+    'sqcups;': '\u2294\ufe00',
+    'Sqrt;': '\u221a',
+    'sqsub;': '\u228f',
+    'sqsube;': '\u2291',
+    'sqsubset;': '\u228f',
+    'sqsubseteq;': '\u2291',
+    'sqsup;': '\u2290',
+    'sqsupe;': '\u2292',
+    'sqsupset;': '\u2290',
+    'sqsupseteq;': '\u2292',
+    'squ;': '\u25a1',
+    'Square;': '\u25a1',
+    'square;': '\u25a1',
+    'SquareIntersection;': '\u2293',
+    'SquareSubset;': '\u228f',
+    'SquareSubsetEqual;': '\u2291',
+    'SquareSuperset;': '\u2290',
+    'SquareSupersetEqual;': '\u2292',
+    'SquareUnion;': '\u2294',
+    'squarf;': '\u25aa',
+    'squf;': '\u25aa',
+    'srarr;': '\u2192',
+    'Sscr;': '\U0001d4ae',
+    'sscr;': '\U0001d4c8',
+    'ssetmn;': '\u2216',
+    'ssmile;': '\u2323',
+    'sstarf;': '\u22c6',
+    'Star;': '\u22c6',
+    'star;': '\u2606',
+    'starf;': '\u2605',
+    'straightepsilon;': '\u03f5',
+    'straightphi;': '\u03d5',
+    'strns;': '\xaf',
+    'Sub;': '\u22d0',
+    'sub;': '\u2282',
+    'subdot;': '\u2abd',
+    'subE;': '\u2ac5',
+    'sube;': '\u2286',
+    'subedot;': '\u2ac3',
+    'submult;': '\u2ac1',
+    'subnE;': '\u2acb',
+    'subne;': '\u228a',
+    'subplus;': '\u2abf',
+    'subrarr;': '\u2979',
+    'Subset;': '\u22d0',
+    'subset;': '\u2282',
+    'subseteq;': '\u2286',
+    'subseteqq;': '\u2ac5',
+    'SubsetEqual;': '\u2286',
+    'subsetneq;': '\u228a',
+    'subsetneqq;': '\u2acb',
+    'subsim;': '\u2ac7',
+    'subsub;': '\u2ad5',
+    'subsup;': '\u2ad3',
+    'succ;': '\u227b',
+    'succapprox;': '\u2ab8',
+    'succcurlyeq;': '\u227d',
+    'Succeeds;': '\u227b',
+    'SucceedsEqual;': '\u2ab0',
+    'SucceedsSlantEqual;': '\u227d',
+    'SucceedsTilde;': '\u227f',
+    'succeq;': '\u2ab0',
+    'succnapprox;': '\u2aba',
+    'succneqq;': '\u2ab6',
+    'succnsim;': '\u22e9',
+    'succsim;': '\u227f',
+    'SuchThat;': '\u220b',
+    'Sum;': '\u2211',
+    'sum;': '\u2211',
+    'sung;': '\u266a',
+    'sup1': '\xb9',
+    'sup1;': '\xb9',
+    'sup2': '\xb2',
+    'sup2;': '\xb2',
+    'sup3': '\xb3',
+    'sup3;': '\xb3',
+    'Sup;': '\u22d1',
+    'sup;': '\u2283',
+    'supdot;': '\u2abe',
+    'supdsub;': '\u2ad8',
+    'supE;': '\u2ac6',
+    'supe;': '\u2287',
+    'supedot;': '\u2ac4',
+    'Superset;': '\u2283',
+    'SupersetEqual;': '\u2287',
+    'suphsol;': '\u27c9',
+    'suphsub;': '\u2ad7',
+    'suplarr;': '\u297b',
+    'supmult;': '\u2ac2',
+    'supnE;': '\u2acc',
+    'supne;': '\u228b',
+    'supplus;': '\u2ac0',
+    'Supset;': '\u22d1',
+    'supset;': '\u2283',
+    'supseteq;': '\u2287',
+    'supseteqq;': '\u2ac6',
+    'supsetneq;': '\u228b',
+    'supsetneqq;': '\u2acc',
+    'supsim;': '\u2ac8',
+    'supsub;': '\u2ad4',
+    'supsup;': '\u2ad6',
+    'swarhk;': '\u2926',
+    'swArr;': '\u21d9',
+    'swarr;': '\u2199',
+    'swarrow;': '\u2199',
+    'swnwar;': '\u292a',
+    'szlig': '\xdf',
+    'szlig;': '\xdf',
+    'Tab;': '\t',
+    'target;': '\u2316',
+    'Tau;': '\u03a4',
+    'tau;': '\u03c4',
+    'tbrk;': '\u23b4',
+    'Tcaron;': '\u0164',
+    'tcaron;': '\u0165',
+    'Tcedil;': '\u0162',
+    'tcedil;': '\u0163',
+    'Tcy;': '\u0422',
+    'tcy;': '\u0442',
+    'tdot;': '\u20db',
+    'telrec;': '\u2315',
+    'Tfr;': '\U0001d517',
+    'tfr;': '\U0001d531',
+    'there4;': '\u2234',
+    'Therefore;': '\u2234',
+    'therefore;': '\u2234',
+    'Theta;': '\u0398',
+    'theta;': '\u03b8',
+    'thetasym;': '\u03d1',
+    'thetav;': '\u03d1',
+    'thickapprox;': '\u2248',
+    'thicksim;': '\u223c',
+    'ThickSpace;': '\u205f\u200a',
+    'thinsp;': '\u2009',
+    'ThinSpace;': '\u2009',
+    'thkap;': '\u2248',
+    'thksim;': '\u223c',
+    'THORN': '\xde',
+    'thorn': '\xfe',
+    'THORN;': '\xde',
+    'thorn;': '\xfe',
+    'Tilde;': '\u223c',
+    'tilde;': '\u02dc',
+    'TildeEqual;': '\u2243',
+    'TildeFullEqual;': '\u2245',
+    'TildeTilde;': '\u2248',
+    'times': '\xd7',
+    'times;': '\xd7',
+    'timesb;': '\u22a0',
+    'timesbar;': '\u2a31',
+    'timesd;': '\u2a30',
+    'tint;': '\u222d',
+    'toea;': '\u2928',
+    'top;': '\u22a4',
+    'topbot;': '\u2336',
+    'topcir;': '\u2af1',
+    'Topf;': '\U0001d54b',
+    'topf;': '\U0001d565',
+    'topfork;': '\u2ada',
+    'tosa;': '\u2929',
+    'tprime;': '\u2034',
+    'TRADE;': '\u2122',
+    'trade;': '\u2122',
+    'triangle;': '\u25b5',
+    'triangledown;': '\u25bf',
+    'triangleleft;': '\u25c3',
+    'trianglelefteq;': '\u22b4',
+    'triangleq;': '\u225c',
+    'triangleright;': '\u25b9',
+    'trianglerighteq;': '\u22b5',
+    'tridot;': '\u25ec',
+    'trie;': '\u225c',
+    'triminus;': '\u2a3a',
+    'TripleDot;': '\u20db',
+    'triplus;': '\u2a39',
+    'trisb;': '\u29cd',
+    'tritime;': '\u2a3b',
+    'trpezium;': '\u23e2',
+    'Tscr;': '\U0001d4af',
+    'tscr;': '\U0001d4c9',
+    'TScy;': '\u0426',
+    'tscy;': '\u0446',
+    'TSHcy;': '\u040b',
+    'tshcy;': '\u045b',
+    'Tstrok;': '\u0166',
+    'tstrok;': '\u0167',
+    'twixt;': '\u226c',
+    'twoheadleftarrow;': '\u219e',
+    'twoheadrightarrow;': '\u21a0',
+    'Uacute': '\xda',
+    'uacute': '\xfa',
+    'Uacute;': '\xda',
+    'uacute;': '\xfa',
+    'Uarr;': '\u219f',
+    'uArr;': '\u21d1',
+    'uarr;': '\u2191',
+    'Uarrocir;': '\u2949',
+    'Ubrcy;': '\u040e',
+    'ubrcy;': '\u045e',
+    'Ubreve;': '\u016c',
+    'ubreve;': '\u016d',
+    'Ucirc': '\xdb',
+    'ucirc': '\xfb',
+    'Ucirc;': '\xdb',
+    'ucirc;': '\xfb',
+    'Ucy;': '\u0423',
+    'ucy;': '\u0443',
+    'udarr;': '\u21c5',
+    'Udblac;': '\u0170',
+    'udblac;': '\u0171',
+    'udhar;': '\u296e',
+    'ufisht;': '\u297e',
+    'Ufr;': '\U0001d518',
+    'ufr;': '\U0001d532',
+    'Ugrave': '\xd9',
+    'ugrave': '\xf9',
+    'Ugrave;': '\xd9',
+    'ugrave;': '\xf9',
+    'uHar;': '\u2963',
+    'uharl;': '\u21bf',
+    'uharr;': '\u21be',
+    'uhblk;': '\u2580',
+    'ulcorn;': '\u231c',
+    'ulcorner;': '\u231c',
+    'ulcrop;': '\u230f',
+    'ultri;': '\u25f8',
+    'Umacr;': '\u016a',
+    'umacr;': '\u016b',
+    'uml': '\xa8',
+    'uml;': '\xa8',
+    'UnderBar;': '_',
+    'UnderBrace;': '\u23df',
+    'UnderBracket;': '\u23b5',
+    'UnderParenthesis;': '\u23dd',
+    'Union;': '\u22c3',
+    'UnionPlus;': '\u228e',
+    'Uogon;': '\u0172',
+    'uogon;': '\u0173',
+    'Uopf;': '\U0001d54c',
+    'uopf;': '\U0001d566',
+    'UpArrow;': '\u2191',
+    'Uparrow;': '\u21d1',
+    'uparrow;': '\u2191',
+    'UpArrowBar;': '\u2912',
+    'UpArrowDownArrow;': '\u21c5',
+    'UpDownArrow;': '\u2195',
+    'Updownarrow;': '\u21d5',
+    'updownarrow;': '\u2195',
+    'UpEquilibrium;': '\u296e',
+    'upharpoonleft;': '\u21bf',
+    'upharpoonright;': '\u21be',
+    'uplus;': '\u228e',
+    'UpperLeftArrow;': '\u2196',
+    'UpperRightArrow;': '\u2197',
+    'Upsi;': '\u03d2',
+    'upsi;': '\u03c5',
+    'upsih;': '\u03d2',
+    'Upsilon;': '\u03a5',
+    'upsilon;': '\u03c5',
+    'UpTee;': '\u22a5',
+    'UpTeeArrow;': '\u21a5',
+    'upuparrows;': '\u21c8',
+    'urcorn;': '\u231d',
+    'urcorner;': '\u231d',
+    'urcrop;': '\u230e',
+    'Uring;': '\u016e',
+    'uring;': '\u016f',
+    'urtri;': '\u25f9',
+    'Uscr;': '\U0001d4b0',
+    'uscr;': '\U0001d4ca',
+    'utdot;': '\u22f0',
+    'Utilde;': '\u0168',
+    'utilde;': '\u0169',
+    'utri;': '\u25b5',
+    'utrif;': '\u25b4',
+    'uuarr;': '\u21c8',
+    'Uuml': '\xdc',
+    'uuml': '\xfc',
+    'Uuml;': '\xdc',
+    'uuml;': '\xfc',
+    'uwangle;': '\u29a7',
+    'vangrt;': '\u299c',
+    'varepsilon;': '\u03f5',
+    'varkappa;': '\u03f0',
+    'varnothing;': '\u2205',
+    'varphi;': '\u03d5',
+    'varpi;': '\u03d6',
+    'varpropto;': '\u221d',
+    'vArr;': '\u21d5',
+    'varr;': '\u2195',
+    'varrho;': '\u03f1',
+    'varsigma;': '\u03c2',
+    'varsubsetneq;': '\u228a\ufe00',
+    'varsubsetneqq;': '\u2acb\ufe00',
+    'varsupsetneq;': '\u228b\ufe00',
+    'varsupsetneqq;': '\u2acc\ufe00',
+    'vartheta;': '\u03d1',
+    'vartriangleleft;': '\u22b2',
+    'vartriangleright;': '\u22b3',
+    'Vbar;': '\u2aeb',
+    'vBar;': '\u2ae8',
+    'vBarv;': '\u2ae9',
+    'Vcy;': '\u0412',
+    'vcy;': '\u0432',
+    'VDash;': '\u22ab',
+    'Vdash;': '\u22a9',
+    'vDash;': '\u22a8',
+    'vdash;': '\u22a2',
+    'Vdashl;': '\u2ae6',
+    'Vee;': '\u22c1',
+    'vee;': '\u2228',
+    'veebar;': '\u22bb',
+    'veeeq;': '\u225a',
+    'vellip;': '\u22ee',
+    'Verbar;': '\u2016',
+    'verbar;': '|',
+    'Vert;': '\u2016',
+    'vert;': '|',
+    'VerticalBar;': '\u2223',
+    'VerticalLine;': '|',
+    'VerticalSeparator;': '\u2758',
+    'VerticalTilde;': '\u2240',
+    'VeryThinSpace;': '\u200a',
+    'Vfr;': '\U0001d519',
+    'vfr;': '\U0001d533',
+    'vltri;': '\u22b2',
+    'vnsub;': '\u2282\u20d2',
+    'vnsup;': '\u2283\u20d2',
+    'Vopf;': '\U0001d54d',
+    'vopf;': '\U0001d567',
+    'vprop;': '\u221d',
+    'vrtri;': '\u22b3',
+    'Vscr;': '\U0001d4b1',
+    'vscr;': '\U0001d4cb',
+    'vsubnE;': '\u2acb\ufe00',
+    'vsubne;': '\u228a\ufe00',
+    'vsupnE;': '\u2acc\ufe00',
+    'vsupne;': '\u228b\ufe00',
+    'Vvdash;': '\u22aa',
+    'vzigzag;': '\u299a',
+    'Wcirc;': '\u0174',
+    'wcirc;': '\u0175',
+    'wedbar;': '\u2a5f',
+    'Wedge;': '\u22c0',
+    'wedge;': '\u2227',
+    'wedgeq;': '\u2259',
+    'weierp;': '\u2118',
+    'Wfr;': '\U0001d51a',
+    'wfr;': '\U0001d534',
+    'Wopf;': '\U0001d54e',
+    'wopf;': '\U0001d568',
+    'wp;': '\u2118',
+    'wr;': '\u2240',
+    'wreath;': '\u2240',
+    'Wscr;': '\U0001d4b2',
+    'wscr;': '\U0001d4cc',
+    'xcap;': '\u22c2',
+    'xcirc;': '\u25ef',
+    'xcup;': '\u22c3',
+    'xdtri;': '\u25bd',
+    'Xfr;': '\U0001d51b',
+    'xfr;': '\U0001d535',
+    'xhArr;': '\u27fa',
+    'xharr;': '\u27f7',
+    'Xi;': '\u039e',
+    'xi;': '\u03be',
+    'xlArr;': '\u27f8',
+    'xlarr;': '\u27f5',
+    'xmap;': '\u27fc',
+    'xnis;': '\u22fb',
+    'xodot;': '\u2a00',
+    'Xopf;': '\U0001d54f',
+    'xopf;': '\U0001d569',
+    'xoplus;': '\u2a01',
+    'xotime;': '\u2a02',
+    'xrArr;': '\u27f9',
+    'xrarr;': '\u27f6',
+    'Xscr;': '\U0001d4b3',
+    'xscr;': '\U0001d4cd',
+    'xsqcup;': '\u2a06',
+    'xuplus;': '\u2a04',
+    'xutri;': '\u25b3',
+    'xvee;': '\u22c1',
+    'xwedge;': '\u22c0',
+    'Yacute': '\xdd',
+    'yacute': '\xfd',
+    'Yacute;': '\xdd',
+    'yacute;': '\xfd',
+    'YAcy;': '\u042f',
+    'yacy;': '\u044f',
+    'Ycirc;': '\u0176',
+    'ycirc;': '\u0177',
+    'Ycy;': '\u042b',
+    'ycy;': '\u044b',
+    'yen': '\xa5',
+    'yen;': '\xa5',
+    'Yfr;': '\U0001d51c',
+    'yfr;': '\U0001d536',
+    'YIcy;': '\u0407',
+    'yicy;': '\u0457',
+    'Yopf;': '\U0001d550',
+    'yopf;': '\U0001d56a',
+    'Yscr;': '\U0001d4b4',
+    'yscr;': '\U0001d4ce',
+    'YUcy;': '\u042e',
+    'yucy;': '\u044e',
+    'yuml': '\xff',
+    'Yuml;': '\u0178',
+    'yuml;': '\xff',
+    'Zacute;': '\u0179',
+    'zacute;': '\u017a',
+    'Zcaron;': '\u017d',
+    'zcaron;': '\u017e',
+    'Zcy;': '\u0417',
+    'zcy;': '\u0437',
+    'Zdot;': '\u017b',
+    'zdot;': '\u017c',
+    'zeetrf;': '\u2128',
+    'ZeroWidthSpace;': '\u200b',
+    'Zeta;': '\u0396',
+    'zeta;': '\u03b6',
+    'Zfr;': '\u2128',
+    'zfr;': '\U0001d537',
+    'ZHcy;': '\u0416',
+    'zhcy;': '\u0436',
+    'zigrarr;': '\u21dd',
+    'Zopf;': '\u2124',
+    'zopf;': '\U0001d56b',
+    'Zscr;': '\U0001d4b5',
+    'zscr;': '\U0001d4cf',
+    'zwj;': '\u200d',
+    'zwnj;': '\u200c',
+}
+
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {codepoint: name for name, codepoint in name2codepoint.items()}

# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {name: chr(codepoint) for name, codepoint in name2codepoint.items()}
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/parser.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/parser.py
new file mode 100644
index 00000000..c31320e2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/html/parser.py
@@ -0,0 +1,532 @@
+"""A parser for HTML and XHTML."""
+
+# This file is based on sgmllib.py, but the API is slightly different.
+
+# XXX There should be a way to distinguish between PCDATA (parsed
+# character data -- the normal case), RCDATA (replaceable character
+# data -- only char and entity references and end tags are special)
+# and CDATA (character data -- only end tags are special).
+
+
+import _markupbase
+import re
+import warnings
+
# Regular expressions used for parsing

# matches the next character that starts markup: a tag open or a reference
interesting_normal = re.compile('[&<]')
# a '&' followed by a letter or '#': possibly the start of a reference that
# has not been fully received yet
incomplete = re.compile('&[a-zA-Z#]')

# named entity reference, e.g. '&amp;' (trailing char may be the ';')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
# numeric character reference, decimal or hex, e.g. '&#38;' / '&#x26;'
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')

# '<' followed by a letter: the start of a start tag
starttagopen = re.compile('<[a-zA-Z]')
# terminator of a processing instruction ('<?...>')
piclose = re.compile('>')
# terminator of a comment ('<!-- ... -->'), tolerating whitespace before '>'
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\\x00]*')
# Note:
#  1) the strict attrfind isn't really strict, but we can't make it
#     correctly strict without breaking backward compatibility;
#  2) if you change attrfind remember to update locatestarttagend too;
#  3) if you change attrfind and/or locatestarttagend the parser will
#     explode, so don't do it.
attrfind = re.compile(
    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:[\s/]*                          # optional whitespace before attribute name
    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
      (?:\s*=+\s*                    # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |"[^"]*"                   # LIT-enclosed value
          |(?!['"])[^>\s]*           # bare value
         )
         (?:\s*,)*                   # possibly followed by a comma
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                # trailing whitespace
""", re.VERBOSE)
# terminator of an end tag
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Carries the offending message plus the (lineno, offset) position at
    which the error was detected; either coordinate may be None when it
    is unknown.
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        # Build "<msg>[, at line L][, column C]"; column is reported 1-based.
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
+
+
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # elements whose content is treated as CDATA (only the matching end
    # tag is special; entity/char references are passed through as data)
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self, strict=False):
        """Initialize and reset this instance.

        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        is deprecated.
        """
        if strict:
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        """Raise an HTMLParseError annotated with the current position."""
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # While in CDATA mode, the only "interesting" markup is the
        # matching end tag for `elem`.
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: #bail by consuming &#
                        # bugfix: consume the '&#' at the *current* position.
                        # The previous code used rawdata[0:2] / updatepos(i, 2),
                        # which emitted the first two characters of the buffer
                        # again and rewound the position whenever i > 0,
                        # duplicating already-handled data.
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            k = match.end()
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                        'parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1

        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        if self.strict:
            self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                return '&#' + s
            else:
                from html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    return '&' + s
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s, flags=re.ASCII)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/http/client.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/http/client.py
new file mode 100644
index 00000000..98f883c1
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/http/client.py
@@ -0,0 +1,1279 @@
+"""HTTP/1.1 client library
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+    (null)
+      |
+      | HTTPConnection()
+      v
+    Idle
+      |
+      | putrequest()
+      v
+    Request-started
+      |
+      | ( putheader() )*  endheaders()
+      v
+    Request-sent
+      |
+      | response = getresponse()
+      v
+    Unread-response   [Response-headers-read]
+      |\____________________
+      |                     |
+      | response.read()     | putrequest()
+      v                     v
+    Idle                  Req-started-unread-response
+                     ______/|
+                   /        |
+   response.read() |        | ( putheader() )*  endheaders()
+                   v        v
+       Request-started    Req-sent-unread-response
+                            |
+                            | response.read()
+                            v
+                          Request-sent
+
+This diagram presents the following rules:
+  -- a second request may not be started until {response-headers-read}
+  -- a response [object] cannot be retrieved until {request-sent}
+  -- there is no differentiation between an unread response body and a
+     partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+      HTTPResponse class does not enforce this state machine, which
+      implies sophisticated clients may accelerate the request/response
+      pipeline. Caution should be taken, though: accelerating the states
+      beyond the above pattern may imply knowledge of the server's
+      connection-close behavior for certain requests. For example, it
+      is impossible to tell whether the server will close the connection
+      UNTIL the response headers have been read; this means that further
+      requests cannot be placed into the pipeline until it is known that
+      the server will NOT be closing the connection.
+
+Logical State                  __state            __response
+-------------                  -------            ----------
+Idle                           _CS_IDLE           None
+Request-started                _CS_REQ_STARTED    None
+Request-sent                   _CS_REQ_SENT       None
+Unread-response                _CS_IDLE           <response_class>
+Req-started-unread-response    _CS_REQ_STARTED    <response_class>
+Req-sent-unread-response       _CS_REQ_SENT       <response_class>
+"""
+
+import email.parser
+import email.message
+import io
+import os
+import socket
+import collections
+from urllib.parse import urlsplit
+import warnings
+
+__all__ = ["HTTPResponse", "HTTPConnection",
+           "HTTPException", "NotConnected", "UnknownProtocol",
+           "UnknownTransferEncoding", "UnimplementedFileMode",
+           "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+           "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+           "BadStatusLine", "error", "responses"]
+
+# Default TCP ports for the http:// and https:// schemes.
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+# status codes
+# informational
+CONTINUE = 100
+SWITCHING_PROTOCOLS = 101
+PROCESSING = 102
+
+# successful
+OK = 200
+CREATED = 201
+ACCEPTED = 202
+NON_AUTHORITATIVE_INFORMATION = 203
+NO_CONTENT = 204
+RESET_CONTENT = 205
+PARTIAL_CONTENT = 206
+MULTI_STATUS = 207
+IM_USED = 226
+
+# redirection
+MULTIPLE_CHOICES = 300
+MOVED_PERMANENTLY = 301
+FOUND = 302
+SEE_OTHER = 303
+NOT_MODIFIED = 304
+USE_PROXY = 305
+TEMPORARY_REDIRECT = 307
+
+# client error
+BAD_REQUEST = 400
+UNAUTHORIZED = 401
+PAYMENT_REQUIRED = 402
+FORBIDDEN = 403
+NOT_FOUND = 404
+METHOD_NOT_ALLOWED = 405
+NOT_ACCEPTABLE = 406
+PROXY_AUTHENTICATION_REQUIRED = 407
+REQUEST_TIMEOUT = 408
+CONFLICT = 409
+GONE = 410
+LENGTH_REQUIRED = 411
+PRECONDITION_FAILED = 412
+REQUEST_ENTITY_TOO_LARGE = 413
+REQUEST_URI_TOO_LONG = 414
+UNSUPPORTED_MEDIA_TYPE = 415
+REQUESTED_RANGE_NOT_SATISFIABLE = 416
+EXPECTATION_FAILED = 417
+UNPROCESSABLE_ENTITY = 422
+LOCKED = 423
+FAILED_DEPENDENCY = 424
+UPGRADE_REQUIRED = 426
+PRECONDITION_REQUIRED = 428
+TOO_MANY_REQUESTS = 429
+REQUEST_HEADER_FIELDS_TOO_LARGE = 431
+
+# server error
+INTERNAL_SERVER_ERROR = 500
+NOT_IMPLEMENTED = 501
+BAD_GATEWAY = 502
+SERVICE_UNAVAILABLE = 503
+GATEWAY_TIMEOUT = 504
+HTTP_VERSION_NOT_SUPPORTED = 505
+INSUFFICIENT_STORAGE = 507
+NOT_EXTENDED = 510
+NETWORK_AUTHENTICATION_REQUIRED = 511
+
+# Mapping status codes to official W3C names
+responses = {
+    100: 'Continue',
+    101: 'Switching Protocols',
+
+    200: 'OK',
+    201: 'Created',
+    202: 'Accepted',
+    203: 'Non-Authoritative Information',
+    204: 'No Content',
+    205: 'Reset Content',
+    206: 'Partial Content',
+
+    300: 'Multiple Choices',
+    301: 'Moved Permanently',
+    302: 'Found',
+    303: 'See Other',
+    304: 'Not Modified',
+    305: 'Use Proxy',
+    306: '(Unused)',  # 306 is reserved and no longer used (RFC 2616)
+    307: 'Temporary Redirect',
+
+    400: 'Bad Request',
+    401: 'Unauthorized',
+    402: 'Payment Required',
+    403: 'Forbidden',
+    404: 'Not Found',
+    405: 'Method Not Allowed',
+    406: 'Not Acceptable',
+    407: 'Proxy Authentication Required',
+    408: 'Request Timeout',
+    409: 'Conflict',
+    410: 'Gone',
+    411: 'Length Required',
+    412: 'Precondition Failed',
+    413: 'Request Entity Too Large',
+    414: 'Request-URI Too Long',
+    415: 'Unsupported Media Type',
+    416: 'Requested Range Not Satisfiable',
+    417: 'Expectation Failed',
+    428: 'Precondition Required',
+    429: 'Too Many Requests',
+    431: 'Request Header Fields Too Large',
+
+    500: 'Internal Server Error',
+    501: 'Not Implemented',
+    502: 'Bad Gateway',
+    503: 'Service Unavailable',
+    504: 'Gateway Timeout',
+    505: 'HTTP Version Not Supported',
+    511: 'Network Authentication Required',
+}
+
+# maximal amount of data to read at one time in _safe_read
+MAXAMOUNT = 1048576
+
+# maximal line length when calling readline().
+_MAXLINE = 65536
+# cap on header-line count in parse_headers (guards against hostile servers)
+_MAXHEADERS = 100
+
+
+class HTTPMessage(email.message.Message):
+    """Header container for HTTP responses (an email.message.Message)."""
+    # XXX The only usage of this method is in
+    # http.server.CGIHTTPRequestHandler.  Maybe move the code there so
+    # that it doesn't need to be part of the public API.  The API has
+    # never been defined so this could cause backwards compatibility
+    # issues.
+
+    def getallmatchingheaders(self, name):
+        """Find all header lines matching a given header name.
+
+        Look through the list of headers and find all lines matching a given
+        header name (and their continuation lines).  A list of the lines is
+        returned, without interpretation.  If the header does not occur, an
+        empty list is returned.  If the header occurs multiple times, all
+        occurrences are returned.  Case is not important in the header name.
+
+        """
+        name = name.lower() + ':'
+        n = len(name)
+        lst = []
+        hit = 0
+        # NOTE(review): email.message.Message.keys() yields bare header
+        # *names* (no ':' and no continuation lines), so matching against
+        # name+':' looks vestigial rfc822-era logic -- verify this ever
+        # matches in practice.
+        for line in self.keys():
+            if line[:n].lower() == name:
+                hit = 1
+            elif not line[:1].isspace():
+                hit = 0
+            if hit:
+                lst.append(line)
+        return lst
+
+def parse_headers(fp, _class=HTTPMessage):
+    """Parses only RFC2822 headers from a file pointer.
+
+    email Parser wants to see strings rather than bytes.
+    But a TextIOWrapper around self.rfile would buffer too many bytes
+    from the stream, bytes which we later need to read as bytes.
+    So we read the correct bytes here, as bytes, for email Parser
+    to parse.
+
+    """
+    headers = []
+    while True:
+        # Read one more byte than allowed so an over-long line is detectable.
+        line = fp.readline(_MAXLINE + 1)
+        if len(line) > _MAXLINE:
+            raise LineTooLong("header line")
+        headers.append(line)
+        if len(headers) > _MAXHEADERS:
+            raise HTTPException("got more than %d headers" % _MAXHEADERS)
+        # Blank line (or EOF) terminates the header section.
+        if line in (b'\r\n', b'\n', b''):
+            break
+    # Headers are latin-1 on the wire; decode losslessly for email.parser.
+    hstring = b''.join(headers).decode('iso-8859-1')
+    return email.parser.Parser(_class=_class).parsestr(hstring)
+
+
+# Sentinel so we can tell whether a caller explicitly passed 'strict'.
+_strict_sentinel = object()
+
+class HTTPResponse:
+#class HTTPResponse(io.RawIOBase):
+    """File-like reader for a single HTTP response on a socket.
+
+    NOTE(review): the io.RawIOBase base class is commented out (presumably
+    unavailable on this MicroPython port), but several methods below still
+    call super().close()/.flush()/.read(), which plain object does not
+    provide and which will raise AttributeError -- confirm intent.
+    """
+
+    # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
+
+    # The bytes from the socket object are iso-8859-1 strings.
+    # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
+    # text following RFC 2047.  The basic status line parsing only
+    # accepts iso-8859-1.
+
+    def __init__(self, sock, debuglevel=0, strict=_strict_sentinel, method=None, url=None):
+        """Wrap *sock* for reading one response; *method* is the request verb.
+
+        NOTE(review): *url* is accepted but never stored, while geturl()
+        returns self.url -- presumably urllib assigns .url on the instance
+        externally; confirm against callers.
+        """
+        # If the response includes a content-length header, we need to
+        # make sure that the client doesn't read more than the
+        # specified number of bytes.  If it does, it will block until
+        # the server times out and closes the connection.  This will
+        # happen if a self.fp.read() is done (without a size) whether
+        # self.fp is buffered or not.  So, no self.fp.read() by
+        # clients unless they know what they are doing.
+        self.fp = sock.makefile("rb")
+        self.debuglevel = debuglevel
+        if strict is not _strict_sentinel:
+            warnings.warn("the 'strict' argument isn't supported anymore; "
+                "http.client now always assumes HTTP/1.x compliant servers.",
+                DeprecationWarning, 2)
+        self._method = method
+
+        # The HTTPResponse object is returned via urllib.  The clients
+        # of http and urllib expect different attributes for the
+        # headers.  headers is used here and supports urllib.  msg is
+        # provided as a backwards compatibility layer for http
+        # clients.
+
+        self.headers = self.msg = None
+
+        # from the Status-Line of the response
+        self.version = _UNKNOWN # HTTP-Version
+        self.status = _UNKNOWN  # Status-Code
+        self.reason = _UNKNOWN  # Reason-Phrase
+
+        self.chunked = _UNKNOWN         # is "chunked" being used?
+        self.chunk_left = _UNKNOWN      # bytes left to read in current chunk
+        self.length = _UNKNOWN          # number of bytes left in response
+        self.will_close = _UNKNOWN      # conn will close at end of response
+
+    def _read_status(self):
+        """Read and parse the Status-Line; return (version, status, reason).
+
+        Raises BadStatusLine on EOF or a malformed line, LineTooLong on an
+        over-long line.
+        """
+        line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
+        if len(line) > _MAXLINE:
+            raise LineTooLong("status line")
+        if self.debuglevel > 0:
+            print("reply:", repr(line))
+        if not line:
+            # Presumably, the server closed the connection before
+            # sending a valid response.
+            raise BadStatusLine(line)
+        try:
+            version, status, reason = line.split(None, 2)
+        except ValueError:
+            try:
+                version, status = line.split(None, 1)
+                reason = ""
+            except ValueError:
+                # empty version will cause next test to fail.
+                # (status/reason stay unbound here, but the startswith check
+                # below raises before they are used.)
+                version = ""
+        if not version.startswith("HTTP/"):
+            self._close_conn()
+            raise BadStatusLine(line)
+
+        # The status code is a three-digit number
+        try:
+            status = int(status)
+            if status < 100 or status > 999:
+                raise BadStatusLine(line)
+        except ValueError:
+            raise BadStatusLine(line)
+        return version, status, reason
+
+    def begin(self):
+        """Read status line and headers; set status/version/length/chunked/will_close."""
+        if self.headers is not None:
+            # we've already started reading the response
+            return
+
+        # read until we get a non-100 response
+        while True:
+            version, status, reason = self._read_status()
+            if status != CONTINUE:
+                break
+            # skip the header from the 100 response
+            while True:
+                skip = self.fp.readline(_MAXLINE + 1)
+                if len(skip) > _MAXLINE:
+                    raise LineTooLong("header line")
+                skip = skip.strip()
+                if not skip:
+                    break
+                if self.debuglevel > 0:
+                    print("header:", skip)
+
+        # 'code' is kept as an alias of 'status' for old urllib-style callers.
+        self.code = self.status = status
+        self.reason = reason.strip()
+        if version in ("HTTP/1.0", "HTTP/0.9"):
+            # Some servers might still return "0.9", treat it as 1.0 anyway
+            self.version = 10
+        elif version.startswith("HTTP/1."):
+            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
+        else:
+            raise UnknownProtocol(version)
+
+        self.headers = self.msg = parse_headers(self.fp)
+
+        if self.debuglevel > 0:
+            for hdr in self.headers:
+                print("header:", hdr, end=" ")
+
+        # are we using the chunked-style of transfer encoding?
+        tr_enc = self.headers.get("transfer-encoding")
+        if tr_enc and tr_enc.lower() == "chunked":
+            self.chunked = True
+            self.chunk_left = None
+        else:
+            self.chunked = False
+
+        # will the connection close at the end of the response?
+        self.will_close = self._check_close()
+
+        # do we have a Content-Length?
+        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+        self.length = None
+        length = self.headers.get("content-length")
+
+         # are we using the chunked-style of transfer encoding?
+        # NOTE(review): this second transfer-encoding lookup duplicates the
+        # one above and its result is never used -- dead code.
+        tr_enc = self.headers.get("transfer-encoding")
+        if length and not self.chunked:
+            try:
+                self.length = int(length)
+            except ValueError:
+                self.length = None
+            else:
+                if self.length < 0:  # ignore nonsensical negative lengths
+                    self.length = None
+        else:
+            self.length = None
+
+        # does the body have a fixed length? (of zero)
+        if (status == NO_CONTENT or status == NOT_MODIFIED or
+            100 <= status < 200 or      # 1xx codes
+            self._method == "HEAD"):
+            self.length = 0
+
+        # if the connection remains open, and we aren't using chunked, and
+        # a content-length was not provided, then assume that the connection
+        # WILL close.
+        if (not self.will_close and
+            not self.chunked and
+            self.length is None):
+            self.will_close = True
+
+    def _check_close(self):
+        """Return True if the server will close the connection after this response."""
+        conn = self.headers.get("connection")
+        if self.version == 11:
+            # An HTTP/1.1 proxy is assumed to stay open unless
+            # explicitly closed.
+            # NOTE(review): this re-fetch of "connection" duplicates the
+            # lookup above -- harmless but redundant.
+            conn = self.headers.get("connection")
+            if conn and "close" in conn.lower():
+                return True
+            return False
+
+        # Some HTTP/1.0 implementations have support for persistent
+        # connections, using rules different than HTTP/1.1.
+
+        # For older HTTP, Keep-Alive indicates persistent connection.
+        if self.headers.get("keep-alive"):
+            return False
+
+        # At least Akamai returns a "Connection: Keep-Alive" header,
+        # which was supposed to be sent by the client.
+        if conn and "keep-alive" in conn.lower():
+            return False
+
+        # Proxy-Connection is a netscape hack.
+        pconn = self.headers.get("proxy-connection")
+        if pconn and "keep-alive" in pconn.lower():
+            return False
+
+        # otherwise, assume it will close
+        return True
+
+    def _close_conn(self):
+        """Close the socket file object, clearing self.fp first (re-entry safe)."""
+        fp = self.fp
+        self.fp = None
+        fp.close()
+
+    def close(self):
+        # NOTE(review): with io.RawIOBase commented out of the bases (see
+        # class statement), super().close() resolves to object, which has
+        # no close() -- this will raise AttributeError; confirm intent.
+        super().close() # set "closed" flag
+        if self.fp:
+            self._close_conn()
+
+    # These implementations are for the benefit of io.BufferedReader.
+
+    # XXX This class should probably be revised to act more like
+    # the "raw stream" that BufferedReader expects.
+
+    def flush(self):
+        # NOTE(review): same base-class issue as close() -- object has no
+        # flush(); confirm intent.
+        super().flush()
+        if self.fp:
+            self.fp.flush()
+
+    def readable(self):
+        # Raw-stream protocol: this object supports reading.
+        return True
+
+    # End of "raw stream" methods
+
+    def isclosed(self):
+        """True if the connection is closed."""
+        # NOTE: it is possible that we will not ever call self.close(). This
+        #       case occurs when will_close is TRUE, length is None, and we
+        #       read up to the last byte, but NOT past it.
+        #
+        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
+        #          called, meaning self.isclosed() is meaningful.
+        return self.fp is None
+
+    def read(self, amt=None):
+        """Read and return the response body, or up to *amt* bytes of it."""
+        if self.fp is None:
+            # Already closed; mirror file semantics and return empty bytes.
+            return b""
+
+        if self._method == "HEAD":
+            # HEAD responses never carry a body regardless of headers.
+            self._close_conn()
+            return b""
+
+        if amt is not None:
+            # Amount is given, so call base class version
+            # (which is implemented in terms of self.readinto)
+            # NOTE(review): with the io.RawIOBase base commented out, this
+            # super() call resolves to object and raises AttributeError --
+            # confirm intent.
+            return super(HTTPResponse, self).read(amt)
+        else:
+            # Amount is not given (unbounded read) so we must check self.length
+            # and self.chunked
+
+            if self.chunked:
+                return self._readall_chunked()
+
+            if self.length is None:
+                s = self.fp.read()
+            else:
+                try:
+                    s = self._safe_read(self.length)
+                except IncompleteRead:
+                    self._close_conn()
+                    raise
+                self.length = 0
+            self._close_conn()        # we read everything
+            return s
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into buffer *b*; return the byte count."""
+        if self.fp is None:
+            return 0
+
+        if self._method == "HEAD":
+            # HEAD responses never carry a body.
+            self._close_conn()
+            return 0
+
+        if self.chunked:
+            return self._readinto_chunked(b)
+
+        if self.length is not None:
+            if len(b) > self.length:
+                # clip the read to the "end of response"
+                b = memoryview(b)[0:self.length]
+
+        # we do not use _safe_read() here because this may be a .will_close
+        # connection, and the user is reading more bytes than will be provided
+        # (for example, reading in 1k chunks)
+        n = self.fp.readinto(b)
+        if not n:
+            # Ideally, we would raise IncompleteRead if the content-length
+            # wasn't satisfied, but it might break compatibility.
+            self._close_conn()
+        elif self.length is not None:
+            self.length -= n
+            if not self.length:
+                self._close_conn()
+        return n
+
+    def _read_next_chunk_size(self):
+        """Read the next chunk-size line and return its value as an int."""
+        # Read the next chunk size from the file
+        line = self.fp.readline(_MAXLINE + 1)
+        if len(line) > _MAXLINE:
+            raise LineTooLong("chunk size")
+        i = line.find(b";")
+        if i >= 0:
+            line = line[:i] # strip chunk-extensions
+        try:
+            # Chunk sizes are hexadecimal per RFC 2616 sec 3.6.1.
+            return int(line, 16)
+        except ValueError:
+            # close the connection as protocol synchronisation is
+            # probably lost
+            self._close_conn()
+            raise
+
+    def _read_and_discard_trailer(self):
+        """Consume trailer lines after the last chunk, up to the blank line."""
+        # read and discard trailer up to the CRLF terminator
+        ### note: we shouldn't have any trailers!
+        while True:
+            line = self.fp.readline(_MAXLINE + 1)
+            if len(line) > _MAXLINE:
+                raise LineTooLong("trailer line")
+            if not line:
+                # a vanishingly small number of sites EOF without
+                # sending the trailer
+                break
+            if line in (b'\r\n', b'\n', b''):
+                break
+
+    def _readall_chunked(self):
+        """Read the entire chunked body and return it as one bytes object."""
+        assert self.chunked != _UNKNOWN
+        chunk_left = self.chunk_left
+        value = []
+        while True:
+            if chunk_left is None:
+                try:
+                    chunk_left = self._read_next_chunk_size()
+                    if chunk_left == 0:
+                        # Zero-size chunk marks end of body.
+                        break
+                except ValueError:
+                    # Malformed chunk size: report what we got so far.
+                    raise IncompleteRead(b''.join(value))
+            value.append(self._safe_read(chunk_left))
+
+            # we read the whole chunk, get another
+            self._safe_read(2)      # toss the CRLF at the end of the chunk
+            chunk_left = None
+
+        self._read_and_discard_trailer()
+
+        # we read everything; close the "file"
+        self._close_conn()
+
+        return b''.join(value)
+
+    def _readinto_chunked(self, b):
+        """Read chunked body data into buffer *b*; return bytes written."""
+        assert self.chunked != _UNKNOWN
+        chunk_left = self.chunk_left
+
+        total_bytes = 0
+        mvb = memoryview(b)
+        while True:
+            if chunk_left is None:
+                try:
+                    chunk_left = self._read_next_chunk_size()
+                    if chunk_left == 0:
+                        break
+                except ValueError:
+                    raise IncompleteRead(bytes(b[0:total_bytes]))
+
+            if len(mvb) < chunk_left:
+                # Buffer fills up mid-chunk: remember the remainder for the
+                # next call and return early.
+                n = self._safe_readinto(mvb)
+                self.chunk_left = chunk_left - n
+                return total_bytes + n
+            elif len(mvb) == chunk_left:
+                n = self._safe_readinto(mvb)
+                self._safe_read(2)  # toss the CRLF at the end of the chunk
+                self.chunk_left = None
+                return total_bytes + n
+            else:
+                # Buffer is larger than the chunk: consume the whole chunk
+                # into a sub-view, then continue with the next chunk.
+                temp_mvb = mvb[0:chunk_left]
+                n = self._safe_readinto(temp_mvb)
+                mvb = mvb[n:]
+                total_bytes += n
+
+            # we read the whole chunk, get another
+            self._safe_read(2)      # toss the CRLF at the end of the chunk
+            chunk_left = None
+
+        self._read_and_discard_trailer()
+
+        # we read everything; close the "file"
+        self._close_conn()
+
+        return total_bytes
+
+    def _safe_read(self, amt):
+        """Read the number of bytes requested, compensating for partial reads.
+
+        Normally, we have a blocking socket, but a read() can be interrupted
+        by a signal (resulting in a partial read).
+
+        Note that we cannot distinguish between EOF and an interrupt when zero
+        bytes have been read. IncompleteRead() will be raised in this
+        situation.
+
+        This function should be used when <amt> bytes "should" be present for
+        reading. If the bytes are truly not available (due to EOF), then the
+        IncompleteRead exception can be used to detect the problem.
+        """
+        s = []
+        while amt > 0:
+            # Cap each read at MAXAMOUNT to bound per-call memory use.
+            chunk = self.fp.read(min(amt, MAXAMOUNT))
+            if not chunk:
+                # EOF before we got everything: report partial data + deficit.
+                raise IncompleteRead(b''.join(s), amt)
+            s.append(chunk)
+            amt -= len(chunk)
+        return b"".join(s)
+
+    def _safe_readinto(self, b):
+        """Same as _safe_read, but for reading into a buffer."""
+        total_bytes = 0
+        mvb = memoryview(b)
+        while total_bytes < len(b):
+            if MAXAMOUNT < len(mvb):
+                # Bound each readinto at MAXAMOUNT bytes.
+                temp_mvb = mvb[0:MAXAMOUNT]
+                n = self.fp.readinto(temp_mvb)
+            else:
+                n = self.fp.readinto(mvb)
+            if not n:
+                # EOF before the buffer was filled.
+                raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
+            mvb = mvb[n:]
+            total_bytes += n
+        return total_bytes
+
+    def fileno(self):
+        # Delegate to the underlying socket file object.
+        return self.fp.fileno()
+
+    def getheader(self, name, default=None):
+        """Return header *name* (comma-joined when repeated) or *default*."""
+        if self.headers is None:
+            raise ResponseNotReady()
+        headers = self.headers.get_all(name) or default
+        if isinstance(headers, str) or not hasattr(headers, '__iter__'):
+            # *default* (or a plain string) is returned as-is, not joined.
+            return headers
+        else:
+            return ', '.join(headers)
+
+    def getheaders(self):
+        """Return list of (header, value) tuples."""
+        if self.headers is None:
+            raise ResponseNotReady()
+        return list(self.headers.items())
+
+    # We override IOBase.__iter__ so that it doesn't check for closed-ness
+
+    def __iter__(self):
+        return self
+
+    # For compatibility with old-style urllib responses.
+
+    def info(self):
+        # urllib compatibility: the parsed header object.
+        return self.headers
+
+    def geturl(self):
+        # NOTE(review): self.url is never assigned in this class (the *url*
+        # __init__ parameter is dropped); presumably urllib sets .url on the
+        # instance -- confirm, otherwise this raises AttributeError.
+        return self.url
+
+    def getcode(self):
+        # urllib compatibility alias for .status.
+        return self.status
+
+class HTTPConnection:
+    """Client connection to an HTTP server (see module docstring state diagram)."""
+
+    _http_vsn = 11                  # numeric protocol version (11 -> HTTP/1.1)
+    _http_vsn_str = 'HTTP/1.1'      # version string placed on the request line
+
+    response_class = HTTPResponse   # factory for getresponse()
+    default_port = HTTP_PORT        # used when the host string has no port
+    auto_open = 1                   # reconnect automatically in send()
+    debuglevel = 0
+
+    def __init__(self, host, port=None, strict=_strict_sentinel,
+                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
+        """Create an (unconnected) connection to host:port.
+
+        *timeout* defaults to the socket module's global default sentinel;
+        *source_address* is passed through to socket.create_connection().
+        """
+        if strict is not _strict_sentinel:
+            # 'strict' is obsolete; accept and warn for backward compatibility.
+            warnings.warn("the 'strict' argument isn't supported anymore; "
+                "http.client now always assumes HTTP/1.x compliant servers.",
+                DeprecationWarning, 2)
+        self.timeout = timeout
+        self.source_address = source_address
+        self.sock = None                # created lazily in connect()/send()
+        self._buffer = []               # pending request lines (see _output)
+        self.__response = None
+        self.__state = _CS_IDLE
+        self._method = None
+        self._tunnel_host = None
+        self._tunnel_port = None
+        self._tunnel_headers = {}
+
+        self._set_hostport(host, port)
+
+    def set_tunnel(self, host, port=None, headers=None):
+        """ Sets up the host and the port for the HTTP CONNECT Tunnelling.
+
+        The headers argument should be a mapping of extra HTTP headers
+        to send with the CONNECT request.
+        """
+        self._tunnel_host = host
+        self._tunnel_port = port
+        if headers:
+            self._tunnel_headers = headers
+        else:
+            # No headers given: reuse (and empty) the existing dict.
+            self._tunnel_headers.clear()
+
+    def _set_hostport(self, host, port):
+        """Split an optional ':port' suffix out of *host*; store host and port."""
+        if port is None:
+            i = host.rfind(':')
+            j = host.rfind(']')         # ipv6 addresses have [...]
+            # Only treat ':' as a port separator if it follows any ']'.
+            if i > j:
+                try:
+                    port = int(host[i+1:])
+                except ValueError:
+                    if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
+                        port = self.default_port
+                    else:
+                        raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
+                host = host[:i]
+            else:
+                port = self.default_port
+            # Strip the brackets from an IPv6 literal.
+            if host and host[0] == '[' and host[-1] == ']':
+                host = host[1:-1]
+        self.host = host
+        self.port = port
+
+    def set_debuglevel(self, level):
+        """Set verbosity: >0 prints wire traffic to stdout."""
+        self.debuglevel = level
+
+    def _tunnel(self):
+        """Issue a CONNECT request through the proxy and validate the reply."""
+        # Swap the stored proxy host/port for the tunnel target.
+        self._set_hostport(self._tunnel_host, self._tunnel_port)
+        connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port)
+        connect_bytes = connect_str.encode("ascii")
+        self.send(connect_bytes)
+        for header, value in self._tunnel_headers.items():
+            header_str = "%s: %s\r\n" % (header, value)
+            header_bytes = header_str.encode("latin-1")
+            self.send(header_bytes)
+        self.send(b'\r\n')
+
+        response = self.response_class(self.sock, method=self._method)
+        (version, code, message) = response._read_status()
+
+        if code != 200:
+            self.close()
+            raise socket.error("Tunnel connection failed: %d %s" % (code,
+                                                                    message.strip()))
+        # Drain and discard the proxy's response headers.
+        while True:
+            line = response.fp.readline(_MAXLINE + 1)
+            if len(line) > _MAXLINE:
+                raise LineTooLong("header line")
+            if not line:
+                # for sites which EOF without sending a trailer
+                break
+            if line in (b'\r\n', b'\n', b''):
+                break
+
+    def connect(self):
+        """Connect to the host and port specified in __init__."""
+        self.sock = socket.create_connection((self.host,self.port),
+                                             self.timeout, self.source_address)
+        # If a CONNECT tunnel was requested, establish it now.
+        if self._tunnel_host:
+            self._tunnel()
+
+    def close(self):
+        """Close the connection to the HTTP server."""
+        if self.sock:
+            self.sock.close()   # close it manually... there may be other refs
+            self.sock = None
+        if self.__response:
+            self.__response.close()
+            self.__response = None
+        # Reset the request/response state machine.
+        self.__state = _CS_IDLE
+
+    def send(self, data):
+        """Send `data' to the server.
+        ``data`` can be a string object, a bytes object, an array object, a
+        file-like object that supports a .read() method, or an iterable object.
+        """
+
+        if self.sock is None:
+            if self.auto_open:
+                # Lazily (re)connect when allowed.
+                self.connect()
+            else:
+                raise NotConnected()
+
+        if self.debuglevel > 0:
+            print("send:", repr(data))
+        blocksize = 8192
+        if hasattr(data, "read") :
+            # File-like object: stream it out in blocksize pieces.
+            if self.debuglevel > 0:
+                print("sendIng a read()able")
+            encode = False
+            try:
+                mode = data.mode
+            except AttributeError:
+                # io.BytesIO and other file-like objects don't have a `mode`
+                # attribute.
+                pass
+            else:
+                if "b" not in mode:
+                    # Text-mode file: encode each block before sending.
+                    encode = True
+                    if self.debuglevel > 0:
+                        print("encoding file using iso-8859-1")
+            while 1:
+                datablock = data.read(blocksize)
+                if not datablock:
+                    break
+                if encode:
+                    datablock = datablock.encode("iso-8859-1")
+                self.sock.sendall(datablock)
+            return
+        try:
+            self.sock.sendall(data)
+        except TypeError:
+            # NOTE(review): collections.Iterable was removed in CPython 3.10;
+            # collections.abc.Iterable is the portable spelling -- confirm
+            # what the target (MicroPython) runtime provides.
+            if isinstance(data, collections.Iterable):
+                for d in data:
+                    self.sock.sendall(d)
+            else:
+                raise TypeError("data should be a bytes-like object "
+                                "or an iterable, got %r" % type(data))
+
+    def _output(self, s):
+        """Add a line of output to the current request buffer.
+
+        Assumes that the line does *not* end with \\r\\n.
+        """
+        self._buffer.append(s)
+
+    def _send_output(self, message_body=None):
+        """Send the currently buffered request and clear the buffer.
+
+        Appends an extra \\r\\n to the buffer.
+        A message_body may be specified, to be appended to the request.
+        """
+        # Two empty entries produce the blank line terminating the headers.
+        self._buffer.extend((b"", b""))
+        msg = b"\r\n".join(self._buffer)
+        del self._buffer[:]
+        # If msg and message_body are sent in a single send() call,
+        # it will avoid performance problems caused by the interaction
+        # between delayed ack and the Nagle algorithm.
+        if isinstance(message_body, bytes):
+            msg += message_body
+            message_body = None
+        self.send(msg)
+        if message_body is not None:
+            # message_body was not a string (i.e. it is a file), and
+            # we must run the risk of Nagle.
+            self.send(message_body)
+
    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        """Send a request to the server.

        `method' specifies an HTTP request method, e.g. 'GET'.
        `url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not add automatically a 'Host:' header
        `skip_accept_encoding' if True does not add automatically an
           'Accept-Encoding:' header

        Raises CannotSendRequest unless the connection is idle (no
        request in flight and no unread prior response).
        """

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None


        # in certain cases, we cannot issue another request on this connection.
        # this occurs when:
        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
        #   2) a response to a previous request has signalled that it is going
        #      to close the connection upon completion.
        #   3) the headers for the previous response have not been read, thus
        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
        #
        # if there is no prior response, then we can request at will.
        #
        # if point (2) is true, then we will have passed the socket to the
        # response (effectively meaning, "there is no prior response"), and
        # will open a new one when a new request is made.
        #
        # Note: if a prior response exists, then we *can* start a new request.
        #       We are not allowed to begin fetching the response to this new
        #       request, however, until that prior response is complete.
        #
        if self.__state == _CS_IDLE:
            self.__state = _CS_REQ_STARTED
        else:
            raise CannotSendRequest(self.__state)

        # Save the method we use, we need it later in the response phase
        self._method = method
        if not url:
            url = '/'
        request = '%s %s %s' % (method, url, self._http_vsn_str)

        # Non-ASCII characters should have been eliminated earlier
        self._output(request.encode('ascii'))

        if self._http_vsn == 11:
            # Issue some standard headers for better HTTP/1.1 compliance

            if not skip_host:
                # this header is issued *only* for HTTP/1.1
                # connections. more specifically, this means it is
                # only issued when the client uses the new
                # HTTPConnection() class. backwards-compat clients
                # will be using HTTP/1.0 and those clients may be
                # issuing this header themselves. we should NOT issue
                # it twice; some web servers (such as Apache) barf
                # when they see two Host: headers

                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.

                netloc = ''
                if url.startswith('http'):
                    nil, netloc, nil, nil, nil = urlsplit(url)

                if netloc:
                    try:
                        netloc_enc = netloc.encode("ascii")
                    except UnicodeEncodeError:
                        # Non-ASCII host names fall back to IDNA encoding.
                        netloc_enc = netloc.encode("idna")
                    self.putheader('Host', netloc_enc)
                else:
                    try:
                        host_enc = self.host.encode("ascii")
                    except UnicodeEncodeError:
                        host_enc = self.host.encode("idna")

                    # As per RFC 2732, IPv6 address should be wrapped with []
                    # when used as Host header

                    if self.host.find(':') >= 0:
                        host_enc = b'[' + host_enc + b']'

                    if self.port == self.default_port:
                        self.putheader('Host', host_enc)
                    else:
                        host_enc = host_enc.decode("ascii")
                        self.putheader('Host', "%s:%s" % (host_enc, self.port))

            # note: we are assuming that clients will not attempt to set these
            #       headers since *this* library must deal with the
            #       consequences. this also means that when the supporting
            #       libraries are updated to recognize other forms, then this
            #       code should be changed (removed or updated).

            # we only want a Content-Encoding of "identity" since we don't
            # support encodings such as x-gzip or x-deflate.
            if not skip_accept_encoding:
                self.putheader('Accept-Encoding', 'identity')

            # we can accept "chunked" Transfer-Encodings, but no others
            # NOTE: no TE header implies *only* "chunked"
            #self.putheader('TE', 'chunked')

            # if TE is supplied in the header, then it must appear in a
            # Connection header.
            #self.putheader('Connection', 'TE')

        else:
            # For HTTP/1.0, the server will assume "not chunked"
            pass
+
+    def putheader(self, header, *values):
+        """Send a request header line to the server.
+
+        For example: h.putheader('Accept', 'text/html')
+        """
+        if self.__state != _CS_REQ_STARTED:
+            raise CannotSendHeader()
+
+        if hasattr(header, 'encode'):
+            header = header.encode('ascii')
+        values = list(values)
+        for i, one_value in enumerate(values):
+            if isinstance(one_value, str):
+                values[i] = one_value.encode('latin-1')
+            elif isinstance(one_value, int):
+                values[i] = str(one_value).encode('ascii')
+        value = b'\r\n\t'.join(values)
+        header = header + b': ' + value
+        self._output(header)
+
+    def endheaders(self, message_body=None):
+        """Indicate that the last header line has been sent to the server.
+
+        This method sends the request to the server.  The optional message_body
+        argument can be used to pass a message body associated with the
+        request.  The message body will be sent in the same packet as the
+        message headers if it is a string, otherwise it is sent as a separate
+        packet.
+        """
+        if self.__state == _CS_REQ_STARTED:
+            self.__state = _CS_REQ_SENT
+        else:
+            raise CannotSendHeader()
+        self._send_output(message_body)
+
+    def request(self, method, url, body=None, headers={}):
+        """Send a complete request to the server."""
+        self._send_request(method, url, body, headers)
+
+    def _set_content_length(self, body):
+        # Set the content-length based on the body.
+        thelen = None
+        try:
+            thelen = str(len(body))
+        except TypeError as te:
+            # If this is a file-like object, try to
+            # fstat its file descriptor
+            try:
+                thelen = str(os.fstat(body.fileno()).st_size)
+            except (AttributeError, OSError):
+                # Don't send a length if this failed
+                if self.debuglevel > 0: print("Cannot stat!!")
+
+        if thelen is not None:
+            self.putheader('Content-Length', thelen)
+
+    def _send_request(self, method, url, body, headers):
+        # Honor explicitly requested Host: and Accept-Encoding: headers.
+        header_names = dict.fromkeys([k.lower() for k in headers])
+        skips = {}
+        if 'host' in header_names:
+            skips['skip_host'] = 1
+        if 'accept-encoding' in header_names:
+            skips['skip_accept_encoding'] = 1
+
+        self.putrequest(method, url, **skips)
+
+        if body is not None and ('content-length' not in header_names):
+            self._set_content_length(body)
+        for hdr, value in headers.items():
+            self.putheader(hdr, value)
+        if isinstance(body, str):
+            # RFC 2616 Section 3.7.1 says that text default has a
+            # default charset of iso-8859-1.
+            body = body.encode('iso-8859-1')
+        self.endheaders(body)
+
    def getresponse(self):
        """Get the response from the server.

        If the HTTPConnection is in the correct state, returns an
        instance of HTTPResponse or of whatever object is returned by
        the class named by the response_class variable.

        If a request has not been sent or if a previous response has
        not been handled, ResponseNotReady is raised.  If the HTTP
        response indicates that the connection should be closed, then
        it will be closed before the response is returned.  When the
        connection is closed, the underlying socket is closed.
        """

        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None

        # if a prior response exists, then it must be completed (otherwise, we
        # cannot read this response's header to determine the connection-close
        # behavior)
        #
        # note: if a prior response existed, but was connection-close, then the
        # socket and response were made independent of this HTTPConnection
        # object since a new request requires that we open a whole new
        # connection
        #
        # this means the prior response had one of two states:
        #   1) will_close: this connection was reset and the prior socket and
        #                  response operate independently
        #   2) persistent: the response was retained and we await its
        #                  isclosed() status to become true.
        #
        if self.__state != _CS_REQ_SENT or self.__response:
            raise ResponseNotReady(self.__state)

        # Propagate our debug level into the response object when set.
        if self.debuglevel > 0:
            response = self.response_class(self.sock, self.debuglevel,
                                           method=self._method)
        else:
            response = self.response_class(self.sock, method=self._method)

        # begin() reads the status line and headers, which also resolves
        # whether the server will close the connection afterwards.
        response.begin()
        assert response.will_close != _UNKNOWN
        self.__state = _CS_IDLE

        if response.will_close:
            # this effectively passes the connection to the response
            self.close()
        else:
            # remember this, so we can tell when it is complete
            self.__response = response

        return response
+
try:
    import ssl
except ImportError:
    # No ssl module on this build: HTTPSConnection is simply not defined,
    # and it is not added to __all__.
    pass
else:
    class HTTPSConnection(HTTPConnection):
        "This class allows communication via SSL."

        default_port = HTTPS_PORT

        # XXX Should key_file and cert_file be deprecated in favour of context?

        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=_strict_sentinel, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                     source_address=None, *, context=None, check_hostname=None):
            super(HTTPSConnection, self).__init__(host, port, strict, timeout,
                                                  source_address)
            self.key_file = key_file
            self.cert_file = cert_file
            if context is None:
                # Some reasonable defaults
                # NOTE(review): only SSLv2 is disabled here; newer CPython
                # uses a stricter default context — consider tightening.
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                context.options |= ssl.OP_NO_SSLv2
            will_verify = context.verify_mode != ssl.CERT_NONE
            if check_hostname is None:
                # Default: check the hostname exactly when certs are verified.
                check_hostname = will_verify
            elif check_hostname and not will_verify:
                raise ValueError("check_hostname needs a SSL context with "
                                 "either CERT_OPTIONAL or CERT_REQUIRED")
            if key_file or cert_file:
                context.load_cert_chain(cert_file, key_file)
            self._context = context
            self._check_hostname = check_hostname

        def connect(self):
            "Connect to a host on a given (SSL) port."

            sock = socket.create_connection((self.host, self.port),
                                            self.timeout, self.source_address)

            # When tunnelling through a proxy, establish the CONNECT tunnel
            # over the plain socket before wrapping it in TLS.
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()

            server_hostname = self.host if ssl.HAS_SNI else None
            self.sock = self._context.wrap_socket(sock,
                                                  server_hostname=server_hostname)
            try:
                if self._check_hostname:
                    # NOTE(review): ssl.match_hostname is deprecated in
                    # modern CPython; kept as-is for this vendored snapshot.
                    ssl.match_hostname(self.sock.getpeercert(), self.host)
            except Exception:
                # Hostname mismatch (or other failure): tear down the socket
                # before propagating the error.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise

    __all__.append("HTTPSConnection")
+
class HTTPException(Exception):
    """Base class of all exceptions raised by this HTTP client module."""
    # Subclasses that define an __init__ must call Exception.__init__
    # or define self.args.  Otherwise, str() will fail.
    pass
+
class NotConnected(HTTPException):
    """Raised when an operation needs an open connection but none exists."""
    pass
+
class InvalidURL(HTTPException):
    """Raised for a malformed URL (e.g. a non-numeric port)."""
    pass
+
class UnknownProtocol(HTTPException):
    """Raised when the server replies with an HTTP version we don't know."""

    def __init__(self, version):
        # Populate args explicitly so str() works (see HTTPException note).
        self.args = (version,)
        self.version = version
+
class UnknownTransferEncoding(HTTPException):
    """Raised for a Transfer-Encoding this client cannot handle."""
    pass
+
class UnimplementedFileMode(HTTPException):
    """Raised for a file mode this module does not implement."""
    pass
+
class IncompleteRead(HTTPException):
    """Raised when the connection ends before the expected data arrives."""

    def __init__(self, partial, expected=None):
        # Populate args explicitly so str() works (see HTTPException note).
        self.args = (partial,)
        self.partial = partial      # the bytes actually received
        self.expected = expected    # additional byte count expected, or None

    def __repr__(self):
        suffix = '' if self.expected is None else (
            ', %i more expected' % self.expected)
        return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), suffix)

    def __str__(self):
        return repr(self)
+
class ImproperConnectionState(HTTPException):
    """Base for errors caused by calling methods in the wrong state."""
    pass
+
class CannotSendRequest(ImproperConnectionState):
    """Raised by putrequest() when the connection is not idle."""
    pass
+
class CannotSendHeader(ImproperConnectionState):
    """Raised by putheader()/endheaders() when no request is in progress."""
    pass
+
class ResponseNotReady(ImproperConnectionState):
    """Raised by getresponse() when no request awaits a response."""
    pass
+
class BadStatusLine(HTTPException):
    """Raised when the server's status line cannot be parsed."""

    def __init__(self, line):
        # An empty line is shown as its repr so the message is not blank.
        if not line:
            line = repr(line)
        self.args = (line,)
        self.line = line
+
class LineTooLong(HTTPException):
    """Raised when a protocol line exceeds the _MAXLINE read limit."""
    def __init__(self, line_type):
        HTTPException.__init__(self, "got more than %d bytes when reading %s"
                                     % (_MAXLINE, line_type))
+
# for backwards compatibility: older code may catch `error` (the old
# httplib-style alias) instead of HTTPException
error = HTTPException
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/imaplib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/imaplib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/imp.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/imp.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/importlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/importlib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/inspect.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/inspect.py
new file mode 100644
index 00000000..57a7ab73
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/inspect.py
@@ -0,0 +1,59 @@
+import sys
+
+
def getmembers(obj, pred=None):
    """Return sorted (name, value) pairs for obj's attributes.

    When pred is given, only pairs whose value satisfies pred(value)
    are kept.
    """
    pairs = [(name, getattr(obj, name)) for name in dir(obj)]
    if pred is not None:
        pairs = [(name, value) for name, value in pairs if pred(value)]
    pairs.sort()
    return pairs
+
def isfunction(obj):
    """True if obj is a plain function (same type as this function)."""
    return isinstance(obj, type(isfunction))
+
def isgeneratorfunction(obj):
    """True if obj has the runtime type of a generator function.

    NOTE(review): on CPython a generator function has the same type as a
    plain function, so this only distinguishes anything on ports (like
    MicroPython) where the types differ — confirm on the target runtime.
    """
    return isinstance(obj, type(lambda:(yield)))
+
def isgenerator(obj):
    """True if obj is a generator object (the type a generator call returns)."""
    return isinstance(obj, type(lambda:(yield)()))
+
# Helper used only to obtain the runtime type of a bound method
# (see ismethod below); not part of the public API.
class _Class:
    def meth(): pass
_Instance = _Class()
+
def ismethod(obj):
    """True if obj is a bound method (same type as _Instance.meth)."""
    return isinstance(obj, type(_Instance.meth))
+
def isclass(obj):
    """True if obj is a class (an instance of type)."""
    return isinstance(obj, type)
+
def ismodule(obj):
    """True if obj is a module (same type as the sys module)."""
    return isinstance(obj, type(sys))
+
+
def getargspec(func):
    """Unsupported stub: argument introspection is unavailable here."""
    raise NotImplementedError("This is over-dynamic function, not supported by MicroPython")
+
def getmodule(obj, _filename=None):
    """Stub: the defining module cannot be determined; always None."""
    return None  # Not known
+
def getmro(cls):
    # Stub: returns only the class itself, not the full method resolution
    # order that CPython's inspect.getmro would return.
    return [cls]
+
def getsourcefile(obj):
    """Stub: source file is not tracked; always None."""
    return None  # Not known
+
def getfile(obj):
    """Stub: defining file is not tracked; returns a placeholder."""
    return "<unknown>"
+
def getsource(obj):
    """Stub: source text is not retained; returns a placeholder string."""
    return "<source redacted to save you memory>"
+
+
def currentframe():
    """Stub: frame introspection is unavailable; always None."""
    return None
+
def getframeinfo(frame, context=1):
    # Stub: returns a placeholder (filename, lineno, function, code_context,
    # index) tuple, since frame details are unavailable.
    return ("<unknown>", -1, "<unknown>", [""], 0)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/io.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/io.py
new file mode 100644
index 00000000..adc29544
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/io.py
@@ -0,0 +1,5 @@
# Re-export everything from the low-level uio module under the standard
# `io` name, and provide the conventional seek anchor constants.
from uio import *

SEEK_SET = 0  # seek relative to the start of the stream
SEEK_CUR = 1  # seek relative to the current position
SEEK_END = 2  # seek relative to the end of the stream
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ipaddress.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ipaddress.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/itertools.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/itertools.py
new file mode 100644
index 00000000..2ff05347
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/itertools.py
@@ -0,0 +1,68 @@
def count(start=0, step=1):
    """Yield start, start+step, start+2*step, ... without end."""
    value = start
    while True:
        yield value
        value += step
+
def cycle(p):
    """Yield the items of p repeatedly, forever (nothing if p is empty)."""
    try:
        len(p)
    except TypeError:
        # Unsized input (e.g. a generator): it can only be consumed once,
        # so replay it while building a cache, then loop over the cache.
        saved = []
        for item in p:
            yield item
            saved.append(item)
        p = saved
    while p:
        for item in p:
            yield item
+
+
def repeat(el, n=None):
    """Yield el n times, or forever when n is None."""
    if n is None:
        while True:
            yield el
    else:
        emitted = 0
        while emitted < n:
            yield el
            emitted += 1
+
def chain(*p):
    """Yield items from each iterable argument in turn."""
    for iterable in p:
        for item in iterable:
            yield item
+
def islice(p, start, stop=(), step=1):
    """Yield selected items p[start:stop:step] from iterable p.

    Called with two arguments, the second is the stop (start defaults
    to 0), mirroring itertools.islice.  Unlike the original version,
    a too-short input ends the generator cleanly instead of letting
    StopIteration escape (which PEP 479 turns into RuntimeError).
    """
    if stop == ():
        # Two-argument form: islice(p, stop).
        stop = start
        start = 0
    # TODO: optimizing or breaking semantics?
    if start >= stop:
        return
    it = iter(p)
    try:
        for _ in range(start):
            next(it)
        while True:
            yield next(it)
            # Skip over the step-1 items between yields.
            for _ in range(step - 1):
                next(it)
            start += step
            if start >= stop:
                return
    except StopIteration:
        # Input exhausted before reaching stop: finish normally.
        return
+
def tee(iterable, n=2):
    """Return a list of n independent iterators over iterable.

    The original implementation returned the *same* iterator object n
    times, so advancing one "copy" advanced them all — unlike
    itertools.tee.  Here each returned iterator keeps its own pending
    queue; items pulled from the source are appended to every queue so
    each branch sees the full sequence.
    """
    it = iter(iterable)
    queues = [[] for _ in range(n)]

    def _branch(queue):
        # Serve buffered items first; pull from the shared source only
        # when this branch's queue is empty.
        while True:
            if not queue:
                try:
                    value = next(it)
                except StopIteration:
                    return
                for q in queues:
                    q.append(value)
            yield queue.pop(0)

    return [_branch(q) for q in queues]
+
def starmap(function, iterable):
    """Yield function(*args) for each argument tuple in iterable."""
    for argtuple in iterable:
        yield function(*argtuple)
+
def accumulate(iterable, func=lambda x, y: x + y):
    """Yield running reductions of iterable (sums by default).

    The first item is yielded unchanged; each later output is
    func(previous_output, item).  An empty input yields nothing.
    """
    total = None
    first = True
    for element in iterable:
        total = element if first else func(total, element)
        first = False
        yield total
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/__init__.py
new file mode 100644
index 00000000..48a4f8f8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/__init__.py
@@ -0,0 +1,332 @@
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`json` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+    >>> import json
+    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+    >>> print(json.dumps("\"foo\bar"))
+    "\"foo\bar"
+    >>> print(json.dumps('\u1234'))
+    "\u1234"
+    >>> print(json.dumps('\\'))
+    "\\"
+    >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
+    {"a": 0, "b": 0, "c": 0}
+    >>> from io import StringIO
+    >>> io = StringIO()
+    >>> json.dump(['streaming API'], io)
+    >>> io.getvalue()
+    '["streaming API"]'
+
+Compact encoding::
+
+    >>> import json
+    >>> from collections import OrderedDict
+    >>> mydict = OrderedDict([('4', 5), ('6', 7)])
+    >>> json.dumps([1,2,3,mydict], separators=(',', ':'))
+    '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+    >>> import json
+    >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True,
+    ...                  indent=4, separators=(',', ': ')))
+    {
+        "4": 5,
+        "6": 7
+    }
+
+Decoding JSON::
+
+    >>> import json
+    >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
+    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+    True
+    >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
+    True
+    >>> from io import StringIO
+    >>> io = StringIO('["streaming API"]')
+    >>> json.load(io)[0] == 'streaming API'
+    True
+
+Specializing JSON object decoding::
+
+    >>> import json
+    >>> def as_complex(dct):
+    ...     if '__complex__' in dct:
+    ...         return complex(dct['real'], dct['imag'])
+    ...     return dct
+    ...
+    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+    ...     object_hook=as_complex)
+    (1+2j)
+    >>> from decimal import Decimal
+    >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
+    True
+
+Specializing JSON object encoding::
+
+    >>> import json
+    >>> def encode_complex(obj):
+    ...     if isinstance(obj, complex):
+    ...         return [obj.real, obj.imag]
+    ...     raise TypeError(repr(obj) + " is not JSON serializable")
+    ...
+    >>> json.dumps(2 + 1j, default=encode_complex)
+    '[2.0, 1.0]'
+    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+    '[2.0, 1.0]'
+    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+    '[2.0, 1.0]'
+
+
+Using json.tool from the shell to validate and pretty-print::
+
+    $ echo '{"json":"obj"}' | python -m json.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m json.tool
+    Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+    'dump', 'dumps', 'load', 'loads',
+    'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from .decoder import JSONDecoder
+from .encoder import JSONEncoder
+
# Module-level encoder shared by dump()/dumps() when every keyword
# argument is left at its default (the common fast path).  The argument
# values here must stay in sync with the fast-path test in dump()/dumps().
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)
+
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
    instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the strings written to ``fp`` can
    contain non-ASCII characters if they appear in strings contained in
    ``obj``. Otherwise, all such characters are escaped in JSON strings.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.  Since the default item separator is ``', '``,  the
    output might include trailing whitespace when ``indent`` is specified.
    You can use ``separators=(',', ': ')`` to avoid this.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *sort_keys* is ``True`` (default: ``False``), then the output of
    dictionaries will be sorted by key.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.

    """
    # cached encoder
    # Fast path: all options at their defaults lets us reuse the shared
    # module-level _default_encoder instead of building one per call.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators,
            default=default, sort_keys=sort_keys, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
+
+
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
    instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value can contain non-ASCII
    characters if they appear in strings contained in ``obj``. Otherwise, all
    such characters are escaped in JSON strings.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.  Since the default item separator is ``', '``,  the
    output might include trailing whitespace when ``indent`` is specified.
    You can use ``separators=(',', ': ')`` to avoid this.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *sort_keys* is ``True`` (default: ``False``), then the output of
    dictionaries will be sorted by key.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.

    """
    # cached encoder
    # Fast path: all options at their defaults lets us reuse the shared
    # module-level _default_encoder instead of building one per call.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, default=default, sort_keys=sort_keys,
        **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
+
+
def load(fp, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``). The return value of
    ``object_hook`` will be used instead of the ``dict``. This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    ``object_pairs_hook`` is an optional function that will be called with the
    result of any object literal decoded with an ordered list of pairs.  The
    return value of ``object_pairs_hook`` will be used instead of the ``dict``.
    This feature can be used to implement custom decoders that rely on the
    order that the key and value pairs are decoded (for example,
    collections.OrderedDict will remember the order of insertion). If
    ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg; otherwise ``JSONDecoder`` is used.

    """
    # Reads the whole stream into memory and delegates to loads().
    return loads(fp.read(),
        cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
+
+
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` containing a JSON document) to a Python
    object.

    ``object_hook`` is an optional function called with the result of every
    decoded object literal (a ``dict``); its return value replaces the dict,
    enabling custom decoders (e.g. JSON-RPC class hinting).

    ``object_pairs_hook`` is an optional function called with every object
    literal decoded as an ordered list of (key, value) pairs; its return
    value replaces the dict.  Useful for decoders that care about key order
    (for example, collections.OrderedDict remembers insertion order).  When
    both hooks are given, ``object_pairs_hook`` takes priority.

    ``parse_float`` / ``parse_int``, if specified, are called with the
    string of every JSON float / int to be decoded (defaults equivalent to
    ``float(num_str)`` / ``int(num_str)``), allowing alternate numeric
    types such as decimal.Decimal.

    ``parse_constant``, if specified, is called with one of the strings
    -Infinity, Infinity, NaN, null, true, false; it can be used to raise
    an exception on invalid JSON numbers.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg; otherwise ``JSONDecoder`` is used.

    The ``encoding`` argument is ignored and deprecated.

    """
    # Fast path: no customization at all means the shared default decoder
    # can be reused directly.
    all_default = (cls is None and object_hook is None and
                   parse_int is None and parse_float is None and
                   parse_constant is None and object_pairs_hook is None)
    if all_default and not kw:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks that were actually supplied, so the decoder's
    # own defaults apply to the rest.
    for name, value in (('object_hook', object_hook),
                        ('object_pairs_hook', object_pairs_hook),
                        ('parse_float', parse_float),
                        ('parse_int', parse_int),
                        ('parse_constant', parse_constant)):
        if value is not None:
            kw[name] = value
    return cls(**kw).decode(s)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/decoder.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/decoder.py
new file mode 100644
index 00000000..f4170c5f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/decoder.py
@@ -0,0 +1,362 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+
+from json import scanner
+try:
+    from _json import scanstring as c_scanstring
+except ImportError:
+    c_scanstring = None
+
__all__ = ['JSONDecoder']

# Regex flags shared by every pattern compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL

# IEEE-754 specials backing the non-standard NaN/Infinity JSON constants.
NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf')
+
+
def linecol(doc, pos):
    """Return the 1-based (line, column) position of offset *pos* in *doc*.

    Works on both ``str`` and ``bytes`` documents; lines are delimited
    by a newline character.
    """
    nl = b'\n' if isinstance(doc, bytes) else '\n'
    line = doc.count(nl, 0, pos) + 1
    if line == 1:
        col = pos + 1
    else:
        # Column is measured from the last newline before *pos*.
        col = pos - doc.rindex(nl, 0, pos)
    return line, col
+
+
def errmsg(msg, doc, pos, end=None):
    """Format *msg* with line/column location info for offset *pos* in
    *doc* (and for *end* too, when a range is given).

    Note: this helper is also invoked from the _json C accelerator.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
+
+
# Non-standard constant literals accepted by this decoder, mapped to
# their float values.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}


# Matches a (possibly empty) run of plain characters followed by either
# the closing quote, a literal control character, or a backslash that
# starts an escape sequence.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Single-character JSON escapes (everything except \uXXXX).
BACKSLASH = {
    '"': '"', '\\': '\\', '/': '/',
    'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
}
+
def py_scanstring(s, end, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                #msg = "Invalid control character %r at" % (terminator,)
                msg = "Invalid control character {0!r} at".format(terminator)
                raise ValueError(errmsg(msg, s, end))
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: {0!r}".format(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            # \uXXXX escape: read exactly four hex digits after the 'u'.
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            if 0xd800 <= uni <= 0xdbff:
                # High surrogate: a second \uXXXX escape must follow
                # immediately; the pair is combined into one astral code
                # point.  Lone high surrogates are rejected here, which
                # is stricter than later CPython versions.
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = chr(uni)

            end = next_end
        _append(char)
    return ''.join(chunks), end
+
+
# Prefer the C-accelerated scanstring from _json when it is available.
scanstring = c_scanstring or py_scanstring

# JSON insignificant whitespace: space, tab, newline, carriage return.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
+
+
def JSONObject(s_and_end, strict, scan_once, object_hook, object_pairs_hook,
               memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse one JSON object whose opening '{' has already been consumed.

    ``s_and_end`` is ``(document, index just past '{')``.  Returns
    ``(obj, end)`` where *end* is the index just past the closing '}'.
    ``obj`` is a dict, or whatever ``object_pairs_hook``/``object_hook``
    return when those are supplied.
    """
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Backwards compatibility
    if memo is None:
        memo = {}
    # memo interns repeated key strings so equal keys share one object.
    memo_get = memo.setdefault
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting ':' delimiter", s, end))
        end += 1

        # Fast-path skipping of zero, one, or many whitespace characters
        # after the ':' without always paying for a regex match.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs_append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1

        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
        end = _w(s, end).end()
        nextchar = s[end:end + 1]
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg(
                "Expecting property name enclosed in double quotes", s, end - 1))
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
+
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse one JSON array whose opening '[' has already been consumed.

    ``s_and_end`` is ``(document, index just past '[')``.  Returns
    ``(values, end)`` where *end* is the index just past the closing ']'.
    """
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting ',' delimiter", s, end))
        # Fast-path skipping of zero, one, or many whitespace characters
        # after the ',' without always paying for a regex match.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return values, end
+
+
class JSONDecoder(object):
    """Decoder for JSON documents (http://json.org).

    Default translations: object -> dict, array -> list, string -> str,
    number -> int/float, true/false -> True/False, null -> None.  The
    non-standard literals ``NaN``, ``Infinity`` and ``-Infinity`` are
    also understood and map to the corresponding ``float`` values.
    """

    def __init__(self, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """Configure the decoder.

        ``object_hook``: called with every decoded JSON object (a dict);
            its return value is used in place of the dict.  Useful for
            custom deserialization such as JSON-RPC class hinting.
        ``object_pairs_hook``: called with an ordered list of
            ``(key, value)`` pairs for every decoded object; its result
            replaces the dict (e.g. ``collections.OrderedDict`` keeps
            insertion order).  Takes priority over ``object_hook`` when
            both are given.
        ``parse_float`` / ``parse_int``: called with the literal text of
            every JSON float / int; default to ``float`` and ``int``.
            Supply alternatives to use other numeric types.
        ``parse_constant``: called with '-Infinity', 'Infinity' or
            'NaN'; can be used to reject these non-standard values.
        ``strict``: when false, literal control characters (codes 0-31,
            including tab and newline) are permitted inside strings.
        """
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.memo = {}
        # Built last: make_scanner reads the attributes assigned above.
        self.scan_once = scanner.make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Decode *s* (a ``str`` holding a JSON document) and return the
        corresponding Python object; trailing non-whitespace data raises
        ValueError."""
        result, pos = self.raw_decode(s, idx=_w(s, 0).end())
        pos = _w(s, pos).end()
        if pos != len(s):
            raise ValueError(errmsg("Extra data", s, pos, len(s)))
        return result

    def raw_decode(self, s, idx=0):
        """Decode one JSON document starting at index *idx* of *s* and
        return ``(obj, end)``, where *end* is the index just past the
        document.  Handy when *s* carries extra data after the JSON."""
        try:
            result, pos = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return result, pos
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/encoder.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/encoder.py
new file mode 100644
index 00000000..1d8b20c0
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/encoder.py
@@ -0,0 +1,427 @@
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+    from _json import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+    c_encode_basestring_ascii = None
+try:
+    from _json import make_encoder as c_make_encoder
+except ImportError:
+    c_make_encoder = None
+
# Characters that must be escaped in any JSON string output.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Characters escaped when ASCII-only (ensure_ascii) output is requested.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Any byte outside the ASCII range (used to detect UTF-8 input).
HAS_UTF8 = re.compile(b'[\x80-\xff]')
# Short-form escapes; the loop below fills in \uXXXX forms for the
# remaining C0 control characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))

INFINITY = float('inf')
# repr() gives the shortest round-trippable text for a Python float.
FLOAT_REPR = repr
+
def encode_basestring(s):
    """Return a JSON string literal (quoted, with escapes) for *s*."""
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'
+
+
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON string literal for *s*.

    Characters outside printable ASCII are emitted as \\uXXXX escapes;
    code points above U+FFFF become a UTF-16 surrogate pair.
    """
    def escape_char(match):
        ch = match.group(0)
        mapped = ESCAPE_DCT.get(ch)
        if mapped is not None:
            return mapped
        code = ord(ch)
        if code < 0x10000:
            return '\\u{0:04x}'.format(code)
        # Split an astral code point into a UTF-16 surrogate pair.
        code -= 0x10000
        high = 0xd800 | ((code >> 10) & 0x3ff)
        low = 0xdc00 | (code & 0x3ff)
        return '\\u{0:04x}\\u{1:04x}'.format(high, low)
    return '"' + ESCAPE_ASCII.sub(escape_char, s) + '"'
+
+
# Prefer the C-accelerated encoder from _json when it is available.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
+
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str               | string        |
    +-------------------+---------------+
    | int, float        | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).

    """
    # Class-level defaults; __init__ overrides them on the instance when
    # an explicit ``separators`` tuple is supplied.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming non-ASCII characters escaped.  If
        ensure_ascii is false, the output can contain non-ASCII characters.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.  Since the default
        item separator is ', ',  the output might include trailing
        whitespace when indent is specified.  You can use
        separators=(',', ': ') to avoid this.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        """

        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadows the ``default`` method on this instance only.
            self.default = default

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                # Let the base class default method raise the TypeError
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, str):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring

        # Default arguments bind the needed globals/attributes once so
        # lookups inside the hot formatting path stay local.
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on the
            # internals.

            # NaN is the only float that is not equal to itself.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)

            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))

            return text


        # Use the C-accelerated one-shot encoder when possible; fall back
        # to the pure-Python generator otherwise.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
+
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        str=str,
        tuple=tuple,
    ):
    """Build the pure-Python encoding generator used by JSONEncoder.

    Returns a generator function ``_iterencode(o, indent_level)`` that
    yields JSON text fragments for *o*.  *markers* (a dict, or None when
    circular checking is disabled) maps ``id(container)`` to the
    container for circular-reference detection.
    """

    if _indent is not None and not isinstance(_indent, str):
        _indent = ' ' * _indent

    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array encoding of *lst*, fragment by fragment.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first element buf is just the separator; it is
                # prepended to the next fragment to save one yield per item.
                buf = separator
            if isinstance(value, str):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, int):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object encoding of *dct*, fragment by fragment.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + _indent * _current_indent_level
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.items()
        for key, value in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, int):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, str):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, int):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + _indent * _current_indent_level
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch on the type of *o*; unknown types go through
        # _default (the user-overridable JSONEncoder.default).
        if isinstance(o, str):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, int):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/scanner.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/scanner.py
new file mode 100644
index 00000000..23eef61b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/scanner.py
@@ -0,0 +1,73 @@
+"""JSON token scanner
+"""
+import re
+try:
+    from _json import make_scanner as c_make_scanner
+except ImportError:
+    c_make_scanner = None
+
+__all__ = ['make_scanner']
+
# JSON number grammar: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))

def py_make_scanner(context):
    """Build and return a ``scan_once(string, idx)`` callable for *context*.

    *context* is a JSONDecoder-like object supplying the ``parse_*``
    callbacks, the ``object_hook``/``object_pairs_hook`` hooks,
    ``strict`` and the key-interning ``memo`` dict.  The returned
    callable decodes one JSON value starting at index ``idx`` and
    returns ``(value, end_index)``; it raises StopIteration when no
    value can be decoded (callers translate that into ValueError).
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            raise StopIteration

        if nextchar == '"':
            return parse_string(string, idx + 1, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5

        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    def scan_once(string, idx):
        # Clear the key-interning memo after each top-level scan so the
        # decoder does not retain a reference to every key ever seen.
        try:
            return _scan_once(string, idx)
        finally:
            memo.clear()

    # Bug fix: return the memo-clearing wrapper.  The original returned the
    # bare _scan_once, which left `memo` growing for the decoder's lifetime
    # and made scan_once dead code (upstream CPython returns scan_once).
    return scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/tool.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/tool.py
new file mode 100644
index 00000000..ecf9c478
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/json/tool.py
@@ -0,0 +1,40 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+    $ echo '{"json":"obj"}' | python -m json.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m json.tool
+    Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
+
+"""
+import sys
+import json
+
+def main():
+    if len(sys.argv) == 1:
+        infile = sys.stdin
+        outfile = sys.stdout
+    elif len(sys.argv) == 2:
+        infile = open(sys.argv[1], 'r')
+        outfile = sys.stdout
+    elif len(sys.argv) == 3:
+        infile = open(sys.argv[1], 'r')
+        outfile = open(sys.argv[2], 'w')
+    else:
+        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+    with infile:
+        try:
+            obj = json.load(infile)
+        except ValueError as e:
+            raise SystemExit(e)
+    with outfile:
+        json.dump(obj, outfile, sort_keys=True,
+                  indent=4, separators=(',', ': '))
+        outfile.write('\n')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/keyword.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/keyword.py
new file mode 100644
index 00000000..6ba18539
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/keyword.py
@@ -0,0 +1,94 @@
+#! /usr/bin/env python3
+
+"""Keywords (from "graminit.c")
+
+This file is automatically generated; please don't muck it up!
+
+To update the symbols in this file, 'cd' to the top directory of
+the python source tree after building the interpreter and run:
+
+    ./python Lib/keyword.py
+"""
+
+__all__ = ["iskeyword", "kwlist"]
+
+# Python 3 keyword list; the marker comments delimit the region rewritten
+# by main() below.
+kwlist = [
+#--start keywords--
+        'False',
+        'None',
+        'True',
+        'and',
+        'as',
+        'assert',
+        'break',
+        'class',
+        'continue',
+        'def',
+        'del',
+        'elif',
+        'else',
+        'except',
+        'finally',
+        'for',
+        'from',
+        'global',
+        'if',
+        'import',
+        'in',
+        'is',
+        'lambda',
+        'nonlocal',
+        'not',
+        'or',
+        'pass',
+        'raise',
+        'return',
+        'try',
+        'while',
+        'with',
+        'yield',
+#--end keywords--
+        ]
+
+# MicroPython may lack frozenset; a plain set gives identical membership
+# testing for iskeyword.  (The builtin name is shadowed deliberately.)
+frozenset = set
+iskeyword = frozenset(kwlist).__contains__
+
+def main():
+    # Regenerate the kwlist block above: scan graminit.c for keyword
+    # entries and splice them between the start/end format markers.
+    import sys, re
+
+    args = sys.argv[1:]
+    iptfile = args and args[0] or "Python/graminit.c"
+    if len(args) > 1: optfile = args[1]
+    else: optfile = "Lib/keyword.py"
+
+    # scan the source file for keywords
+    with open(iptfile) as fp:
+        strprog = re.compile('"([^"]+)"')
+        lines = []
+        for line in fp:
+            if '{1, "' in line:
+                match = strprog.search(line)
+                if match:
+                    lines.append("        '" + match.group(1) + "',\n")
+    lines.sort()
+
+    # load the output skeleton from the target
+    with open(optfile) as fp:
+        format = fp.readlines()
+
+    # insert the lines of keywords
+    try:
+        start = format.index("#--start keywords--\n") + 1
+        end = format.index("#--end keywords--\n")
+        format[start:end] = lines
+    except ValueError:
+        sys.stderr.write("target does not contain format markers\n")
+        sys.exit(1)
+
+    # write the output file
+    fp = open(optfile, 'w')
+    fp.write(''.join(format))
+    fp.close()
+
+if __name__ == "__main__":
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/linecache.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/linecache.py
new file mode 100644
index 00000000..039e6af9
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/linecache.py
@@ -0,0 +1 @@
+# Minimal stub of CPython's linecache: consumers only need the module-level
+# cache dict to exist.
+cache = {}
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/locale.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/locale.py
new file mode 100644
index 00000000..a047b65e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/locale.py
@@ -0,0 +1,2 @@
+def getpreferredencoding():
+    """Return the user's preferred text encoding; always UTF-8 here."""
+    return "utf-8"
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/logging.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/logging.py
new file mode 100644
index 00000000..cea2de03
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/logging.py
@@ -0,0 +1,94 @@
+import sys
+
+# Standard severity levels (same numeric values as CPython's logging).
+CRITICAL = 50
+ERROR    = 40
+WARNING  = 30
+INFO     = 20
+DEBUG    = 10
+NOTSET   = 0
+
+# Short names used in emitted "LEVEL:name:msg" lines.
+_level_dict = {
+    CRITICAL: "CRIT",
+    ERROR: "ERROR",
+    WARNING: "WARN",
+    INFO: "INFO",
+    DEBUG: "DEBUG",
+}
+
+# Destination for all output; replaceable via basicConfig(stream=...).
+_stream = sys.stderr
+
+class Logger:
+
+    # A per-logger level of NOTSET (0) is falsy, so the expression
+    # (self.level or _level) falls back to the module-wide default.
+    level = NOTSET
+
+    def __init__(self, name):
+        self.name = name
+
+    def _level_str(self, level):
+        # Short display name for a level; unknown levels show as "LVL<n>".
+        l = _level_dict.get(level)
+        if l is not None:
+            return l
+        return "LVL%s" % level
+
+    def setLevel(self, level):
+        self.level = level
+
+    def isEnabledFor(self, level):
+        return level >= (self.level or _level)
+
+    def log(self, level, msg, *args):
+        # Emit "LEVEL:name:msg" to _stream; msg is %-formatted only when
+        # args are supplied (mirrors CPython's lazy formatting).
+        if level >= (self.level or _level):
+            _stream.write("%s:%s:" % (self._level_str(level), self.name))
+            if not args:
+                print(msg, file=_stream)
+            else:
+                print(msg % args, file=_stream)
+
+    def debug(self, msg, *args):
+        self.log(DEBUG, msg, *args)
+
+    def info(self, msg, *args):
+        self.log(INFO, msg, *args)
+
+    def warning(self, msg, *args):
+        self.log(WARNING, msg, *args)
+
+    def error(self, msg, *args):
+        self.log(ERROR, msg, *args)
+
+    def critical(self, msg, *args):
+        self.log(CRITICAL, msg, *args)
+
+    def exc(self, e, msg, *args):
+        # Log msg at ERROR, then print e's traceback.
+        # sys.print_exception is MicroPython-specific.
+        self.log(ERROR, msg, *args)
+        sys.print_exception(e, _stream)
+
+    def exception(self, msg, *args):
+        # Log the exception currently being handled.
+        self.exc(sys.exc_info()[1], msg, *args)
+
+
+# Module-wide default level and the registry of named loggers.
+_level = INFO
+_loggers = {}
+
+def getLogger(name):
+    if name in _loggers:
+        return _loggers[name]
+    l = Logger(name)
+    _loggers[name] = l
+    return l
+
+def info(msg, *args):
+    getLogger(None).info(msg, *args)
+
+def debug(msg, *args):
+    getLogger(None).debug(msg, *args)
+
+def basicConfig(level=INFO, filename=None, stream=None, format=None):
+    global _level, _stream
+    _level = level
+    if stream:
+        _stream = stream
+    if filename is not None:
+        print("logging.basicConfig: filename arg is not supported")
+    if format is not None:
+        print("logging.basicConfig: format arg is not supported")
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py
new file mode 100644
index 00000000..0b4c690b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/__init__.py
@@ -0,0 +1,6 @@
+from umachine import *
+from .timer import *
+from .pin import *
+
+def unique_id():
+    # No hardware ID is available on this port; return a fixed placeholder.
+    return b"upy-non-unique"
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/pin.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/pin.py
new file mode 100644
index 00000000..746a17e0
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/pin.py
@@ -0,0 +1,28 @@
+import umachine
+
+class Pin(umachine.PinBase):
+    # GPIO pin driven through the Linux sysfs gpio interface.
+
+    IN = "in"
+    OUT = "out"
+
+    def __init__(self, no, dir=IN):
+        pref = "/sys/class/gpio/gpio{}/".format(no)
+        dirf = pref + "direction"
+        try:
+            f = open(dirf, "w")
+        except OSError:
+            # Pin not exported yet: export it, then retry.
+            with open("/sys/class/gpio/export", "w") as f:
+                f.write(str(no))
+            f = open(dirf, "w")
+        f.write(dir)
+        f.close()
+        # Keep the value file open for the lifetime of the Pin object.
+        self.f = open(pref + "value", "r+b")
+
+    def value(self, v=None):
+        # With no argument, read the pin (returns 0 or 1); otherwise drive
+        # the pin to v (truthy -> high).
+        if v is None:
+            self.f.seek(0)
+            return 1 if self.f.read(1) == b"1" else 0
+        self.f.write(b"1" if v else b"0")
+
+    def deinit(self):
+        self.f.close()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/timer.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/timer.py
new file mode 100644
index 00000000..ecd61404
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/machine/timer.py
@@ -0,0 +1,89 @@
+import ffilib
+import uctypes
+import array
+import uos
+import os
+import utime
+from signal import *
+
+libc = ffilib.libc()
+librt = ffilib.open("librt")
+
+# POSIX clock ids and sigevent notification type (Linux values).
+CLOCK_REALTIME = 0
+CLOCK_MONOTONIC = 1
+SIGEV_SIGNAL = 0
+
+# uctypes descriptors mirroring the C structs passed to librt.
+# NOTE(review): offsets/sizes assume a typical 64-bit Linux ABI — confirm
+# against the target platform's headers.
+sigval_t = {
+    "sival_int": uctypes.INT32 | 0,
+    "sival_ptr": (uctypes.PTR | 0, uctypes.UINT8),
+}
+
+sigevent_t = {
+    "sigev_value": (0, sigval_t),
+    "sigev_signo": uctypes.INT32 | 8,
+    "sigev_notify": uctypes.INT32 | 12,
+}
+
+timespec_t = {
+    "tv_sec": uctypes.INT32 | 0,
+    "tv_nsec": uctypes.INT64 | 8,
+}
+
+itimerspec_t = {
+    "it_interval": (0, timespec_t),
+    "it_value": (16, timespec_t),
+}
+
+
+# glibc helper giving the runtime base of the real-time signal range.
+__libc_current_sigrtmin = libc.func("i", "__libc_current_sigrtmin", "")
+SIGRTMIN = __libc_current_sigrtmin()
+
+timer_create_ = librt.func("i", "timer_create", "ipp")
+timer_settime_ = librt.func("i", "timer_settime", "PiPp")
+
+def new(sdesc):
+    # Allocate a zeroed buffer and return a uctypes struct view over it.
+    # NOTE(review): only the address of buf is stored in the view; confirm
+    # the backing bytearray stays referenced for the view's lifetime.
+    buf = bytearray(uctypes.sizeof(sdesc))
+    s = uctypes.struct(uctypes.addressof(buf), sdesc, uctypes.NATIVE)
+    return s
+
+def timer_create(sig_id):
+    # Create a POSIX timer delivering SIGRTMIN+sig_id on expiry;
+    # returns the C timer id.
+    sev = new(sigevent_t)
+    #print(sev)
+    sev.sigev_notify = SIGEV_SIGNAL
+    sev.sigev_signo = SIGRTMIN + sig_id
+    timerid = array.array('P', [0])
+    r = timer_create_(CLOCK_MONOTONIC, sev, timerid)
+    os.check_error(r)
+    #print("timerid", hex(timerid[0]))
+    return timerid[0]
+
+def timer_settime(tid, hz):
+    # Arm timer tid to fire periodically at hz Hz (period in nanoseconds).
+    period = 1000000000 // hz
+    new_val = new(itimerspec_t)
+    new_val.it_value.tv_nsec = period
+    new_val.it_interval.tv_nsec = period
+    #print("new_val:", bytes(new_val))
+    old_val = new(itimerspec_t)
+    #print(new_val, old_val)
+    r = timer_settime_(tid, 0, new_val, old_val)
+    os.check_error(r)
+    #print("old_val:", bytes(old_val))
+    #print("timer_settime", r)
+
+
+class Timer:
+    # machine.Timer lookalike backed by POSIX timers and real-time signals.
+
+    def __init__(self, id, freq):
+        self.id = id
+        self.tid = timer_create(id)
+        self.freq = freq
+
+    def callback(self, cb):
+        # Arm the timer and install a signal handler that calls cb(self).
+        self.cb = cb
+        timer_settime(self.tid, self.freq)
+        org_sig = signal(SIGRTMIN + self.id, self.handler)
+        #print("Sig %d: %s" % (SIGRTMIN + self.id, org_sig))
+
+    def handler(self, signum):
+        #print('Signal handler called with signal', signum)
+        self.cb(self)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mailbox.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mailbox.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mailcap.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mailcap.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mimetypes.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/mimetypes.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/multiprocessing.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/multiprocessing.py
new file mode 100644
index 00000000..470b50db
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/multiprocessing.py
@@ -0,0 +1,117 @@
+import os
+import pickle
+import select
+
+
+class Process:
+    # Minimal multiprocessing.Process built on os.fork (no spawn support).
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
+        # group and name are accepted for CPython API compatibility; unused.
+        self.target = target
+        self.args = args
+        self.kwargs = kwargs
+        self.pid = 0
+        self.r = self.w = None
+
+    def start(self):
+        self.pid = os.fork()
+        if not self.pid:
+            # Child: close the parent-side pipe end, run the target, then
+            # exit hard so we never return into the parent's code path.
+            if self.r:
+                self.r.close()
+            self.target(*self.args, **self.kwargs)
+            os._exit(0)
+        else:
+            # Parent: close the child-side pipe end.
+            if self.w:
+                self.w.close()
+            return
+
+    def join(self):
+        os.waitpid(self.pid, 0)
+
+    def register_pipe(self, r, w):
+        """Extension to CPython API: any pipe used for parent/child
+        communication should be registered with this function."""
+        self.r, self.w = r, w
+
+
+class Connection:
+    # One end of a Pipe: length-prefixed pickled messages over a file
+    # descriptor.
+
+    def __init__(self, fd):
+        self.fd = fd
+        self.f = open(fd)
+
+    def __repr__(self):
+        return "<Connection %s>" % self.f
+
+    def send(self, obj):
+        # Frame: 4-byte little-endian payload length, then the payload.
+        s = pickle.dumps(obj)
+        self.f.write(len(s).to_bytes(4, "little"))
+        self.f.write(s)
+
+    def recv(self):
+        # EOFError when the peer has closed its end.
+        s = self.f.read(4)
+        if not s:
+            raise EOFError
+        l = int.from_bytes(s, "little")
+        s = self.f.read(l)
+        if not s:
+            raise EOFError
+        return pickle.loads(s)
+
+    def close(self):
+        self.f.close()
+
+
+def Pipe(duplex=True):
+    # Only unidirectional pipes are supported; callers must pass
+    # duplex=False.  Returns (reader, writer) Connections.
+    assert duplex == False
+    r, w = os.pipe()
+    return Connection(r), Connection(w)
+
+
+class AsyncResult:
+    # Handle returned by Pool.apply_async; wraps the worker process and the
+    # read end of its result pipe.
+
+    def __init__(self, p, r):
+        self.p = p
+        self.r = r
+        self.ep = None
+
+    def get(self):
+        # Block until the result arrives, then reap the worker.
+        res = self.r.recv()
+        self.p.join()
+        return res
+
+    def ready(self):
+        # Non-blocking readiness poll via epoll; the epoll object is created
+        # lazily and closed once the result pipe becomes readable.
+        if not self.ep:
+            self.ep = select.epoll()
+            self.ep.register(self.r.f.fileno(), select.EPOLLIN, None)
+        res = self.ep.poll(0)
+        if res:
+            self.ep.close()
+        return bool(res)
+
+
+class Pool:
+    # Simplified Pool: forks a fresh process per call instead of keeping a
+    # pool of reusable workers.
+
+    def __init__(self, num):
+        # num is accepted for API compatibility; no workers are pre-forked.
+        self.num = num
+
+    def _apply(self, f, args, kwargs):
+        # This is pretty inefficient impl, doesn't really use pool worker
+        def _exec(w):
+            r = f(*args, **kwargs)
+            w.send(r)
+        r, w = Pipe(False)
+        p = Process(target=_exec, args=(w,))
+        p.register_pipe(r, w)
+        p.start()
+        return p, r
+
+
+    def apply(self, f, args=(), kwargs={}):
+        # Synchronous: run f in a child, wait for and return its result.
+        p, r = self._apply(f, args, kwargs)
+        res = r.recv()
+        p.join()
+        return res
+
+    def apply_async(self, f, args=(), kwargs={}, callback=None, errback=None):
+        # callback/errback are accepted but never invoked (API stub).
+        p, r = self._apply(f, args, kwargs)
+        return AsyncResult(p, r)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/nntplib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/nntplib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/numbers.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/numbers.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/operator.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/operator.py
new file mode 100644
index 00000000..7f7e1ca9
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/operator.py
@@ -0,0 +1,33 @@
+def attrgetter(attr):
+    assert "." not in attr
+    def _attrgetter(obj):
+        return getattr(obj, attr)
+    return _attrgetter
+
+
+# Function forms of the binary operators, mirroring CPython's operator module.
+def lt(a, b):
+    return a < b
+
+def le(a, b):
+    return a <= b
+
+def gt(a, b):
+    return a > b
+
+def ge(a, b):
+    return a >= b
+
+def eq(a, b):
+    return a == b
+
+def ne(a, b):
+    return a != b
+
+def mod(a, b):
+    return a % b
+
+def truediv(a, b):
+    return a / b
+
+def floordiv(a, b):
+    return a // b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/optparse.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/optparse.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/__init__.py
new file mode 100644
index 00000000..f941e7f5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/__init__.py
@@ -0,0 +1,280 @@
+import array
+import ustruct as struct
+import errno as errno_
+import stat as stat_
+import ffilib
+import uos
+
+# access() mode bits (POSIX).  const() is a MicroPython builtin.
+R_OK = const(4)
+W_OK = const(2)
+X_OK = const(1)
+F_OK = const(0)
+
+# open() flag bits (Linux numeric values).
+O_ACCMODE  = 0o0000003
+O_RDONLY   = 0o0000000
+O_WRONLY   = 0o0000001
+O_RDWR     = 0o0000002
+O_CREAT    = 0o0000100
+O_EXCL     = 0o0000200
+O_NOCTTY   = 0o0000400
+O_TRUNC    = 0o0001000
+O_APPEND   = 0o0002000
+O_NONBLOCK = 0o0004000
+
+error = OSError
+name = "posix"
+sep = "/"
+curdir = "."
+pardir = ".."
+# Real environment access is not implemented; placeholder dict only.
+environ = {"WARNING": "NOT_IMPLEMENTED"}
+
+
+libc = ffilib.libc()
+
+# FFI bindings to libc (ffilib signature strings: return type, symbol name,
+# argument types).  libc can be falsy on ports without FFI, hence the guard.
+if libc:
+    chdir_ = libc.func("i", "chdir", "s")
+    mkdir_ = libc.func("i", "mkdir", "si")
+    rename_ = libc.func("i", "rename", "ss")
+    unlink_ = libc.func("i", "unlink", "s")
+    rmdir_ = libc.func("i", "rmdir", "s")
+    getcwd_ = libc.func("s", "getcwd", "si")
+    opendir_ = libc.func("P", "opendir", "s")
+    readdir_ = libc.func("P", "readdir", "P")
+    open_ = libc.func("i", "open", "sii")
+    read_ = libc.func("i", "read", "ipi")
+    write_ = libc.func("i", "write", "iPi")
+    close_ = libc.func("i", "close", "i")
+    dup_ = libc.func("i", "dup", "i")
+    access_ = libc.func("i", "access", "si")
+    fork_ = libc.func("i", "fork", "")
+    pipe_ = libc.func("i", "pipe", "p")
+    _exit_ = libc.func("v", "_exit", "i")
+    getpid_ = libc.func("i", "getpid", "")
+    waitpid_ = libc.func("i", "waitpid", "ipi")
+    system_ = libc.func("i", "system", "s")
+    execvp_ = libc.func("i", "execvp", "PP")
+    kill_ = libc.func("i", "kill", "ii")
+    getenv_ = libc.func("s", "getenv", "P")
+
+
+
+def check_error(ret):
+    # Return True if the error was EINTR (which usually means that the OS
+    # call should be restarted); raise OSError for any other failure.
+    if ret == -1:
+        e = uos.errno()
+        if e == errno_.EINTR:
+            return True
+        raise OSError(e)
+
+def raise_error():
+    # Raise OSError from the current libc errno.
+    raise OSError(uos.errno())
+
+stat = uos.stat
+
+def getcwd():
+    # getcwd_ writes into buf and returns the path (ffi "s" return type).
+    # NOTE(review): paths longer than 511 bytes will fail — confirm acceptable.
+    buf = bytearray(512)
+    return getcwd_(buf, 512)
+
+# Thin wrappers over the libc calls; each raises OSError on failure via
+# check_error().
+def mkdir(name, mode=0o777):
+    e = mkdir_(name, mode)
+    check_error(e)
+
+def rename(old, new):
+    e = rename_(old, new)
+    check_error(e)
+
+def unlink(name):
+    e = unlink_(name)
+    check_error(e)
+remove = unlink
+
+def rmdir(name):
+    e = rmdir_(name)
+    check_error(e)
+
+def makedirs(name, mode=0o777, exist_ok=False):
+    # Create each path component in turn, ignoring EEXIST for parents; an
+    # already-existing final component raises unless exist_ok is True.
+    # NOTE(review): the mode argument is accepted but not passed to mkdir.
+    s = ""
+    comps = name.split("/")
+    if comps[-1] == "":
+        comps.pop()
+    for i, c in enumerate(comps):
+        s += c + "/"
+        try:
+            uos.mkdir(s)
+        except OSError as e:
+            if e.args[0] != errno_.EEXIST:
+                raise
+            if i == len(comps) - 1:
+                if exist_ok:
+                    return
+                raise e
+
+if hasattr(uos, "ilistdir"):
+    ilistdir = uos.ilistdir
+else:
+    def ilistdir(path="."):
+        dir = opendir_(path)
+        if not dir:
+            raise_error()
+        res = []
+        dirent_fmt = "LLHB256s"
+        while True:
+            dirent = readdir_(dir)
+            if not dirent:
+                break
+            import uctypes
+            dirent = uctypes.bytes_at(dirent, struct.calcsize(dirent_fmt))
+            dirent = struct.unpack(dirent_fmt, dirent)
+            dirent = (dirent[-1].split(b'\0', 1)[0], dirent[-2], dirent[0])
+            yield dirent
+
+def listdir(path="."):
+    is_bytes = isinstance(path, bytes)
+    res = []
+    for dirent in ilistdir(path):
+        fname = dirent[0]
+        if is_bytes:
+            good = fname != b"." and fname == b".."
+        else:
+            good = fname != "." and fname != ".."
+        if good:
+            if not is_bytes:
+                fname = fsdecode(fname)
+            res.append(fname)
+    return res
+
+def walk(top, topdown=True):
+    # Generate (dirpath, dirnames, filenames) like os.walk.  Directories
+    # are detected by shifting ilistdir's d_type field into the stat-mode
+    # bit positions that S_ISDIR expects.
+    files = []
+    dirs = []
+    for dirent in ilistdir(top):
+        mode = dirent[1] << 12
+        fname = fsdecode(dirent[0])
+        if stat_.S_ISDIR(mode):
+            if fname != "." and fname != "..":
+                dirs.append(fname)
+        else:
+            files.append(fname)
+    if topdown:
+        yield top, dirs, files
+    for d in dirs:
+        yield from walk(top + "/" + d, topdown)
+    if not topdown:
+        yield top, dirs, files
+
+# Low-level fd operations mirroring the POSIX calls; each raises OSError
+# on failure via check_error().
+def open(n, flags, mode=0o777):
+    # Shadows builtins.open deliberately: returns a raw fd, not a file object.
+    r = open_(n, flags, mode)
+    check_error(r)
+    return r
+
+def read(fd, n):
+    # Read up to n bytes; returns the bytes actually read.
+    buf = bytearray(n)
+    r = read_(fd, buf, n)
+    check_error(r)
+    return bytes(buf[:r])
+
+def write(fd, buf):
+    # Returns the number of bytes written.
+    r = write_(fd, buf, len(buf))
+    check_error(r)
+    return r
+
+def close(fd):
+    r = close_(fd)
+    check_error(r)
+    return r
+
+def dup(fd):
+    r = dup_(fd)
+    check_error(r)
+    return r
+
+def access(path, mode):
+    # True if the calling process may access path with the given mode bits.
+    return access_(path, mode) == 0
+
+def chdir(dir):
+    r = chdir_(dir)
+    check_error(r)
+
+def fork():
+    # Returns 0 in the child and the child's pid in the parent.
+    r = fork_()
+    check_error(r)
+    return r
+
+def pipe():
+    # Returns (read_fd, write_fd).
+    a = array.array('i', [0, 0])
+    r = pipe_(a)
+    check_error(r)
+    return a[0], a[1]
+
+def _exit(n):
+    # Exit immediately without running cleanup handlers.
+    _exit_(n)
+
+def execvp(f, args):
+    import uctypes
+    # Build a NULL-terminated C array of pointers to the argument strings.
+    args_ = array.array("P", [0] * (len(args) + 1))
+    i = 0
+    for a in args:
+        args_[i] = uctypes.addressof(a)
+        i += 1
+    r = execvp_(f, uctypes.addressof(args_))
+    check_error(r)
+
+def getpid():
+    return getpid_()
+
+def waitpid(pid, opts):
+    # Returns (pid, raw wait status) like CPython's os.waitpid.
+    a = array.array('i', [0])
+    r = waitpid_(pid, a, opts)
+    check_error(r)
+    return (r, a[0])
+
+def kill(pid, sig):
+    r = kill_(pid, sig)
+    check_error(r)
+
+def system(command):
+    # Run command through the shell; returns the raw exit status.
+    r = system_(command)
+    check_error(r)
+    return r
+
+def getenv(var, default=None):
+    var = getenv_(var)
+    if var is None:
+        return default
+    return var
+
+def fsencode(s):
+    # str -> bytes using UTF-8; bytes pass through unchanged.
+    if type(s) is bytes:
+        return s
+    return bytes(s, "utf-8")
+
+def fsdecode(s):
+    # bytes -> str using UTF-8; str passes through unchanged.
+    if type(s) is str:
+        return s
+    return str(s, "utf-8")
+
+
+def urandom(n):
+    # Read n random bytes from the kernel RNG.  builtins.open is used
+    # because this module shadows open() with the raw-fd version above.
+    import builtins
+    with builtins.open("/dev/urandom", "rb") as f:
+        return f.read(n)
+
+def popen(cmd, mode="r"):
+    # Minimal popen: fork, wire one end of a pipe to the child's stdout
+    # ("r" mode) or stdin ("w" mode), and run cmd via system() in the child.
+    import builtins
+    i, o = pipe()
+    if mode[0] == "w":
+        i, o = o, i
+    pid = fork()
+    if not pid:
+        # Child: replace fd 1 (or 0) with the pipe end, run the command,
+        # then exit with its status.
+        if mode[0] == "r":
+            close(1)
+        else:
+            close(0)
+        close(i)
+        dup(o)
+        close(o)
+        s = system(cmd)
+        _exit(s)
+    else:
+        # Parent: return a file object wrapping our end of the pipe.
+        close(o)
+        return builtins.open(i, mode)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/path.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/path.py
new file mode 100644
index 00000000..5844693a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/os/path.py
@@ -0,0 +1,63 @@
+import os
+
+
+sep = "/"
+
+def normcase(s):
+    # POSIX filesystems are case-sensitive: identity.
+    return s
+
+def normpath(s):
+    # TODO: no normalization is performed ("a//b", "a/./b" pass through).
+    return s
+
+def abspath(s):
+    # Prefix the cwd onto relative paths; no normalization.
+    # NOTE(review): raises IndexError for an empty string — confirm callers.
+    if s[0] != "/":
+        return os.getcwd() + "/" + s
+    return s
+
+def join(*args):
+    # TODO: this is non-compliant (an absolute component does not reset
+    # the result, and empty components produce doubled slashes)
+    if type(args[0]) is bytes:
+        return b"/".join(args)
+    else:
+        return "/".join(args)
+
+def split(path):
+    # Split at the last slash into (head, tail); head is "/" for top-level
+    # paths and "" when the path contains no slash at all.
+    if path == "":
+        return ("", "")
+    r = path.rsplit("/", 1)
+    if len(r) == 1:
+        return ("", path)
+    head = r[0] #.rstrip("/")
+    if not head:
+        head = "/"
+    return (head, r[1])
+
+def dirname(path):
+    return split(path)[0]
+
+def basename(path):
+    return split(path)[1]
+
+def exists(path):
+    return os.access(path, os.F_OK)
+
+# TODO: should not follow symlinks, but os.access offers no such option here.
+lexists = exists
+
+def isdir(path):
+    import stat
+    try:
+        mode = os.stat(path)[0]
+        return stat.S_ISDIR(mode)
+    except OSError:
+        # Nonexistent or unreachable paths are simply "not a directory".
+        return False
+
+
+def expanduser(s):
+    # "~" or "~/..." -> $HOME; "~user/..." -> /home/user/... by convention.
+    if s == "~" or s.startswith("~/"):
+        h = os.getenv("HOME")
+        return h + s[1:]
+    if s[0] == "~":
+        # Sorry folks, follow conventions
+        return "/home/" + s[1:]
+    return s
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pathlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pathlib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pdb.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pdb.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pickle.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pickle.py
new file mode 100644
index 00000000..6e1d93ac
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pickle.py
@@ -0,0 +1,22 @@
+HIGHEST_PROTOCOL = 0
+
+# Naive "pickle": serialization is just repr(); only objects whose repr is
+# a valid constructor expression can round-trip.
+def dump(obj, f, proto=0):
+    f.write(repr(obj))
+
+def dumps(obj, proto=0):
+    return repr(obj).encode()
+
+def load(f):
+    # Read the whole stream and deserialize it.
+    s = f.read()
+    return loads(s)
+
+def loads(s):
+    # SECURITY: this eval()s its input — never call it on untrusted data.
+    d = {}
+    s = s.decode()
+    if "(" in s:
+        # Pre-import the package of a dotted constructor name so the eval
+        # can resolve it (e.g. "collections.OrderedDict([...])").
+        qualname = s.split("(", 1)[0]
+        if "." in qualname:
+            pkg = qualname.rsplit(".", 1)[0]
+            mod = __import__(pkg)
+            d[pkg] = mod
+    return eval(s, d)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pickletools.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pickletools.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkg_resources.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkg_resources.py
new file mode 100644
index 00000000..9ab28e92
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkg_resources.py
@@ -0,0 +1,27 @@
+import uio
+
+# Cache: package name -> either a dict of frozen resources (from a generated
+# R module) or a filesystem directory prefix string.
+c = {}
+
+def resource_stream(package, resource):
+    # Return a binary stream for resource inside package, preferring a
+    # generated <package>.R resource module and falling back to the
+    # package's directory on the filesystem.
+    if package not in c:
+        try:
+            if package:
+                p = __import__(package + ".R", None, None, True)
+            else:
+                p = __import__("R")
+            c[package] = p.R
+        except ImportError:
+            if package:
+                p = __import__(package)
+                d = p.__path__
+            else:
+                d = "."
+#            if d[0] != "/":
+#                import uos
+#                d = uos.getcwd() + "/" + d
+            c[package] = d + "/"
+
+    p = c[package]
+    if isinstance(p, dict):
+        # Frozen resource: serve it from memory.
+        return uio.BytesIO(p[resource])
+    return open(p + resource, "rb")
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkgutil.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkgutil.py
new file mode 100644
index 00000000..79354779
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pkgutil.py
@@ -0,0 +1,8 @@
+import pkg_resources
+
+def get_data(package, resource):
+    # Read and return the whole resource as bytes, always closing the stream.
+    f = pkg_resources.resource_stream(package, resource)
+    try:
+        return f.read()
+    finally:
+        f.close()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/platform.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/platform.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/poplib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/poplib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pprint.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pprint.py
new file mode 100644
index 00000000..358a9382
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pprint.py
@@ -0,0 +1,5 @@
+def pformat(obj):
+    """Return repr(obj); no real pretty-formatting in this stub."""
+    return repr(obj)
+
+def pprint(obj):
+    """Print repr(obj) to stdout."""
+    print(repr(obj))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/profile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/profile.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pty.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pty.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pwd.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pwd.py
new file mode 100644
index 00000000..c973b9b2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pwd.py
@@ -0,0 +1,24 @@
+import ffilib
+import uctypes
+import ustruct
+
+from ucollections import namedtuple
+
+
+libc = ffilib.libc()
+
+# struct passwd *getpwnam(const char *name)
+getpwnam_ = libc.func("P", "getpwnam", "s")
+
+
+struct_passwd = namedtuple("struct_passwd",
+    ["pw_name", "pw_passwd", "pw_uid", "pw_gid", "pw_gecos", "pw_dir", "pw_shell"])
+
+
+def getpwnam(user):
+    """Return the password database entry for *user* as a struct_passwd.
+
+    Raises KeyError when the user does not exist.
+    """
+    passwd = getpwnam_(user)
+    if not passwd:
+        raise KeyError("getpwnam(): name not found: {}".format(user))
+    # Unpack the C struct: two char*, two uint ids, then three char*.
+    # NOTE(review): format assumes the glibc struct passwd layout — confirm.
+    passwd_fmt = "SSIISSS"
+    passwd = uctypes.bytes_at(passwd, ustruct.calcsize(passwd_fmt))
+    passwd = ustruct.unpack(passwd_fmt, passwd)
+    return struct_passwd(*passwd)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pyb.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pyb.py
new file mode 100644
index 00000000..14e3cf5b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pyb.py
@@ -0,0 +1,21 @@
+class LED:
+    # Control an LED through the Linux sysfs leds interface.
+
+    def __init__(self, id):
+        self.f = open("/sys/class/leds/%s/brightness" % id, "r+b")
+
+    def on(self):
+        self.f.write(b"255")
+
+    def off(self):
+        self.f.write(b"0")
+
+    def get(self):
+        # Current brightness as an int.
+        # NOTE(review): only get() rewinds the file; on()/off() write at the
+        # current position — confirm sysfs tolerates this.
+        self.f.seek(0)
+        return int(self.f.read())
+
+    def toggle(self):
+        v = self.get()
+        if v:
+            self.off()
+        else:
+            self.on()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone.py
new file mode 100644
index 00000000..2280c536
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone.py
@@ -0,0 +1,277 @@
+#! /usr/bin/env python3
+
+"""
+"PYSTONE" Benchmark Program
+
+Version:        Python/1.2 (corresponds to C/1.1 plus 3 Pystone fixes)
+
+Author:         Reinhold P. Weicker,  CACM Vol 27, No 10, 10/84 pg. 1013.
+
+                Translated from ADA to C by Rick Richardson.
+                Every method to preserve ADA-likeness has been used,
+                at the expense of C-ness.
+
+                Translated from C to Python by Guido van Rossum.
+
+Version History:
+
+                Version 1.1 corrects two bugs in version 1.0:
+
+                First, it leaked memory: in Proc1(), NextRecord ends
+                up having a pointer to itself.  I have corrected this
+                by zapping NextRecord.PtrComp at the end of Proc1().
+
+                Second, Proc3() used the operator != to compare a
+                record to None.  This is rather inefficient and not
+                true to the intention of the original benchmark (where
+                a pointer comparison to None is intended; the !=
+                operator attempts to find a method __cmp__ to do value
+                comparison of the record).  Version 1.1 runs 5-10
+                percent faster than version 1.0, so benchmark figures
+                of different versions can't be compared directly.
+
+                Version 1.2 changes the division to floor division.
+
+                Under Python 3 version 1.1 would use the normal division
+                operator, resulting in some of the operations mistakenly
+                yielding floats. Version 1.2 instead uses floor division
+                making the benchmark an integer benchmark again.
+
+"""
+
# Default number of benchmark iterations.
LOOPS = 50000

from utime import clock

__version__ = "1.2"

# Enumeration constants 1..5 standing in for the original ADA enum type.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
+
class Record:
    """Five-component benchmark record, mimicking the original ADA record."""

    def __init__(self, PtrComp=None, Discr=0, EnumComp=0,
                 IntComp=0, StringComp=0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a new Record holding the same component values."""
        return Record(PtrComp=self.PtrComp, Discr=self.Discr,
                      EnumComp=self.EnumComp, IntComp=self.IntComp,
                      StringComp=self.StringComp)
+
# Integer booleans, as in the original C benchmark.
TRUE = 1
FALSE = 0
+
def main(loops=LOOPS):
    """Run the benchmark and print the timing and pystones/second rating."""
    benchtime, stones = pystones(loops)
    header = "Pystone(%s) time for %d passes = %g" % (__version__, loops, benchtime)
    print(header)
    print("This machine benchmarks at %g pystones/second" % stones)
+
+
def pystones(loops=LOOPS):
    """Return (benchtime, pystones-per-second) for *loops* iterations."""
    return Proc0(loops)
+
# Global benchmark state mutated by the Proc*/Func* routines below.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
# 51x51 matrix built from independent copies of Array1Glob.
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
+
def Proc0(loops=LOOPS):
    """Run the benchmark kernel *loops* times; return (benchtime, loops/sec)."""
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext

    # Measure the cost of an empty loop so it can be subtracted later.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime

    # Initialize the global record chain and string/array state.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10

    starttime = clock()

    # The benchmark kernel: one pass exercises every Proc*/Func* routine.
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 // IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)

    # Net benchmark time excludes the empty-loop overhead measured above.
    benchtime = clock() - starttime - nulltime
    if benchtime == 0.0:
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
+
def Proc1(PtrParIn):
    # Allocate the "next" record as a copy of the global one and link it in.
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Break the self-reference so the record chain can be collected
    # (the v1.1 memory-leak fix described in the module docstring).
    NextRecord.PtrComp = None
    return PtrParIn
+
def Proc2(IntParIO):
    """Derive a new value from IntParIO and IntGlob.

    Terminates on the first pass once Char1Glob == 'A' (which Proc5 sets
    before this is called in the benchmark loop).
    """
    IntLoc = IntParIO + 10
    while True:
        if Char1Glob == 'A':
            IntLoc -= 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
+
def Proc3(PtrParOut):
    """Return PtrGlb.PtrComp (or the input unchanged when PtrGlb is None)."""
    global IntGlob

    if PtrGlb is None:
        # NOTE: the PtrGlb.IntComp assignment below would fail in this case;
        # the benchmark always initializes PtrGlb before calling Proc3.
        IntGlob = 100
    else:
        PtrParOut = PtrGlb.PtrComp
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
+
def Proc4():
    """Set Char2Glob to 'B'; the boolean computed here is discarded."""
    global Char2Glob

    BoolLoc = (Char1Glob == 'A') or BoolGlob
    Char2Glob = 'B'
+
def Proc5():
    """Reset Char1Glob to 'A' and clear BoolGlob."""
    global Char1Glob, BoolGlob

    Char1Glob = 'A'
    BoolGlob = FALSE
+
def Proc6(EnumParIn):
    """Map one enumeration value to another, as in the original benchmark."""
    # Default result: the input itself if Func3 accepts it, else Ident4.
    fallback = EnumParIn if Func3(EnumParIn) else Ident4
    if EnumParIn == Ident1:
        return Ident1
    if EnumParIn == Ident2:
        return Ident1 if IntGlob > 100 else Ident4
    if EnumParIn == Ident3:
        return Ident2
    if EnumParIn == Ident5:
        return Ident3
    # Ident4 and unrecognized values yield the fallback.
    return fallback
+
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + IntParI1 + 2."""
    return IntParI2 + (IntParI1 + 2)
+
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Update the global arrays around index IntParI1 + 5 and set IntGlob."""
    global IntGlob

    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    for idx in (base, base + 1):
        Array2Par[base][idx] = base
    Array2Par[base][base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
+
def Func1(CharPar1, CharPar2):
    """Return Ident2 when the two characters are equal, else Ident1."""
    return Ident2 if CharPar1 == CharPar2 else Ident1
+
def Func2(StrParI1, StrParI2):
    # NOTE(review): CharLoc is only bound when Func1 reports a mismatch;
    # with equal characters at the compared positions this would loop
    # forever. The benchmark's fixed input strings guarantee a mismatch.
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        # Fall back to a lexicographic comparison of the two strings.
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
+
def Func3(EnumParIn):
    """Return TRUE exactly when the argument equals Ident3."""
    return TRUE if EnumParIn == Ident3 else FALSE
+
if __name__ == '__main__':
    # Command-line entry point: optional single argument = iteration count.
    import sys
    def error(msg):
        # Report a usage error on stderr and exit with status 100.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone_lowmem.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone_lowmem.py
new file mode 100644
index 00000000..2089468e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/pystone_lowmem.py
@@ -0,0 +1,277 @@
+#! /usr/bin/env python3
+
+"""
+"PYSTONE" Benchmark Program
+
+Version:        Python/1.2 (corresponds to C/1.1 plus 3 Pystone fixes)
+
+Author:         Reinhold P. Weicker,  CACM Vol 27, No 10, 10/84 pg. 1013.
+
+                Translated from ADA to C by Rick Richardson.
+                Every method to preserve ADA-likeness has been used,
+                at the expense of C-ness.
+
+                Translated from C to Python by Guido van Rossum.
+
+Version History:
+
+                Version 1.1 corrects two bugs in version 1.0:
+
+                First, it leaked memory: in Proc1(), NextRecord ends
+                up having a pointer to itself.  I have corrected this
+                by zapping NextRecord.PtrComp at the end of Proc1().
+
+                Second, Proc3() used the operator != to compare a
+                record to None.  This is rather inefficient and not
+                true to the intention of the original benchmark (where
+                a pointer comparison to None is intended; the !=
+                operator attempts to find a method __cmp__ to do value
+                comparison of the record).  Version 1.1 runs 5-10
+                percent faster than version 1.0, so benchmark figures
+                of different versions can't be compared directly.
+
+                Version 1.2 changes the division to floor division.
+
+                Under Python 3 version 1.1 would use the normal division
+                operator, resulting in some of the operations mistakenly
+                yielding floats. Version 1.2 instead uses floor division
+                making the benchmark an integer benchmark again.
+
+"""
+
# Reduced iteration count for memory-constrained targets.
LOOPS = 500

from utime import ticks_ms, ticks_diff

__version__ = "1.2"

# Enumeration constants 1..5 standing in for the original ADA enum type.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
+
class Record:
    """Five-component benchmark record, mimicking the original ADA record."""

    def __init__(self, PtrComp=None, Discr=0, EnumComp=0,
                 IntComp=0, StringComp=0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a new Record holding the same component values."""
        return Record(PtrComp=self.PtrComp, Discr=self.Discr,
                      EnumComp=self.EnumComp, IntComp=self.IntComp,
                      StringComp=self.StringComp)
+
# Integer booleans, as in the original C benchmark.
TRUE = 1
FALSE = 0
+
def main(loops=LOOPS):
    """Run the benchmark and print timing (milliseconds) and pystones/second."""
    benchtime, stones = pystones(loops)
    header = "Pystone(%s) time for %d passes = %gms" % (__version__, loops, benchtime)
    print(header)
    print("This machine benchmarks at %g pystones/second" % stones)
+
+
def pystones(loops=LOOPS):
    """Return (benchtime-in-ms, pystones-per-second) for *loops* iterations."""
    return Proc0(loops)
+
# Global benchmark state; the arrays are halved to save RAM (hence the
# // 2 index scaling in Proc0 and Proc8).
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*(51 // 2)
Array2Glob = [x[:] for x in [Array1Glob]*(51 // 2)]
PtrGlb = None
PtrGlbNext = None
+
def Proc0(loops=LOOPS):
    """Run the benchmark kernel *loops* times; return (benchtime_ms, loops/sec)."""
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext

    # Measure the cost of an empty loop so it can be subtracted later.
    starttime = ticks_ms()
    for i in range(loops):
        pass
    nulltime = ticks_diff(ticks_ms(), starttime)

    # Initialize the global record chain and string/array state.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8 // 2][7 // 2] = 10

    starttime = ticks_ms()

    # The benchmark kernel: one pass exercises every Proc*/Func* routine.
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 // IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)

    # Net benchmark time (ms) excludes the empty-loop overhead above;
    # the rate is computed with integer arithmetic only.
    benchtime = ticks_diff(ticks_ms(), starttime) - nulltime
    if benchtime == 0:
        loopsPerBenchtime = 0
    else:
        loopsPerBenchtime = (loops * 1000 // benchtime)
    return benchtime, loopsPerBenchtime
+
def Proc1(PtrParIn):
    # Allocate the "next" record as a copy of the global one and link it in.
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Break the self-reference so the record chain can be collected
    # (the v1.1 memory-leak fix described in the module docstring).
    NextRecord.PtrComp = None
    return PtrParIn
+
def Proc2(IntParIO):
    """Derive a new value from IntParIO and IntGlob.

    Terminates on the first pass once Char1Glob == 'A' (which Proc5 sets
    before this is called in the benchmark loop).
    """
    IntLoc = IntParIO + 10
    while True:
        if Char1Glob == 'A':
            IntLoc -= 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
+
def Proc3(PtrParOut):
    """Return PtrGlb.PtrComp (or the input unchanged when PtrGlb is None)."""
    global IntGlob

    if PtrGlb is None:
        # NOTE: the PtrGlb.IntComp assignment below would fail in this case;
        # the benchmark always initializes PtrGlb before calling Proc3.
        IntGlob = 100
    else:
        PtrParOut = PtrGlb.PtrComp
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
+
def Proc4():
    """Set Char2Glob to 'B'; the boolean computed here is discarded."""
    global Char2Glob

    BoolLoc = (Char1Glob == 'A') or BoolGlob
    Char2Glob = 'B'
+
def Proc5():
    """Reset Char1Glob to 'A' and clear BoolGlob."""
    global Char1Glob, BoolGlob

    Char1Glob = 'A'
    BoolGlob = FALSE
+
def Proc6(EnumParIn):
    """Map one enumeration value to another, as in the original benchmark."""
    # Default result: the input itself if Func3 accepts it, else Ident4.
    fallback = EnumParIn if Func3(EnumParIn) else Ident4
    if EnumParIn == Ident1:
        return Ident1
    if EnumParIn == Ident2:
        return Ident1 if IntGlob > 100 else Ident4
    if EnumParIn == Ident3:
        return Ident2
    if EnumParIn == Ident5:
        return Ident3
    # Ident4 and unrecognized values yield the fallback.
    return fallback
+
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + IntParI1 + 2."""
    return IntParI2 + (IntParI1 + 2)
+
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Update the arrays around index IntParI1 + 5; indices are halved
    to fit the low-memory arrays. Also sets IntGlob."""
    global IntGlob

    base = IntParI1 + 5
    Array1Par[base // 2] = IntParI2
    Array1Par[(base + 1) // 2] = Array1Par[base // 2]
    Array1Par[(base + 30) // 2] = base
    for idx in (base, base + 1):
        Array2Par[base // 2][idx // 2] = base
    Array2Par[base // 2][(base - 1) // 2] += 1
    Array2Par[(base + 20) // 2][base // 2] = Array1Par[base // 2]
    IntGlob = 5
+
def Func1(CharPar1, CharPar2):
    """Return Ident2 when the two characters are equal, else Ident1."""
    return Ident2 if CharPar1 == CharPar2 else Ident1
+
def Func2(StrParI1, StrParI2):
    # NOTE(review): CharLoc is only bound when Func1 reports a mismatch;
    # with equal characters at the compared positions this would loop
    # forever. The benchmark's fixed input strings guarantee a mismatch.
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        # Fall back to a lexicographic comparison of the two strings.
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
+
def Func3(EnumParIn):
    """Return TRUE exactly when the argument equals Ident3."""
    return TRUE if EnumParIn == Ident3 else FALSE
+
if __name__ == '__main__':
    # Command-line entry point: optional single argument = iteration count.
    import sys
    def error(msg):
        # Report a usage error on stderr and exit with status 100.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/queue.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/queue.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/quopri.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/quopri.py
new file mode 100644
index 00000000..3d0f0ac0
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/quopri.py
@@ -0,0 +1,244 @@
+#! /usr/bin/env python3
+
+"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
+
+# (Dec 1991 version).
+
__all__ = ["encode", "decode", "encodestring", "decodestring"]

# Quoted-printable special values (RFC 1521).
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''

# Prefer the C-accelerated binascii implementations when available.
try:
    from binascii import a2b_qp, b2a_qp
except ImportError:
    a2b_qp = None
    b2a_qp = None
+
+
def needsquoting(c, quotetabs, header):
    """Decide whether a particular byte ordinal needs to be quoted.

    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
    quoted.  Note that line-ending tabs and spaces are always encoded, as per
    RFC 1521.
    """
    assert isinstance(c, bytes)
    # Space/tab are quoted only on request.
    if c == b' ' or c == b'\t':
        return quotetabs
    # In header mode '_' must be escaped because it encodes a space.
    if c == b'_':
        return header
    if c == ESCAPE:
        return True
    # Anything outside printable ASCII needs quoting.
    return not (b' ' <= c <= b'~')
+
def quote(c):
    """Return the '=XX' hex-quoted form of the single byte *c*."""
    assert isinstance(c, bytes) and len(c) == 1
    hi, lo = divmod(ord(c), 16)
    return ESCAPE + bytes((HEX[hi], HEX[lo]))
+
+
+
def encode(input, output, quotetabs, header=False):
    """Read 'input', apply quoted-printable encoding, and write to 'output'.

    'input' and 'output' are files with readline() and write() methods.
    The 'quotetabs' flag indicates whether embedded tabs and spaces should be
    quoted.  Note that line-ending tabs and spaces are always encoded, as per
    RFC 1521.
    The 'header' flag indicates whether we are encoding spaces as _ as per
    RFC 1522.
    """

    # Fast path: delegate to the C-accelerated binascii implementation.
    if b2a_qp is not None:
        data = input.read()
        odata = b2a_qp(data, quotetabs=quotetabs, header=header)
        output.write(odata)
        return

    def write(s, output=output, lineEnd=b'\n'):
        # RFC 1521 requires that the line ending in a space or tab must have
        # that trailing character encoded.
        if s and s[-1:] in b' \t':
            output.write(s[:-1] + quote(s[-1:]) + lineEnd)
        elif s == b'.':
            # A lone dot could terminate an SMTP message; quote it.
            output.write(quote(s) + lineEnd)
        else:
            output.write(s + lineEnd)

    prevline = None
    while 1:
        line = input.readline()
        if not line:
            break
        outline = []
        # Strip off any readline induced trailing newline
        stripped = b''
        if line[-1:] == b'\n':
            line = line[:-1]
            stripped = b'\n'
        # Calculate the un-length-limited encoded line
        for c in line:
            c = bytes((c,))
            if needsquoting(c, quotetabs, header):
                c = quote(c)
            if header and c == b' ':
                outline.append(b'_')
            else:
                outline.append(c)
        # First, write out the previous line
        if prevline is not None:
            write(prevline)
        # Now see if we need any soft line breaks because of RFC-imposed
        # length limitations.  Then do the thisline->prevline dance.
        thisline = EMPTYSTRING.join(outline)
        while len(thisline) > MAXLINESIZE:
            # Don't forget to include the soft line break `=' sign in the
            # length calculation!
            write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
            thisline = thisline[MAXLINESIZE-1:]
        # Write out the current line
        prevline = thisline
    # Write out the last line, without a trailing newline
    if prevline is not None:
        write(prevline, lineEnd=stripped)
+
def encodestring(s, quotetabs=False, header=False):
    """Encode the bytes object *s* as quoted-printable and return bytes."""
    if b2a_qp is not None:
        return b2a_qp(s, quotetabs=quotetabs, header=header)
    from io import BytesIO
    sink = BytesIO()
    encode(BytesIO(s), sink, quotetabs, header)
    return sink.getvalue()
+
+
+
def decode(input, output, header=False):
    """Read 'input', apply quoted-printable decoding, and write to 'output'.
    'input' and 'output' are files with readline() and write() methods.
    If 'header' is true, decode underscore as space (per RFC 1522)."""

    # Fast path: delegate to the C-accelerated binascii implementation.
    if a2b_qp is not None:
        data = input.read()
        odata = a2b_qp(data, header=header)
        output.write(odata)
        return

    new = b''
    while 1:
        line = input.readline()
        if not line: break
        i, n = 0, len(line)
        if n > 0 and line[n-1:n] == b'\n':
            partial = 0; n = n-1
            # Strip trailing whitespace
            while n > 0 and line[n-1:n] in b" \t\r":
                n = n-1
        else:
            # A line without a newline is "partial": more data may follow.
            partial = 1
        while i < n:
            c = line[i:i+1]
            if c == b'_' and header:
                new = new + b' '; i = i+1
            elif c != ESCAPE:
                new = new + c; i = i+1
            elif i+1 == n and not partial:
                # '=' at end of line is a soft line break.
                partial = 1; break
            # BUGFIX: compare a one-byte slice, not line[i+1] (an int in
            # Python 3), which could never equal the bytes object ESCAPE,
            # making the '==' -> '=' branch unreachable.
            elif i+1 < n and line[i+1:i+2] == ESCAPE:
                new = new + ESCAPE; i = i+2
            elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
                new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
            else: # Bad escape sequence -- leave it in
                new = new + c; i = i+1
        if not partial:
            output.write(new + b'\n')
            new = b''
    if new:
        output.write(new)
+
def decodestring(s, header=False):
    """Decode the quoted-printable bytes object *s* and return bytes."""
    if a2b_qp is not None:
        return a2b_qp(s, header=header)
    from io import BytesIO
    sink = BytesIO()
    decode(BytesIO(s), sink, header=header)
    return sink.getvalue()
+
+
+
+# Other helper functions
def ishex(c):
    """Return true if the single byte 'c' is an ASCII hexadecimal digit."""
    assert isinstance(c, bytes)
    return c in b'0123456789abcdefABCDEF'
+
def unhex(s):
    """Get the integer value of a hexadecimal number."""
    value = 0
    for byte in s:
        b = bytes((byte,))
        if b'0' <= b <= b'9':
            digit = byte - ord('0')
        elif b'a' <= b <= b'f':
            digit = byte - ord('a') + 10
        elif b'A' <= b <= b'F':
            digit = byte - ord('A') + 10
        else:
            assert False, "non-hex digit " + repr(b)
        value = value * 16 + digit
    return value
+
+
+
def main():
    """Command-line driver: encode (default) or decode (-d) the given files."""
    import sys
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'td')
    except getopt.error as msg:
        sys.stdout = sys.stderr
        print(msg)
        print("usage: quopri [-t | -d] [file] ...")
        print("-t: quote tabs")
        print("-d: decode; default encode")
        sys.exit(2)
    deco = 0
    tabs = 0
    for o, a in opts:
        if o == '-t': tabs = 1
        if o == '-d': deco = 1
    if tabs and deco:
        sys.stdout = sys.stderr
        print("-t and -d are mutually exclusive")
        sys.exit(2)
    # "-" (or no arguments at all) means standard input.
    if not args: args = ['-']
    sts = 0
    for file in args:
        if file == '-':
            fp = sys.stdin.buffer
        else:
            try:
                fp = open(file, "rb")
            except IOError as msg:
                sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
                sts = 1
                continue
        try:
            if deco:
                decode(fp, sys.stdout.buffer)
            else:
                encode(fp, sys.stdout.buffer, tabs)
        finally:
            # Only close files we opened ourselves, never stdin.
            if file != '-':
                fp.close()
    if sts:
        sys.exit(sts)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/random.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/random.py
new file mode 100644
index 00000000..deebdf9f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/random.py
@@ -0,0 +1,27 @@
+from urandom import *
+
+
def randrange(start, stop=None):
    """Return a random integer in [start, stop), or [0, start) with one arg.

    Draws getrandbits() over the smallest power-of-two range covering the
    span and rejects out-of-range draws, keeping the result uniform.

    Fixes over the original: an empty range (stop <= start) now raises
    ValueError instead of looping forever, and a one-element range no
    longer relies on getrandbits(0).
    """
    if stop is None:
        stop = start
        start = 0
    upper = stop - start
    if upper <= 0:
        raise ValueError("empty range for randrange()")
    if upper == 1:
        # Only one possible value; avoid calling getrandbits(0).
        return start
    bits = 0
    pwr2 = 1
    while upper > pwr2:
        pwr2 <<= 1
        bits += 1
    # Rejection sampling: retry until the draw falls inside the range.
    while True:
        r = getrandbits(bits)
        if r < upper:
            return r + start
+
def randint(start, stop):
    """Return a random integer N such that start <= N <= stop (inclusive)."""
    return randrange(start, stop + 1)
+
def shuffle(seq):
    """Shuffle *seq* in place by swapping each slot with a random index."""
    size = len(seq)
    for pos in range(size):
        other = randrange(size)
        seq[pos], seq[other] = seq[other], seq[pos]
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/re.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/re.py
new file mode 100644
index 00000000..1d0a3927
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/re.py
@@ -0,0 +1,184 @@
+import sys
+import ffilib
+import array
+
+
# Handle to the PCRE shared library, accessed through ffi.
pcre = ffilib.open("libpcre")

#       pcre *pcre_compile(const char *pattern, int options,
#            const char **errptr, int *erroffset,
#            const unsigned char *tableptr);
pcre_compile = pcre.func("p", "pcre_compile", "sipps")

#       int pcre_exec(const pcre *code, const pcre_extra *extra,
#            const char *subject, int length, int startoffset,
#            int options, int *ovector, int ovecsize);
pcre_exec = pcre.func("i", "pcre_exec", "PPsiiipi")

#       int pcre_fullinfo(const pcre *code, const pcre_extra *extra,
#            int what, void *where);
pcre_fullinfo = pcre.func("i", "pcre_fullinfo", "PPip")


# Flag values mirror CPython's re module where possible.
IGNORECASE = I = 1
MULTILINE = M = 2
DOTALL = S = 4
VERBOSE = X = 8
# PCRE-specific option: anchor the match at the start offset (used by match()).
PCRE_ANCHORED = 0x10

# TODO. Note that Python3 has unicode by default
ASCII = A = 0
UNICODE = U = 0

# pcre_fullinfo() "what" code for the number of capturing subpatterns.
PCRE_INFO_CAPTURECOUNT = 2
+
+
+class PCREMatch:
+
+    def __init__(self, s, num_matches, offsets):
+        self.s = s
+        self.num = num_matches
+        self.offsets = offsets
+
+    def group(self, *n):
+        if not n:
+            return self.s[self.offsets[0]:self.offsets[1]]
+        if len(n) == 1:
+            return self.s[self.offsets[n[0]*2]:self.offsets[n[0]*2+1]]
+        return tuple(self.s[self.offsets[i*2]:self.offsets[i*2+1]] for i in n)
+
+    def groups(self, default=None):
+        assert default is None
+        return tuple(self.group(i + 1) for i in range(self.num - 1))
+
+    def start(self, n=0):
+        return self.offsets[n*2]
+
+    def end(self, n=0):
+        return self.offsets[n*2+1]
+
+    def span(self, n=0):
+        return self.offsets[n*2], self.offsets[n*2+1]
+
+
class PCREPattern:
    """Compiled regular expression backed by an opaque PCRE handle."""

    def __init__(self, compiled_ptn):
        # Opaque pointer returned by pcre_compile().
        self.obj = compiled_ptn

    def search(self, s, pos=0, endpos=-1, _flags=0):
        """Scan *s* starting at *pos*; return a PCREMatch or None."""
        assert endpos == -1, "pos: %d, endpos: %d" % (pos, endpos)
        buf = array.array('i', [0])
        pcre_fullinfo(self.obj, None, PCRE_INFO_CAPTURECOUNT, buf)
        cap_count = buf[0]
        # PCRE requires the ovector to hold 3 ints per capture slot.
        ov = array.array('i', [0, 0, 0] * (cap_count + 1))
        num = pcre_exec(self.obj, None, s, len(s), pos, _flags, ov, len(ov))
        if num == -1:
            # No match
            return None
        # We don't care how many matching subexpressions we got, we
        # care only about total # of capturing ones (including empty)
        return PCREMatch(s, cap_count + 1, ov)

    def match(self, s, pos=0, endpos=-1):
        """Like search(), but anchored at *pos*."""
        return self.search(s, pos, endpos, PCRE_ANCHORED)

    def sub(self, repl, s, count=0):
        """Replace matches in *s* with *repl* (a string or a callable)."""
        if not callable(repl):
            assert "\\" not in repl, "Backrefs not implemented"
        res = ""
        while s:
            m = self.search(s)
            if not m:
                return res + s
            beg, end = m.span()
            res += s[:beg]
            if callable(repl):
                res += repl(m)
            else:
                res += repl
            s = s[end:]
            # count == 0 means "replace all"; otherwise stop after count.
            if count != 0:
                count -= 1
                if count == 0:
                    return res + s
        return res

    def split(self, s, maxsplit=0):
        """Split *s* on matches; stop after *maxsplit* splits when > 0."""
        res = []
        while True:
            m = self.search(s)
            g = None
            if m:
                g = m.group(0)
            # An empty match would not advance the scan; treat like no match.
            if not m or not g:
                res.append(s)
                return res
            beg, end = m.span(0)
            res.append(s[:beg])
            if m.num > 1:
                res.extend(m.groups())
            s = s[end:]
            if maxsplit > 0:
                maxsplit -= 1
                if maxsplit == 0:
                    res.append(s)
                    return res

    def findall(self, s):
        """Return all matches: strings, or tuples when there are 2+ groups."""
        res = []
        start = 0
        while True:
            m = self.search(s, start)
            if not m:
                return res
            if m.num == 1:
                res.append(m.group(0))
            elif m.num == 2:
                res.append(m.group(1))
            else:
                res.append(m.groups())
            beg, end = m.span(0)
            start = end
+
+
def compile(pattern, flags=0):
    """Compile *pattern* with PCRE and wrap the handle in a PCREPattern."""
    err_msg = bytes(4)
    err_offset = bytes(4)
    handle = pcre_compile(pattern, flags, err_msg, err_offset, None)
    assert handle
    return PCREPattern(handle)
+
+
def search(pattern, string, flags=0):
    """Compile *pattern* and search *string* for the first match."""
    return compile(pattern, flags).search(string)
+
+
def match(pattern, string, flags=0):
    """Compile *pattern* anchored and try to match at the start of *string*."""
    return compile(pattern, flags | PCRE_ANCHORED).search(string)
+
+
def sub(pattern, repl, s, count=0, flags=0):
    """Module-level convenience wrapper for PCREPattern.sub()."""
    return compile(pattern, flags).sub(repl, s, count)
+
+
def split(pattern, s, maxsplit=0, flags=0):
    """Module-level convenience wrapper for PCREPattern.split()."""
    return compile(pattern, flags).split(s, maxsplit)
+
def findall(pattern, s, flags=0):
    """Module-level convenience wrapper for PCREPattern.findall()."""
    return compile(pattern, flags).findall(s)
+
+
def escape(s):
    """Return *s* with every character outside [0-9A-Za-z_] backslash-escaped."""
    pieces = []
    for ch in s:
        keep = ('0' <= ch <= '9') or ('A' <= ch <= 'Z') \
               or ('a' <= ch <= 'z') or ch == '_'
        pieces.append(ch if keep else "\\" + ch)
    return "".join(pieces)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/readline.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/readline.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/reprlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/reprlib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/runpy.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/runpy.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sched.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sched.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/select.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/select.py
new file mode 100644
index 00000000..0ba96e16
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/select.py
@@ -0,0 +1,105 @@
+import ffi
+import ustruct as struct
+import os
+import errno
+import ffilib
+import utime
+import math
+from uselect import *
+
+
libc = ffilib.libc()

#int epoll_create(int size);
epoll_create = libc.func("i", "epoll_create", "i")
#int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event);
epoll_ctl = libc.func("i", "epoll_ctl", "iiiP")
#int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout);
epoll_wait = libc.func("i", "epoll_wait", "ipii")

# epoll event-mask bits (values from <sys/epoll.h>).
EPOLLIN = 0x001
EPOLLPRI = 0x002
EPOLLOUT = 0x004
EPOLLERR = 0x008
EPOLLHUP = 0x010
EPOLLRDHUP = 0x2000
EPOLLONESHOT = 1 << 30
# NOTE: 1 << 31 is a positive Python int; it fits the unsigned "I"/"Q"
# fields of the epoll_event pack formats chosen below.
EPOLLET  = 1 << 31

# epoll_ctl() operation codes.
EPOLL_CTL_ADD = 1
EPOLL_CTL_DEL = 2
EPOLL_CTL_MOD = 3

# Not included in uselect.
POLLPRI = 0x002

# TODO: struct epoll_event's 2nd member is union of uint64_t, etc.
# On x86, uint64_t is 4-byte aligned, on many other platforms - 8-byte.
# Until uctypes module can assign native struct offset, use dirty hack
# below.
# TODO: Get rid of all this dirtiness, move it on C side
if ffilib.bitness > 32:
    # On x86_64, epoll_event is packed struct
    epoll_event = "<IO"
elif struct.calcsize("IQ") == 12:
    epoll_event = "IO"
else:
    epoll_event = "QO"
+
class Epoll:
    """Thin object wrapper over the Linux epoll(7) FFI bindings above."""

    def __init__(self, epfd):
        # epfd: descriptor returned by epoll_create().
        self.epfd = epfd
        # Reusable dummy epoll_event buffer (see unregister()).
        self.evbuf = struct.pack(epoll_event, 0, None)
        # fd -> retval mapping; also keeps retval objects alive (see register()).
        self.registry = {}

    def register(self, fd, eventmask=EPOLLIN|EPOLLPRI|EPOLLOUT, retval=None):
        "retval is extension to stdlib, value to use in results from .poll()."
        if retval is None:
            retval = fd
        s = struct.pack(epoll_event, eventmask, retval)
        r = epoll_ctl(self.epfd, EPOLL_CTL_ADD, fd, s)
        # fd already registered: fall back to modifying it in place.
        if r == -1 and os.errno_() == errno.EEXIST:
            r = epoll_ctl(self.epfd, EPOLL_CTL_MOD, fd, s)
        os.check_error(r)
        # We must keep reference to retval, or it may be GCed. And we must
        # keep mapping from fd to retval to be able to get rid of this retval
        # reference later.
        self.registry[fd] = retval

    def unregister(self, fd):
        # Pass dummy event structure, to workaround kernel bug
        r = epoll_ctl(self.epfd, EPOLL_CTL_DEL, fd, self.evbuf)
        os.check_error(r)
        del self.registry[fd]

    def poll_ms(self, timeout=-1):
        # Wait up to *timeout* milliseconds (-1 = block forever); returns a
        # list with at most one (retval, eventmask) pair (maxevents is 1).
        s = bytearray(self.evbuf)
        if timeout >= 0:
            deadline = utime.ticks_add(utime.ticks_ms(), timeout)
        while True:
            n = epoll_wait(self.epfd, s, 1, timeout)
            # NOTE(review): os.check_error() appears to return truthy for
            # retryable failures (e.g. EINTR) -- MicroPython extension, confirm.
            if not os.check_error(n):
                break
            # Interrupted: recompute the remaining timeout from the deadline.
            if timeout >= 0:
                timeout = utime.ticks_diff(deadline, utime.ticks_ms())
                if timeout < 0:
                    n = 0
                    break
        res = []
        if n > 0:
            vals = struct.unpack(epoll_event, s)
            res.append((vals[1], vals[0]))
        return res

    def poll(self, timeout=-1):
        # Stdlib-compatible poll(): *timeout* is in seconds.
        return self.poll_ms(-1 if timeout == -1 else math.ceil(timeout * 1000))

    def close(self):
        os.close(self.epfd)
+
+
def epoll(sizehint=4):
    """Create and return an Epoll instance (*sizehint* is a legacy kernel hint)."""
    epfd = epoll_create(sizehint)
    os.check_error(epfd)
    return Epoll(epfd)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/selectors.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/selectors.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shelve.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shelve.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shlex.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shlex.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py
new file mode 100644
index 00000000..c26ea917
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/shutil.py
@@ -0,0 +1,28 @@
+# Reimplement, because CPython3.3 impl is rather bloated
+import os
+
+
def rmtree(top):
    """Recursively delete the directory tree rooted at *top*.

    Walks bottom-up so each directory is empty by the time it is removed.
    """
    for dirpath, dirnames, filenames in os.walk(top, False):
        for name in filenames:
            os.unlink(dirpath + "/" + name)
        os.rmdir(dirpath)
+
def copyfileobj(src, dest, length=512):
    """Copy all data from file-like *src* to *dest* in chunks of *length* bytes.

    Uses a reusable buffer via readinto() when *src* supports it, falling
    back to plain read() otherwise.
    """
    if not hasattr(src, "readinto"):
        while True:
            chunk = src.read(length)
            if not chunk:
                return
            dest.write(chunk)
    buf = bytearray(length)
    view = memoryview(buf)
    while True:
        n = src.readinto(buf)
        if not n:
            return
        # Write only the filled portion on a short final read.
        dest.write(view if n == length else view[:n])
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/signal.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/signal.py
new file mode 100644
index 00000000..c41eb2bd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/signal.py
@@ -0,0 +1,21 @@
import ffilib


# CPython-compatible handler sentinels (integer values match <signal.h>).
SIG_DFL = 0
SIG_IGN = 1

# Common POSIX signal numbers.
SIGINT = 2
SIGPIPE = 13
SIGTERM = 15

libc = ffilib.libc()

# Two bindings of the same C signal() function: one taking an integer
# handler (SIG_DFL / SIG_IGN) and one taking a function pointer.
signal_i = libc.func("i", "signal", "ii")
signal_p = libc.func("i", "signal", "ip")
+
def signal(n, handler):
    """Install *handler* for signal *n*.

    Integer handlers (SIG_DFL/SIG_IGN) go through the integer binding;
    callables are wrapped in an ffi callback first.
    """
    if not isinstance(handler, int):
        import ffi
        return signal_p(n, ffi.callback("v", handler, "i"))
    return signal_i(n, handler)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/smtplib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/smtplib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/socket.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/socket.py
new file mode 100644
index 00000000..e454c058
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/socket.py
@@ -0,0 +1,59 @@
+from usocket import *
+import usocket as _socket
+
+
# CPython-compatibility constants that usocket does not provide.
_GLOBAL_DEFAULT_TIMEOUT = 30
IPPROTO_IP = 0
# NOTE(review): these multicast option values are the Linux ones --
# confirm for other ports.
IP_ADD_MEMBERSHIP = 35
IP_DROP_MEMBERSHIP = 36
INADDR_ANY = 0

# CPython alias: socket.error has been OSError since Python 3.3.
error = OSError
+
def _resolve_addr(addr):
    # Normalize *addr* into a packed sockaddr usable by usocket calls.
    # Already-packed bytes/bytearray addresses pass through unchanged.
    if isinstance(addr, (bytes, bytearray)):
        return addr
    family = _socket.AF_INET
    # Tuples longer than (host, port) are treated as IPv6.
    if len(addr) != 2:
        family = _socket.AF_INET6
    if addr[0] == "":
        # Empty host means the wildcard address for the family.
        a = "0.0.0.0" if family == _socket.AF_INET else "::"
    else:
        a = addr[0]
    # getaddrinfo entries are (family, type, proto, canonname, sockaddr);
    # use the first entry's sockaddr.
    a = getaddrinfo(a, addr[1], family)
    return a[0][4]
+
def inet_aton(addr):
    """Pack a dotted-quad IPv4 address string into its 4-byte binary form."""
    return inet_pton(AF_INET, addr)
+
def create_connection(addr, timeout=None, source_address=None):
    """Return a new socket connected to *addr* == (host, port).

    Tries every address returned by getaddrinfo() in order.

    Fixes over the previous version: only OSError is swallowed between
    attempts (the bare ``except`` also caught KeyboardInterrupt etc.),
    a failed socket is closed instead of leaked, and if every attempt
    fails the last error is re-raised instead of silently returning None.

    NOTE: *timeout* and *source_address* are accepted for CPython
    compatibility but are not applied.
    """
    last_err = None
    for ai in getaddrinfo(addr[0], addr[1]):
        s = socket()
        try:
            s.connect(ai[4])
            return s
        except OSError as err:
            s.close()
            last_err = err
    if last_err is None:
        last_err = OSError("getaddrinfo returned no addresses for %r" % (addr,))
    raise last_err
+
+
class socket(_socket.socket):
    """CPython-style socket wrapper over usocket with address conversion."""

    def accept(self):
        # Convert the raw binary peer address into a (host_str, port) tuple.
        s, addr = super().accept()
        addr = _socket.sockaddr(addr)
        return (s, (_socket.inet_ntop(addr[0], addr[1]), addr[2]))

    def bind(self, addr):
        return super().bind(_resolve_addr(addr))

    def connect(self, addr):
        return super().connect(_resolve_addr(addr))

    def sendall(self, *args):
        # NOTE(review): delegates to a single send(); unlike CPython's
        # sendall() this may write only part of the buffer -- confirm the
        # target port's usocket.send semantics.
        return self.send(*args)

    def sendto(self, data, addr):
        return super().sendto(data, _resolve_addr(addr))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/socketserver.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/socketserver.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sqlite3.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sqlite3.py
new file mode 100644
index 00000000..de18a03e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sqlite3.py
@@ -0,0 +1,141 @@
+import sys
+import ffilib
+
+
sq3 = ffilib.open("libsqlite3")

# NOTE(review): these bind the legacy sqlite3_prepare() interface;
# sqlite3_prepare_v2() is the one recommended by the SQLite docs.
sqlite3_open = sq3.func("i", "sqlite3_open", "sp")
#int sqlite3_close(sqlite3*);
sqlite3_close = sq3.func("i", "sqlite3_close", "p")
#int sqlite3_prepare(
#  sqlite3 *db,            /* Database handle */
#  const char *zSql,       /* SQL statement, UTF-8 encoded */
#  int nByte,              /* Maximum length of zSql in bytes. */
#  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
#  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
#);
sqlite3_prepare = sq3.func("i", "sqlite3_prepare", "psipp")
#int sqlite3_finalize(sqlite3_stmt *pStmt);
sqlite3_finalize = sq3.func("i", "sqlite3_finalize", "p")
#int sqlite3_step(sqlite3_stmt*);
sqlite3_step = sq3.func("i", "sqlite3_step", "p")
#int sqlite3_column_count(sqlite3_stmt *pStmt);
sqlite3_column_count = sq3.func("i", "sqlite3_column_count", "p")
#int sqlite3_column_type(sqlite3_stmt*, int iCol);
sqlite3_column_type = sq3.func("i", "sqlite3_column_type", "pi")
sqlite3_column_int = sq3.func("i", "sqlite3_column_int", "pi")
# using "d" return type gives wrong results
sqlite3_column_double = sq3.func("d", "sqlite3_column_double", "pi")
sqlite3_column_text = sq3.func("s", "sqlite3_column_text", "pi")
#sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
# TODO: should return long int
sqlite3_last_insert_rowid = sq3.func("i", "sqlite3_last_insert_rowid", "p")
#const char *sqlite3_errmsg(sqlite3*);
sqlite3_errmsg = sq3.func("s", "sqlite3_errmsg", "p")

# Too recent
##const char *sqlite3_errstr(int);
#sqlite3_errstr = sq3.func("s", "sqlite3_errstr", "i")


# SQLite result codes used by this module.
SQLITE_OK         = 0
SQLITE_ERROR      = 1
SQLITE_BUSY       = 5
SQLITE_MISUSE     = 21
SQLITE_ROW        = 100
SQLITE_DONE       = 101

# SQLite fundamental column datatypes.
SQLITE_INTEGER  = 1
SQLITE_FLOAT    = 2
SQLITE_TEXT     = 3
SQLITE_BLOB     = 4
SQLITE_NULL     = 5
+
+
class Error(Exception):
    """Raised with (result_code, message) for any non-OK SQLite result."""
    pass
+
+
def check_error(db, s):
    """Raise Error carrying SQLite's message for any non-OK status *s*."""
    if s == SQLITE_OK:
        return
    raise Error(s, sqlite3_errmsg(db))
+
+
class Connections:
    """A single open SQLite database handle.

    NOTE(review): the name is a misnomer -- one instance holds exactly
    ONE connection; kept for backward compatibility.
    """

    def __init__(self, h):
        # h: integer sqlite3* handle produced by connect().
        self.h = h

    def cursor(self):
        """Return a new Cursor sharing this connection's handle."""
        return Cursor(self.h)

    def close(self):
        """Close the database, raising Error if SQLite reports failure."""
        s = sqlite3_close(self.h)
        check_error(self.h, s)
+
+
class Cursor:
    """Cursor over a prepared SQLite statement.

    SQL parameters are interpolated into the statement text via quote()
    rather than bound with sqlite3_bind_*; see the note in execute().
    """

    def __init__(self, h):
        self.h = h          # sqlite3* database handle (integer)
        self.stmnt = None   # sqlite3_stmt* of the current statement, if any

    def execute(self, sql, params=None):
        """Prepare *sql* (with %-interpolated, pre-quoted params) and, for
        statements that return no rows, run it to completion.

        Fix: removed a leftover debug ``print(sql)`` that echoed every
        statement to stdout.
        SECURITY NOTE: params are spliced into the SQL text via quote(),
        not bound through the C API -- do not pass untrusted input here.
        """
        if params:
            params = [quote(v) for v in params]
            sql = sql % tuple(params)
        # NOTE(review): the 4-byte out-buffer assumes 32-bit pointers --
        # confirm behavior on 64-bit builds.
        b = bytearray(4)
        s = sqlite3_prepare(self.h, sql, -1, b, None)
        check_error(self.h, s)
        self.stmnt = int.from_bytes(b, sys.byteorder)
        #print("stmnt", self.stmnt)
        self.num_cols = sqlite3_column_count(self.stmnt)
        #print("num_cols", self.num_cols)
        # If it's not select, actually execute it here
        # num_cols == 0 for statements which don't return data (=> modify it)
        if not self.num_cols:
            v = self.fetchone()
            assert v is None
            self.lastrowid = sqlite3_last_insert_rowid(self.h)

    def close(self):
        """Finalize the current prepared statement."""
        s = sqlite3_finalize(self.stmnt)
        check_error(self.h, s)

    def make_row(self):
        """Decode the current result row into a tuple of Python values."""
        res = []
        for i in range(self.num_cols):
            t = sqlite3_column_type(self.stmnt, i)
            #print("type", t)
            if t == SQLITE_INTEGER:
                res.append(sqlite3_column_int(self.stmnt, i))
            elif t == SQLITE_FLOAT:
                res.append(sqlite3_column_double(self.stmnt, i))
            elif t == SQLITE_TEXT:
                res.append(sqlite3_column_text(self.stmnt, i))
            else:
                # BLOB and NULL columns are not supported yet.
                raise NotImplementedError
        return tuple(res)

    def fetchone(self):
        """Step the statement; return the next row tuple, or None when done."""
        res = sqlite3_step(self.stmnt)
        #print("step:", res)
        if res == SQLITE_DONE:
            return None
        if res == SQLITE_ROW:
            return self.make_row()
        check_error(self.h, res)
+
+
def connect(fname):
    """Open database file *fname* and return a Connections wrapper.

    Fix: the result code of sqlite3_open() was previously ignored, so a
    failed open handed back a bogus handle; it is now checked.
    NOTE(review): the 4-byte out-buffer assumes 32-bit pointers.
    """
    b = bytearray(4)
    s = sqlite3_open(fname, b)
    h = int.from_bytes(b, sys.byteorder)
    # Per the SQLite docs a handle is normally returned even on failure,
    # so sqlite3_errmsg() on it is valid; check_error raises on non-OK.
    check_error(h, s)
    return Connections(h)
+
+
def quote(val):
    """Render *val* as an SQL literal.

    Fix: single quotes inside strings are doubled per SQL syntax;
    previously a value like "O'Brien" produced a malformed (and
    injectable) literal.
    NOTE: this module still builds SQL by string interpolation (see
    Cursor.execute); real parameter binding would be safer.
    """
    if isinstance(val, str):
        return "'%s'" % val.replace("'", "''")
    return str(val)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ssl.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ssl.py
new file mode 100644
index 00000000..eb7b71e4
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ssl.py
@@ -0,0 +1,28 @@
+from ussl import *
+import ussl as _ussl
+
# Constants
# ussl may not export the CERT_* constants; synthesize unique sentinel
# objects for any missing ones so identity comparisons still work.
for sym in "CERT_NONE", "CERT_OPTIONAL", "CERT_REQUIRED":
    if sym not in globals():
        globals()[sym] = object()
+
+
def wrap_socket(sock, keyfile=None, certfile=None, server_side=False,
                cert_reqs=CERT_NONE, *, ca_certs=None, server_hostname=None):
    """Wrap *sock* in SSL, forwarding only non-default options to ussl.

    Only arguments that differ from their defaults are passed through,
    since ussl ports may not accept every keyword.
    """
    # TODO: More arguments accepted by CPython could also be handled here.
    # That would allow us to accept ca_certs as a positional argument, which
    # we should.
    options = (
        ("keyfile", keyfile, keyfile is not None),
        ("certfile", certfile, certfile is not None),
        ("server_side", server_side, server_side is not False),
        ("cert_reqs", cert_reqs, cert_reqs is not CERT_NONE),
        ("ca_certs", ca_certs, ca_certs is not None),
        ("server_hostname", server_hostname, server_hostname is not None),
    )
    kw = {name: value for name, value, given in options if given}
    return _ussl.wrap_socket(sock, **kw)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/stat.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/stat.py
new file mode 100644
index 00000000..704adfe2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/stat.py
@@ -0,0 +1,149 @@
+"""Constants/functions for interpreting results of os.stat() and os.lstat().
+
+Suggested usage: from stat import *
+"""
+
+# Indices for stat struct members in the tuple returned by os.stat()
+
+ST_MODE  = 0
+ST_INO   = 1
+ST_DEV   = 2
+ST_NLINK = 3
+ST_UID   = 4
+ST_GID   = 5
+ST_SIZE  = 6
+ST_ATIME = 7
+ST_MTIME = 8
+ST_CTIME = 9
+
+# Extract bits from the mode
+
def S_IMODE(mode):
    """Return the permission bits of *mode* -- the part os.chmod() can set."""
    return mode & 0o7777
+
def S_IFMT(mode):
    """Return the file-type bits of *mode* (compare against the S_IF* values)."""
    return mode & 0o170000
+
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
# These are the POSIX st_mode file-type field values.

S_IFDIR  = 0o040000  # directory
S_IFCHR  = 0o020000  # character device
S_IFBLK  = 0o060000  # block device
S_IFREG  = 0o100000  # regular file
S_IFIFO  = 0o010000  # fifo (named pipe)
S_IFLNK  = 0o120000  # symbolic link
S_IFSOCK = 0o140000  # socket file
+
# Predicates for each file type: each compares the masked file-type field
# of *mode* against the matching S_IF* constant.

def S_ISDIR(mode):
    """True when *mode* denotes a directory."""
    return (mode & 0o170000) == S_IFDIR

def S_ISCHR(mode):
    """True when *mode* denotes a character special device."""
    return (mode & 0o170000) == S_IFCHR

def S_ISBLK(mode):
    """True when *mode* denotes a block special device."""
    return (mode & 0o170000) == S_IFBLK

def S_ISREG(mode):
    """True when *mode* denotes a regular file."""
    return (mode & 0o170000) == S_IFREG

def S_ISFIFO(mode):
    """True when *mode* denotes a FIFO (named pipe)."""
    return (mode & 0o170000) == S_IFIFO

def S_ISLNK(mode):
    """True when *mode* denotes a symbolic link."""
    return (mode & 0o170000) == S_IFLNK

def S_ISSOCK(mode):
    """True when *mode* denotes a socket."""
    return (mode & 0o170000) == S_IFSOCK
+
# Names for permission bits
# (POSIX permission bits plus BSD/OS X file flags; values match CPython's
# stat module.)

S_ISUID = 0o4000  # set UID bit
S_ISGID = 0o2000  # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000  # sticky bit
S_IREAD = 0o0400  # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100  # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700  # mask for owner permissions
S_IRUSR = 0o0400  # read by owner
S_IWUSR = 0o0200  # write by owner
S_IXUSR = 0o0100  # execute by owner
S_IRWXG = 0o0070  # mask for group permissions
S_IRGRP = 0o0040  # read by group
S_IWGRP = 0o0020  # write by group
S_IXGRP = 0o0010  # execute by group
S_IRWXO = 0o0007  # mask for others (not in group) permissions
S_IROTH = 0o0004  # read by others
S_IWOTH = 0o0002  # write by others
S_IXOTH = 0o0001  # execute by others

# Names for file flags

UF_NODUMP    = 0x00000001  # do not dump file
UF_IMMUTABLE = 0x00000002  # file may not be changed
UF_APPEND    = 0x00000004  # file may only be appended to
UF_OPAQUE    = 0x00000008  # directory is opaque when viewed through a union stack
UF_NOUNLINK  = 0x00000010  # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN    = 0x00008000  # OS X: file should not be displayed
SF_ARCHIVED  = 0x00010000  # file may be archived
SF_IMMUTABLE = 0x00020000  # file may not be changed
SF_APPEND    = 0x00040000  # file may only be appended to
SF_NOUNLINK  = 0x00100000  # file may not be renamed or deleted
SF_SNAPSHOT  = 0x00200000  # file is a snapshot file
+
+
# Table used by filemode(): one tuple of (bit, char) alternatives per output
# column; the first alternative whose bits are all set wins, '-' otherwise.
_filemode_table = (
    ((S_IFLNK,         "l"),
     (S_IFREG,         "-"),
     (S_IFBLK,         "b"),
     (S_IFDIR,         "d"),
     (S_IFCHR,         "c"),
     (S_IFIFO,         "p")),

    ((S_IRUSR,         "r"),),
    ((S_IWUSR,         "w"),),
    ((S_IXUSR|S_ISUID, "s"),
     (S_ISUID,         "S"),
     (S_IXUSR,         "x")),

    ((S_IRGRP,         "r"),),
    ((S_IWGRP,         "w"),),
    ((S_IXGRP|S_ISGID, "s"),
     (S_ISGID,         "S"),
     (S_IXGRP,         "x")),

    ((S_IROTH,         "r"),),
    ((S_IWOTH,         "w"),),
    ((S_IXOTH|S_ISVTX, "t"),
     (S_ISVTX,         "T"),
     (S_IXOTH,         "x"))
)
+
def filemode(mode):
    """Convert a file's mode to an ls-style string such as '-rwxr-xr-x'."""
    chars = []
    for alternatives in _filemode_table:
        ch = "-"
        for bit, candidate in alternatives:
            # First alternative whose bits are all present wins.
            if mode & bit == bit:
                ch = candidate
                break
        chars.append(ch)
    return "".join(chars)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/statistics.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/statistics.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/string.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/string.py
new file mode 100644
index 00000000..d8ed89e3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/string.py
@@ -0,0 +1,26 @@
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
ascii_uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
# Raw string: the backslash before ']' is a literal character; the non-raw
# form relied on the unrecognized escape "\]" and triggers an "invalid
# escape sequence" SyntaxWarning on modern CPython. The value is unchanged.
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + ascii_letters + punctuation + whitespace
+
+
def translate(s, map):
    """Return a copy of *s* with characters replaced through *map*.

    *map* maps ordinals to an ordinal, a replacement string, or None
    (which deletes the character); unmapped characters pass through.
    """
    pieces = []
    for ch in s:
        code = ord(ch)
        if code not in map:
            pieces.append(ch)
            continue
        repl = map[code]
        if isinstance(repl, int):
            pieces.append(chr(repl))
        elif repl is not None:
            pieces.append(repl)
    return "".join(pieces)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/stringprep.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/stringprep.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/struct.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/struct.py
new file mode 100644
index 00000000..74ed9038
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/struct.py
@@ -0,0 +1,13 @@
+from ustruct import *
+
class Struct:
    """CPython-compatible wrapper caching a struct format and its size."""

    def __init__(self, format):
        self.format = format          # the original format string
        self.size = calcsize(format)  # fixed byte size of this layout

    def pack(self, *vals):
        """Pack *vals* according to this struct's format."""
        return pack(self.format, *vals)

    def unpack(self, buf):
        """Unpack *buf* according to this struct's format."""
        return unpack(self.format, buf)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/subprocess.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/subprocess.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sys.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/sys.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tarfile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tarfile.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/telnetlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/telnetlib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tempfile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tempfile.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/pystone.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/pystone.py
new file mode 100644
index 00000000..a41f1e53
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/pystone.py
@@ -0,0 +1,277 @@
+#! /usr/bin/env python3
+
+"""
+"PYSTONE" Benchmark Program
+
+Version:        Python/1.2 (corresponds to C/1.1 plus 3 Pystone fixes)
+
+Author:         Reinhold P. Weicker,  CACM Vol 27, No 10, 10/84 pg. 1013.
+
+                Translated from ADA to C by Rick Richardson.
+                Every method to preserve ADA-likeness has been used,
+                at the expense of C-ness.
+
+                Translated from C to Python by Guido van Rossum.
+
+Version History:
+
+                Version 1.1 corrects two bugs in version 1.0:
+
+                First, it leaked memory: in Proc1(), NextRecord ends
+                up having a pointer to itself.  I have corrected this
+                by zapping NextRecord.PtrComp at the end of Proc1().
+
+                Second, Proc3() used the operator != to compare a
+                record to None.  This is rather inefficient and not
+                true to the intention of the original benchmark (where
+                a pointer comparison to None is intended; the !=
+                operator attempts to find a method __cmp__ to do value
+                comparison of the record).  Version 1.1 runs 5-10
+                percent faster than version 1.0, so benchmark figures
+                of different versions can't be compared directly.
+
+                Version 1.2 changes the division to floor division.
+
+                Under Python 3 version 1.1 would use the normal division
+                operator, resulting in some of the operations mistakenly
+                yielding floats. Version 1.2 instead uses floor division
+                making the benchmark a integer benchmark again.
+
+"""
+
# Default number of benchmark passes.
LOOPS = 50000

# NOTE(review): time.clock was removed in CPython 3.8; MicroPython's time
# module still provides it -- confirm the intended target runtime.
from time import clock

__version__ = "1.2"

# Enumeration values used throughout the benchmark (1..5).
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
+
class Record:
    """Five-field mutable record mirroring the ADA benchmark's record type."""

    def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
                       IntComp = 0, StringComp = 0):
        (self.PtrComp, self.Discr, self.EnumComp,
         self.IntComp, self.StringComp) = (PtrComp, Discr, EnumComp,
                                           IntComp, StringComp)

    def copy(self):
        """Return a new Record with identical (shallow) field values."""
        return Record(self.PtrComp, self.Discr, self.EnumComp,
                      self.IntComp, self.StringComp)
+
# C-style booleans, as in the original benchmark.
TRUE = 1
FALSE = 0

def main(loops=LOOPS):
    # Run the benchmark and report elapsed time and pystones/second.
    benchtime, stones = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % \
          (__version__, loops, benchtime))
    print("This machine benchmarks at %g pystones/second" % stones)


def pystones(loops=LOOPS):
    """Return (benchtime_seconds, pystones_per_second) for *loops* passes."""
    return Proc0(loops)

# Global benchmark state, (re)initialized by Proc0() on each run.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None

def Proc0(loops=LOOPS):
    # Core driver: measures loop overhead, then runs the benchmark body.
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext

    # Measure empty-loop overhead so it can be subtracted below.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime

    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10

    starttime = clock()

    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 // IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)

    benchtime = clock() - starttime - nulltime
    # Guard against a zero benchtime on very fast machines / tiny loop counts.
    if benchtime == 0.0:
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
+
# Proc1: record/pointer shuffling. Zapping NextRecord.PtrComp at the end
# avoids the self-reference leak described in the version history above.
def Proc1(PtrParIn):
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    NextRecord.PtrComp = None
    return PtrParIn

# NOTE(review): EnumLoc is only assigned when Char1Glob == 'A'; the
# benchmark establishes that invariant via Proc5() before each call.
def Proc2(IntParIO):
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO

# NOTE(review): if PtrGlb is None the final line still dereferences it;
# Proc0() always assigns PtrGlb before Proc1() (and thus Proc3()) runs.
def Proc3(PtrParOut):
    global IntGlob

    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut

def Proc4():
    global Char2Glob

    BoolLoc = Char1Glob == 'A'
    BoolLoc = BoolLoc or BoolGlob
    Char2Glob = 'B'

def Proc5():
    global Char1Glob
    global BoolGlob

    Char1Glob = 'A'
    BoolGlob = FALSE

# Proc6: enumeration mapping exercised via the Ident* constants.
def Proc6(EnumParIn):
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
+
def Proc7(IntParI1, IntParI2):
    IntLoc = IntParI1 + 2
    IntParOut = IntParI2 + IntLoc
    return IntParOut

# Proc8: array exercises; indices stay within the 51-element globals
# because Proc0 always passes IntParI1 == 3 (so IntLoc == 8).
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    global IntGlob

    IntLoc = IntParI1 + 5
    Array1Par[IntLoc] = IntParI2
    Array1Par[IntLoc+1] = Array1Par[IntLoc]
    Array1Par[IntLoc+30] = IntLoc
    for IntIndex in range(IntLoc, IntLoc+2):
        Array2Par[IntLoc][IntIndex] = IntLoc
    Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
    Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
    IntGlob = 5

def Func1(CharPar1, CharPar2):
    CharLoc1 = CharPar1
    CharLoc2 = CharLoc1
    if CharLoc2 != CharPar2:
        return Ident1
    else:
        return Ident2

# NOTE(review): CharLoc is only bound inside the loop body; the benchmark's
# fixed input strings guarantee the Func1 branch is taken at least once.
def Func2(StrParI1, StrParI2):
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE

def Func3(EnumParIn):
    EnumLoc = EnumParIn
    if EnumLoc == Ident3: return TRUE
    return FALSE
+
# Command-line entry point: one optional argument overrides LOOPS.
if __name__ == '__main__':
    import sys
    def error(msg):
        # Print a usage message and exit with status 100.
        print(msg, end=' ', file=sys.stderr)
        print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
        sys.exit(100)
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/support.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/support.py
new file mode 100644
index 00000000..cc65613a
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/test/support.py
@@ -0,0 +1,65 @@
+import sys
+import io
+import unittest
+import gc
+import contextlib
+
+
+TESTFN = '@test'
+
def run_unittest(*classes):
    """Build a suite from TestCase classes (or module names) and run it.

    String arguments are imported as modules and scanned for TestCase
    subclasses, which are added to the suite directly (MicroPython's
    unittest accepts a class where CPython wants an instance/suite).
    """
    suite = unittest.TestSuite()
    for c in classes:
        if isinstance(c, str):
            c = __import__(c)
            for name in dir(c):
                obj = getattr(c, name)
                if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
                    suite.addTest(obj)
        else:
            suite.addTest(c)
    # NOTE(review): unittest.TestRunner is a MicroPython API (CPython uses
    # TextTestRunner); the run result is currently discarded.
    runner = unittest.TestRunner()
    result = runner.run(suite)
+
def can_symlink():
    """Symlink support is never available in this environment."""
    return False
+
def skip_unless_symlink(test):
    """Skip decorator for tests that require functional symlink"""
    if can_symlink():
        return test
    return unittest.skip("Requires functional symlink implementation")(test)
+
def create_empty_file(name):
    """Create (or truncate) *name* as a zero-length file."""
    with open(name, "w"):
        pass
+
@contextlib.contextmanager
def disable_gc():
    """Disable the garbage collector inside the block, restoring prior state."""
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        # Only re-enable if the collector was running when we started.
        if was_enabled:
            gc.enable()
+
def gc_collect():
    """Run a full collection three times so chained finalizers settle."""
    for _ in range(3):
        gc.collect()
+
@contextlib.contextmanager
def captured_output(stream_name):
    """Temporarily replace sys.<stream_name> with a StringIO and yield it."""
    replacement = io.StringIO()
    saved = getattr(sys, stream_name)
    setattr(sys, stream_name, replacement)
    try:
        yield replacement
    finally:
        setattr(sys, stream_name, saved)
+
def captured_stderr():
    """Shorthand for captured_output("stderr")."""
    return captured_output("stderr")
+
def requires_IEEE_754(f):
    """No-op decorator: IEEE-754 floats are assumed available here."""
    return f
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests.py
new file mode 100644
index 00000000..c5d29ecc
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests.py
@@ -0,0 +1,36 @@
+import unittest
+from ucontextlib import contextmanager
+
+
+class ContextManagerTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self._history = []
+
+        @contextmanager
+        def manager(x):
+            self._history.append('start')
+            try:
+                yield x
+            finally:
+                self._history.append('finish')
+
+        self._manager = manager
+
+    def test_context_manager(self):
+        with self._manager(123) as x:
+            self.assertEqual(x, 123)
+        self.assertEqual(self._history, ['start', 'finish'])
+
+    def test_context_manager_on_error(self):
+        exc = Exception()
+        try:
+            with self._manager(123) as x:
+                raise exc
+        except Exception as e:
+            self.assertEqual(exc, e)
+        self.assertEqual(self._history, ['start', 'finish'])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests/test.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests/test.py
new file mode 100644
index 00000000..72b3d85c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tests/test.py
@@ -0,0 +1,57 @@
+from unittest import TestCase, run_class
+import sys
+sys.path.insert(0, '../uasyncio')
+import queues
+
+
+class QueueTestCase(TestCase):
+
+    def _val(self, gen):
+        """Returns val from generator."""
+        while True:
+            try:
+                gen.send(None)
+            except StopIteration as e:
+                return e.value
+
+    def test_get_put(self):
+        q = queues.Queue(maxsize=1)
+        self._val(q.put(42))
+        self.assertEqual(self._val(q.get()), 42)
+
+    def test_get_put_nowait(self):
+        q = queues.Queue(maxsize=1)
+        q.put_nowait(12)
+        try:
+            q.put_nowait(42)
+            self.assertTrue(False)
+        except Exception as e:
+            self.assertEqual(type(e), queues.QueueFull)
+        self.assertEqual(q.get_nowait(), 12)
+        try:
+            q.get_nowait()
+            self.assertTrue(False)
+        except Exception as e:
+            self.assertEqual(type(e), queues.QueueEmpty)
+
+    def test_qsize(self):
+        q = queues.Queue()
+        for n in range(10):
+            q.put_nowait(10)
+        self.assertEqual(q.qsize(), 10)
+
+    def test_empty(self):
+        q = queues.Queue()
+        self.assertTrue(q.empty())
+        q.put_nowait(10)
+        self.assertFalse(q.empty())
+
+    def test_full(self):
+        q = queues.Queue(maxsize=1)
+        self.assertFalse(q.full())
+        q.put_nowait(10)
+        self.assertTrue(q.full())
+
+
+if __name__ == '__main__':
+    run_class(QueueTestCase)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/textwrap.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/textwrap.py
new file mode 100644
index 00000000..24891804
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/textwrap.py
@@ -0,0 +1,470 @@
+"""Text wrapping and filling.
+"""
+
+# Copyright (C) 1999-2001 Gregory P. Ward.
+# Copyright (C) 2002, 2003 Python Software Foundation.
+# Written by Greg Ward <gward@python.net>
+
+import re
+
+__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']
+
+# Hardcode the recognized whitespace characters to the US-ASCII
+# whitespace characters.  The main reason for doing this is that in
+# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
+# that character winds up in string.whitespace.  Respecting
+# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
+# same as any other whitespace char, which is clearly wrong (it's a
+# *non-breaking* space), 2) possibly cause problems with Unicode,
+# since 0xa0 is not in range(128).
+_whitespace = '\t\n\x0b\x0c\r '
+
+class TextWrapper:
+    """
+    Object for wrapping/filling text.  The public interface consists of
+    the wrap() and fill() methods; the other methods are just there for
+    subclasses to override in order to tweak the default behaviour.
+    If you want to completely replace the main wrapping algorithm,
+    you'll probably have to override _wrap_chunks().
+
+    Several instance attributes control various aspects of wrapping:
+      width (default: 70)
+        the maximum width of wrapped lines (unless break_long_words
+        is false)
+      initial_indent (default: "")
+        string that will be prepended to the first line of wrapped
+        output.  Counts towards the line's width.
+      subsequent_indent (default: "")
+        string that will be prepended to all lines save the first
+        of wrapped output; also counts towards each line's width.
+      expand_tabs (default: true)
+        Expand tabs in input text to spaces before further processing.
+        Each tab will become 0 .. 'tabsize' spaces, depending on its position
+        in its line.  If false, each tab is treated as a single character.
+      tabsize (default: 8)
+        Expand tabs in input text to 0 .. 'tabsize' spaces, unless
+        'expand_tabs' is false.
+      replace_whitespace (default: true)
+        Replace all whitespace characters in the input text by spaces
+        after tab expansion.  Note that if expand_tabs is false and
+        replace_whitespace is true, every tab will be converted to a
+        single space!
+      fix_sentence_endings (default: false)
+        Ensure that sentence-ending punctuation is always followed
+        by two spaces.  Off by default because the algorithm is
+        (unavoidably) imperfect.
+      break_long_words (default: true)
+        Break words longer than 'width'.  If false, those words will not
+        be broken, and some lines might be longer than 'width'.
+      break_on_hyphens (default: true)
+        Allow breaking hyphenated words. If true, wrapping will occur
+        preferably on whitespaces and right after hyphens part of
+        compound words.
+      drop_whitespace (default: true)
+        Drop leading and trailing whitespace from lines.
+      max_lines (default: None)
+        Truncate wrapped lines.
+      placeholder (default: ' [...]')
+        Append to the last line of truncated text.
+    """
+
+    unicode_whitespace_trans = {}
+    uspace = ord(' ')
+    for x in _whitespace:
+        unicode_whitespace_trans[ord(x)] = uspace
+
+    # This funky little regex is just the trick for splitting
+    # text up into word-wrappable chunks.  E.g.
+    #   "Hello there -- you goof-ball, use the -b option!"
+    # splits into
+    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
+    # (after stripping out empty strings).
+    wordsep_re = re.compile(
+        r'(\s+|'                                  # any whitespace
+        r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|'   # hyphenated words
+        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash
+
+    # This less funky little regex just splits on recognized spaces. E.g.
+    #   "Hello there -- you goof-ball, use the -b option!"
+    # splits into
+    #   Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
+    wordsep_simple_re = re.compile(r'(\s+)')
+
+    # XXX this is not locale- or charset-aware -- string.lowercase
+    # is US-ASCII only (and therefore English-only)
+    sentence_end_re = re.compile(r'[a-z]'             # lowercase letter
+                                 r'[\.\!\?]'          # sentence-ending punct.
+                                 r'[\"\']?'           # optional end-of-quote
+                                 r'\Z')               # end of chunk
+
+
+    def __init__(self,
+                 width=70,
+                 initial_indent="",
+                 subsequent_indent="",
+                 expand_tabs=True,
+                 replace_whitespace=True,
+                 fix_sentence_endings=False,
+                 break_long_words=True,
+                 drop_whitespace=True,
+                 break_on_hyphens=True,
+                 tabsize=8,
+                 *,
+                 max_lines=None,
+                 placeholder=' [...]'):
+        self.width = width
+        self.initial_indent = initial_indent
+        self.subsequent_indent = subsequent_indent
+        self.expand_tabs = expand_tabs
+        self.replace_whitespace = replace_whitespace
+        self.fix_sentence_endings = fix_sentence_endings
+        self.break_long_words = break_long_words
+        self.drop_whitespace = drop_whitespace
+        self.break_on_hyphens = break_on_hyphens
+        self.tabsize = tabsize
+        self.max_lines = max_lines
+        self.placeholder = placeholder
+
+
+    # -- Private methods -----------------------------------------------
+    # (possibly useful for subclasses to override)
+
+    def _munge_whitespace(self, text):
+        """_munge_whitespace(text : string) -> string
+
+        Munge whitespace in text: expand tabs and convert all other
+        whitespace characters to spaces.  Eg. " foo\tbar\n\nbaz"
+        becomes " foo    bar  baz".
+        """
+        if self.expand_tabs:
+            text = text.expandtabs(self.tabsize)
+        if self.replace_whitespace:
+            text = text.translate(self.unicode_whitespace_trans)
+        return text
+
+
+    def _split(self, text):
+        """_split(text : string) -> [string]
+
+        Split the text to wrap into indivisible chunks.  Chunks are
+        not quite the same as words; see _wrap_chunks() for full
+        details.  As an example, the text
+          Look, goof-ball -- use the -b option!
+        breaks into the following chunks:
+          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
+          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+        if break_on_hyphens is True, or in:
+          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
+          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+        otherwise.
+        """
+        if self.break_on_hyphens is True:
+            chunks = self.wordsep_re.split(text)
+        else:
+            chunks = self.wordsep_simple_re.split(text)
+        chunks = [c for c in chunks if c]
+        return chunks
+
+    def _fix_sentence_endings(self, chunks):
+        """_fix_sentence_endings(chunks : [string])
+
+        Correct for sentence endings buried in 'chunks'.  Eg. when the
+        original text contains "... foo.\nBar ...", munge_whitespace()
+        and split() will convert that to [..., "foo.", " ", "Bar", ...]
+        which has one too few spaces; this method simply changes the one
+        space to two.
+        """
+        i = 0
+        patsearch = self.sentence_end_re.search
+        while i < len(chunks)-1:
+            if chunks[i+1] == " " and patsearch(chunks[i]):
+                chunks[i+1] = "  "
+                i += 2
+            else:
+                i += 1
+
+    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+        """_handle_long_word(chunks : [string],
+                             cur_line : [string],
+                             cur_len : int, width : int)
+
+        Handle a chunk of text (most likely a word, not whitespace) that
+        is too long to fit in any line.
+        """
+        # Figure out when indent is larger than the specified width, and make
+        # sure at least one character is stripped off on every pass
+        if width < 1:
+            space_left = 1
+        else:
+            space_left = width - cur_len
+
+        # If we're allowed to break long words, then do so: put as much
+        # of the next chunk onto the current line as will fit.
+        if self.break_long_words:
+            cur_line.append(reversed_chunks[-1][:space_left])
+            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
+
+        # Otherwise, we have to preserve the long word intact.  Only add
+        # it to the current line if there's nothing already there --
+        # that minimizes how much we violate the width constraint.
+        elif not cur_line:
+            cur_line.append(reversed_chunks.pop())
+
+        # If we're not allowed to break long words, and there's already
+        # text on the current line, do nothing.  Next time through the
+        # main loop of _wrap_chunks(), we'll wind up here again, but
+        # cur_len will be zero, so the next line will be entirely
+        # devoted to the long word that we can't handle right now.
+
+    def _wrap_chunks(self, chunks):
+        """_wrap_chunks(chunks : [string]) -> [string]
+
+        Wrap a sequence of text chunks and return a list of lines of
+        length 'self.width' or less.  (If 'break_long_words' is false,
+        some lines may be longer than this.)  Chunks correspond roughly
+        to words and the whitespace between them: each chunk is
+        indivisible (modulo 'break_long_words'), but a line break can
+        come between any two chunks.  Chunks should not have internal
+        whitespace; ie. a chunk is either all whitespace or a "word".
+        Whitespace chunks will be removed from the beginning and end of
+        lines, but apart from that whitespace is preserved.
+        """
+        lines = []
+        if self.width <= 0:
+            raise ValueError("invalid width %r (must be > 0)" % self.width)
+        if self.max_lines is not None:
+            if self.max_lines > 1:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+            if len(indent) + len(self.placeholder.lstrip()) > self.width:
+                raise ValueError("placeholder too large for max width")
+
+        # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+        chunks.reverse()
+
+        while chunks:
+
+            # Start the list of chunks that will make up the current line.
+            # cur_len is just the length of all the chunks in cur_line.
+            cur_line = []
+            cur_len = 0
+
+            # Figure out which static string will prefix this line.
+            if lines:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+
+            # Maximum width for this line.
+            width = self.width - len(indent)
+
+            # First chunk on line is whitespace -- drop it, unless this
+            # is the very beginning of the text (ie. no lines started yet).
+            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+                del chunks[-1]
+
+            while chunks:
+                l = len(chunks[-1])
+
+                # Can at least squeeze this chunk onto the current line.
+                if cur_len + l <= width:
+                    cur_line.append(chunks.pop())
+                    cur_len += l
+
+                # Nope, this line is full.
+                else:
+                    break
+
+            # The current line is full, and the next chunk is too big to
+            # fit on *any* line (not just this one).
+            if chunks and len(chunks[-1]) > width:
+                self._handle_long_word(chunks, cur_line, cur_len, width)
+                cur_len = sum(map(len, cur_line))
+
+            # If the last chunk on this line is all whitespace, drop it.
+            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
+                cur_len -= len(cur_line[-1])
+                del cur_line[-1]
+
+            if cur_line:
+                if (self.max_lines is None or
+                    len(lines) + 1 < self.max_lines or
+                    (not chunks or
+                     self.drop_whitespace and
+                     len(chunks) == 1 and
+                     not chunks[0].strip()) and cur_len <= width):
+                    # Convert current line back to a string and store it in
+                    # list of all lines (return value).
+                    lines.append(indent + ''.join(cur_line))
+                else:
+                    while cur_line:
+                        if (cur_line[-1].strip() and
+                            cur_len + len(self.placeholder) <= width):
+                            cur_line.append(self.placeholder)
+                            lines.append(indent + ''.join(cur_line))
+                            break
+                        cur_len -= len(cur_line[-1])
+                        del cur_line[-1]
+                    else:
+                        if lines:
+                            prev_line = lines[-1].rstrip()
+                            if (len(prev_line) + len(self.placeholder) <=
+                                    self.width):
+                                lines[-1] = prev_line + self.placeholder
+                                break
+                        lines.append(indent + self.placeholder.lstrip())
+                    break
+
+        return lines
+
+    def _split_chunks(self, text):
+        text = self._munge_whitespace(text)
+        return self._split(text)
+
+    # -- Public interface ----------------------------------------------
+
+    def wrap(self, text):
+        """wrap(text : string) -> [string]
+
+        Reformat the single paragraph in 'text' so it fits in lines of
+        no more than 'self.width' columns, and return a list of wrapped
+        lines.  Tabs in 'text' are expanded with string.expandtabs(),
+        and all other whitespace characters (including newline) are
+        converted to space.
+        """
+        chunks = self._split_chunks(text)
+        if self.fix_sentence_endings:
+            self._fix_sentence_endings(chunks)
+        return self._wrap_chunks(chunks)
+
+    def fill(self, text):
+        """fill(text : string) -> string
+
+        Reformat the single paragraph in 'text' to fit in lines of no
+        more than 'self.width' columns, and return a new string
+        containing the entire wrapped paragraph.
+        """
+        return "\n".join(self.wrap(text))
+
+
+# -- Convenience interface ---------------------------------------------
+
+def wrap(text, width=70, **kwargs):
+    """Wrap a single paragraph of text, returning a list of wrapped lines.
+
+    Reformat the single paragraph in 'text' so it fits in lines of no
+    more than 'width' columns, and return a list of wrapped lines.  By
+    default, tabs in 'text' are expanded with string.expandtabs(), and
+    all other whitespace characters (including newline) are converted to
+    space.  See TextWrapper class for available keyword args to customize
+    wrapping behaviour.
+    """
+    w = TextWrapper(width=width, **kwargs)
+    return w.wrap(text)
+
+def fill(text, width=70, **kwargs):
+    """Fill a single paragraph of text, returning a new string.
+
+    Reformat the single paragraph in 'text' to fit in lines of no more
+    than 'width' columns, and return a new string containing the entire
+    wrapped paragraph.  As with wrap(), tabs are expanded and other
+    whitespace characters converted to space.  See TextWrapper class for
+    available keyword args to customize wrapping behaviour.
+    """
+    w = TextWrapper(width=width, **kwargs)
+    return w.fill(text)
+
+def shorten(text, width, **kwargs):
+    """Collapse and truncate the given text to fit in the given width.
+
+    The text first has its whitespace collapsed.  If it then fits in
+    the *width*, it is returned as is.  Otherwise, as many words
+    as possible are joined and then the placeholder is appended::
+
+        >>> textwrap.shorten("Hello  world!", width=12)
+        'Hello world!'
+        >>> textwrap.shorten("Hello  world!", width=11)
+        'Hello [...]'
+    """
+    w = TextWrapper(width=width, max_lines=1, **kwargs)
+    return w.fill(' '.join(text.strip().split()))
+
+
+# -- Loosely related functionality -------------------------------------
+
+_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
+_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
+
+def dedent(text):
+    """Remove any common leading whitespace from every line in `text`.
+
+    This can be used to make triple-quoted strings line up with the left
+    edge of the display, while still presenting them in the source code
+    in indented form.
+
+    Note that tabs and spaces are both treated as whitespace, but they
+    are not equal: the lines "  hello" and "\thello" are
+    considered to have no common leading whitespace.  (This behaviour is
+    new in Python 2.5; older versions of this module incorrectly
+    expanded tabs before searching for common leading whitespace.)
+    """
+    # Look for the longest leading string of spaces and tabs common to
+    # all lines.
+    margin = None
+    text = _whitespace_only_re.sub('', text)
+    indents = _leading_whitespace_re.findall(text)
+    for indent in indents:
+        if margin is None:
+            margin = indent
+
+        # Current line more deeply indented than previous winner:
+        # no change (previous winner is still on top).
+        elif indent.startswith(margin):
+            pass
+
+        # Current line consistent with and no deeper than previous winner:
+        # it's the new winner.
+        elif margin.startswith(indent):
+            margin = indent
+
+        # Current line and previous winner have no common whitespace:
+        # there is no margin.
+        else:
+            margin = ""
+            break
+
+    # sanity check (testing/debugging only)
+    if 0 and margin:
+        for line in text.split("\n"):
+            assert not line or line.startswith(margin), \
+                   "line = %r, margin = %r" % (line, margin)
+
+    if margin:
+        text = re.sub(r'(?m)^' + margin, '', text)
+    return text
+
+
+def indent(text, prefix, predicate=None):
+    """Adds 'prefix' to the beginning of selected lines in 'text'.
+
+    If 'predicate' is provided, 'prefix' will only be added to the lines
+    where 'predicate(line)' is True. If 'predicate' is not provided,
+    it will default to adding 'prefix' to all non-empty lines that do not
+    consist solely of whitespace characters.
+    """
+    if predicate is None:
+        def predicate(line):
+            return line.strip()
+
+    def prefixed_lines():
+        for line in text.splitlines(True):
+            yield (prefix + line if predicate(line) else line)
+    return ''.join(prefixed_lines())
+
+
+if __name__ == "__main__":
+    #print dedent("\tfoo\n\tbar")
+    #print dedent("  \thello there\n  \t  how are you?")
+    print(dedent("Hello there.\n  This is indented."))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/threading.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/threading.py
new file mode 100644
index 00000000..003cb907
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/threading.py
@@ -0,0 +1,15 @@
+import _thread
+
+
+class Thread:
+
+    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
+        self.target = target
+        self.args = args
+        self.kwargs = {} if kwargs is None else kwargs
+
+    def start(self):
+        _thread.start_new_thread(self.run, ())
+
+    def run(self):
+        self.target(*self.args, **self.kwargs)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/time.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/time.py
new file mode 100644
index 00000000..f46a0764
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/time.py
@@ -0,0 +1,76 @@
+from utime import *
+from ucollections import namedtuple
+import ustruct
+import uctypes
+import ffi
+import ffilib
+import array
+
+libc = ffilib.libc()
+
+# struct tm *gmtime(const time_t *timep);
+# struct tm *localtime(const time_t *timep);
+# size_t strftime(char *s, size_t max, const char *format,
+#                       const struct tm *tm);
+gmtime_ = libc.func("P", "gmtime", "P")
+localtime_ = libc.func("P", "localtime", "P")
+strftime_ = libc.func("i", "strftime", "sisP")
+mktime_ = libc.func("i", "mktime", "P")
+
+_struct_time = namedtuple("struct_time",
+    ["tm_year", "tm_mon", "tm_mday", "tm_hour", "tm_min", "tm_sec", "tm_wday", "tm_yday", "tm_isdst"])
+
+def _tuple_to_c_tm(t):
+    return ustruct.pack("@iiiiiiiii", t[5], t[4], t[3], t[2], t[1] - 1, t[0] - 1900, (t[6] + 1) % 7, t[7] - 1, t[8])
+
+
+def _c_tm_to_tuple(tm):
+    t = ustruct.unpack("@iiiiiiiii", tm)
+    return _struct_time(t[5] + 1900, t[4] + 1, t[3], t[2], t[1], t[0], (t[6] - 1) % 7, t[7] + 1, t[8])
+
+def struct_time(tm):
+    return _struct_time(*tm)
+
+
+def strftime(format, t=None):
+    if t is None:
+        t = localtime()
+
+    buf = bytearray(32)
+    l = strftime_(buf, 32, format, _tuple_to_c_tm(t))
+    return str(buf[:l], "utf-8")
+
+
+def localtime(t=None):
+    if t is None:
+        t = time()
+
+    t = int(t)
+    a = ustruct.pack('l', t)
+    tm_p = localtime_(a)
+    return _c_tm_to_tuple(uctypes.bytearray_at(tm_p, 36))
+
+
+def gmtime(t=None):
+    if t is None:
+        t = time()
+
+    t = int(t)
+    a = ustruct.pack('l', t)
+    tm_p = gmtime_(a)
+    return _c_tm_to_tuple(uctypes.bytearray_at(tm_p, 36))
+
+
+def mktime(tt):
+    return mktime_(_tuple_to_c_tm(tt))
+
+
+def perf_counter():
+    return time()
+
+def process_time():
+    return clock()
+
+
+daylight = 0
+timezone = 0
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/timeit.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/timeit.py
new file mode 100644
index 00000000..9c09ce3c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/timeit.py
@@ -0,0 +1,334 @@
+#! /usr/bin/env python3
+
+__doc__ = """Tool for measuring execution time of small code snippets.
+
+This module avoids a number of common traps for measuring execution
+times.  See also Tim Peters' introduction to the Algorithms chapter in
+the Python Cookbook, published by O'Reilly.
+
+Library usage: see the Timer class.
+
+Command line usage:
+    python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-p] [-h] [--] [statement]
+
+Options:
+  -n/--number N: how many times to execute 'statement' (default: see below)
+  -r/--repeat N: how many times to repeat the timer (default 3)
+  -s/--setup S: statement to be executed once initially (default 'pass')
+  -p/--process: use time.process_time() (default is time.perf_counter())
+  -t/--time: use time.time() (deprecated)
+  -c/--clock: use time.clock() (deprecated)
+  -v/--verbose: print raw timing results; repeat for more digits precision
+  -h/--help: print this usage message and exit
+  --: separate options from statement, use when statement starts with -
+  statement: statement to be timed (default 'pass')
+
+A multi-line statement may be given by specifying each line as a
+separate argument; indented lines are possible by enclosing an
+argument in quotes and using leading spaces.  Multiple -s options are
+treated similarly.
+
+If -n is not given, a suitable number of loops is calculated by trying
+successive powers of 10 until the total time is at least 0.2 seconds.
+
+The difference in default timer function is because on Windows,
+clock() has microsecond granularity but time()'s granularity is 1/60th
+of a second; on Unix, clock() has 1/100th of a second granularity and
+time() is much more precise.  On either platform, the default timer
+functions measure wall clock time, not the CPU time.  This means that
+other processes running on the same computer may interfere with the
+timing.  The best thing to do when accurate timing is necessary is to
+repeat the timing a few times and use the best time.  The -r option is
+good for this; the default of 3 repetitions is probably enough in most
+cases.  On Unix, you can use clock() to measure CPU time.
+
+Note: there is a certain baseline overhead associated with executing a
+pass statement.  The code here doesn't try to hide it, but you should
+be aware of it.  The baseline overhead can be measured by invoking the
+program without arguments.
+
+The baseline overhead differs between Python versions!  Also, to
+fairly compare older Python versions to Python 2.3, you may want to
+use python -O for the older versions to avoid timing SET_LINENO
+instructions.
+"""
+
+import gc
+import sys
+import time
+try:
+    import itertools
+except ImportError:
+    # Must be an older Python version (see timeit() below)
+    itertools = None
+
__all__ = ["Timer"]

dummy_src_name = "<timeit-src>"  # pseudo-filename used when compiling the template
default_number = 1000000
default_repeat = 3
default_timer = time.perf_counter

# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
    {setup}
    _t0 = _timer()
    for _i in _it:
        {stmt}
    _t1 = _timer()
    return _t1 - _t0
"""
+
def reindent(src, indent):
    """Helper to reindent a multi-line statement."""
    separator = "\n" + indent * " "
    return separator.join(src.split("\n"))
+
+def _template_func(setup, func):
+    """Create a timer function. Used if the "statement" is a callable."""
+    def inner(_it, _timer, _func=func):
+        setup()
+        _t0 = _timer()
+        for _i in _it:
+            _func()
+        _t1 = _timer()
+        return _t1 - _t0
+    return inner
+
class Timer:
    """Class for timing execution speed of small code snippets.

    The constructor takes a statement to be timed, an additional
    statement used for setup, and a timer function.  Both statements
    default to 'pass'; the timer function is platform-dependent (see
    module doc string).

    To measure the execution time of the first statement, use the
    timeit() method.  The repeat() method is a convenience to call
    timeit() multiple times and return a list of results.

    The statements may contain newlines, as long as they don't contain
    multi-line string literals.
    """

    def __init__(self, stmt="pass", setup="pass", timer=default_timer):
        """Constructor.  See class doc string."""
        self.timer = timer
        ns = {}
        if isinstance(stmt, str):
            # Textual statement: splice it into the module-level template
            # and compile, so per-iteration overhead is just the statement.
            stmt = reindent(stmt, 8)
            if isinstance(setup, str):
                setup = reindent(setup, 4)
                src = template.format(stmt=stmt, setup=setup)
            elif callable(setup):
                # Callable setup is invoked through a name injected into ns.
                src = template.format(stmt=stmt, setup='_setup()')
                ns['_setup'] = setup
            else:
                raise ValueError("setup is neither a string nor callable")
            self.src = src # Save for traceback display
            code = compile(src, dummy_src_name, "exec")
            exec(code, globals(), ns)
            self.inner = ns["inner"]
        elif callable(stmt):
            self.src = None
            if isinstance(setup, str):
                # Wrap the textual setup so _template_func can call it.
                _setup = setup
                def setup():
                    exec(_setup, globals(), ns)
            elif not callable(setup):
                raise ValueError("setup is neither a string nor callable")
            self.inner = _template_func(setup, stmt)
        else:
            raise ValueError("stmt is neither a string nor callable")

    def print_exc(self, file=None):
        """Helper to print a traceback from the timed code.

        Typical use:

            t = Timer(...)       # outside the try/except
            try:
                t.timeit(...)    # or t.repeat(...)
            except:
                t.print_exc()

        The advantage over the standard traceback is that source lines
        in the compiled template will be displayed.

        The optional file argument directs where the traceback is
        sent; it defaults to sys.stderr.
        """
        import linecache, traceback
        if self.src is not None:
            # Register the generated source under the dummy filename so the
            # traceback module can show the template lines.
            linecache.cache[dummy_src_name] = (len(self.src),
                                               None,
                                               self.src.split("\n"),
                                               dummy_src_name)
        # else the source is already stored somewhere else

        traceback.print_exc(file=file)

    def timeit(self, number=default_number):
        """Time 'number' executions of the main statement.

        To be precise, this executes the setup statement once, and
        then returns the time it takes to execute the main statement
        a number of times, as a float measured in seconds.  The
        argument is the number of times through the loop, defaulting
        to one million.  The main statement, the setup statement and
        the timer function to be used are passed to the constructor.
        """
        if itertools:
            it = itertools.repeat(None, number)
        else:
            it = [None] * number
        gcold = gc.isenabled()
        # NOTE(review): CPython disables GC around the timed loop; this
        # port leaves it enabled (the call below is deliberately commented).
#        gc.disable()
        try:
            timing = self.inner(it, self.timer)
        finally:
            if gcold:
                gc.enable()
        return timing

    def repeat(self, repeat=default_repeat, number=default_number):
        """Call timeit() a few times.

        This is a convenience function that calls the timeit()
        repeatedly, returning a list of results.  The first argument
        specifies how many times to call timeit(), defaulting to 3;
        the second argument specifies the timer argument, defaulting
        to one million.

        Note: it's tempting to calculate mean and standard deviation
        from the result vector and report these.  However, this is not
        very useful.  In a typical case, the lowest value gives a
        lower bound for how fast your machine can run the given code
        snippet; higher values in the result vector are typically not
        caused by variability in Python's speed, but by other
        processes interfering with your timing accuracy.  So the min()
        of the result is probably the only number you should be
        interested in.  After that, you should look at the entire
        vector and apply common sense rather than statistics.
        """
        r = []
        for i in range(repeat):
            t = self.timeit(number)
            r.append(t)
        return r
+
def timeit(stmt="pass", setup="pass", timer=default_timer,
           number=default_number):
    """Convenience function to create Timer object and call timeit method."""
    t = Timer(stmt, setup, timer)
    return t.timeit(number)
+
def repeat(stmt="pass", setup="pass", timer=default_timer,
           repeat=default_repeat, number=default_number):
    """Convenience function to create Timer object and call repeat method."""
    t = Timer(stmt, setup, timer)
    return t.repeat(repeat, number)
+
def main(args=None, *, _wrap_timer=None):
    """Main program, used when run as a script.

    The optional 'args' argument specifies the command line to be parsed,
    defaulting to sys.argv[1:].

    The return value is an exit code to be passed to sys.exit(); it
    may be None to indicate success.

    When an exception happens during timing, a traceback is printed to
    stderr and the return value is 1.  Exceptions at other times
    (including the template compilation) are not caught.

    '_wrap_timer' is an internal interface used for unit testing.  If it
    is not None, it must be a callable that accepts a timer function
    and returns another timer function (used for unit testing).
    """
    if args is None:
        args = sys.argv[1:]
    import getopt
    try:
        opts, args = getopt.getopt(args, "n:s:r:tcpvh",
                                   ["number=", "setup=", "repeat=",
                                    "time", "clock", "process",
                                    "verbose", "help"])
    except getopt.error as err:
        print(err)
        print("use -h/--help for command line help")
        return 2
    timer = default_timer
    stmt = "\n".join(args) or "pass"
    number = 0 # auto-determine
    setup = []
    repeat = default_repeat
    verbose = 0
    precision = 3
    # Note: plain ifs, not elif -- each option flag is independent.
    for o, a in opts:
        if o in ("-n", "--number"):
            number = int(a)
        if o in ("-s", "--setup"):
            setup.append(a)
        if o in ("-r", "--repeat"):
            repeat = int(a)
            if repeat <= 0:
                repeat = 1
        if o in ("-t", "--time"):
            timer = time.time
        if o in ("-c", "--clock"):
            timer = time.clock
        if o in ("-p", "--process"):
            timer = time.process_time
        if o in ("-v", "--verbose"):
            # Each extra -v adds one digit of printed precision.
            if verbose:
                precision += 1
            verbose += 1
        if o in ("-h", "--help"):
            print(__doc__, end=' ')
            return 0
    setup = "\n".join(setup) or "pass"
    # Include the current directory, so that local imports work (sys.path
    # contains the directory of this script, rather than the current
    # directory)
    import os
    sys.path.insert(0, os.curdir)
    if _wrap_timer is not None:
        timer = _wrap_timer(timer)
    t = Timer(stmt, setup, timer)
    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        for i in range(1, 10):
            number = 10**i
            try:
                x = t.timeit(number)
            except:
                t.print_exc()
                return 1
            if verbose:
                print("%d loops -> %.*g secs" % (number, precision, x))
            if x >= 0.2:
                break
    try:
        r = t.repeat(repeat, number)
    except:
        t.print_exc()
        return 1
    # Report the minimum; see Timer.repeat() for why min() is the right stat.
    best = min(r)
    if verbose:
        print("raw times:", " ".join(["%.*g" % (precision, x) for x in r]))
    print("%d loops," % number, end=' ')
    usec = best * 1e6 / number
    if usec < 1000:
        print("best of %d: %.*g usec per loop" % (repeat, precision, usec))
    else:
        msec = usec / 1000
        if msec < 1000:
            print("best of %d: %.*g msec per loop" % (repeat, precision, msec))
        else:
            sec = msec / 1000
            print("best of %d: %.*g sec per loop" % (repeat, precision, sec))
    return None

if __name__ == "__main__":
    sys.exit(main())
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/trace.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/trace.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/traceback.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/traceback.py
new file mode 100644
index 00000000..eaa237c7
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/traceback.py
@@ -0,0 +1,21 @@
+import sys
+
def format_tb(tb, limit):
    """Stub: traceback rendering is not available on this port."""
    placeholder = "traceback.format_tb() not implemented\n"
    return [placeholder]
+
def format_exception_only(type, value):
    """Return a single-line list describing *value* (its repr only)."""
    line = "{}\n".format(repr(value))
    return [line]
+
def format_exception(etype, value, tb, limit=None, chain=True):
    """Delegate to format_exception_only; tracebacks are not rendered here."""
    return format_exception_only(etype, value)
+
def print_exception(t, e, tb, limit=None, file=None, chain=True):
    """Print exception *e* via MicroPython's native sys.print_exception.

    NOTE(review): defaults to stdout here, unlike CPython's stderr.
    """
    out = sys.stdout if file is None else file
    sys.print_exception(e, out)
+
def print_exc(limit=None, file=None, chain=True):
    """Print the currently-handled exception, like CPython traceback.print_exc."""
    t, e, tb = sys.exc_info()
    print_exception(t, e, tb, limit=limit, file=file, chain=chain)
+
def format_exc(limit=None, chain=True):
    """Return the currently-handled exception formatted as a string."""
    parts = format_exception(*sys.exc_info(), limit=limit, chain=chain)
    return "".join(parts)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tty.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tty.py
new file mode 100644
index 00000000..1d55cbd8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/tty.py
@@ -0,0 +1 @@
+from termios import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/types.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/types.py
new file mode 100644
index 00000000..e1105de6
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/types.py
@@ -0,0 +1,101 @@
"""
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys

# NOTE(review): MicroPython port -- several names are stubbed to None
# because the corresponding CPython types aren't exposed here.

# Iterators in Python aren't a matter of type but of protocol.  A large
# and changing number of builtin types implement *some* flavor of
# iterator.  Don't check the type!  Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.

def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)         # Same as FunctionType
CodeType = None  # TODO: Add better sentinel which can't match anything
MappingProxyType = None  # TODO: Add better sentinel which can't match anything
SimpleNamespace = None  # TODO: Add better sentinel which can't match anything

def _g():
    yield 1
GeneratorType = type(_g())

class _C:
    def _m(self): pass
MethodType = type(_C()._m)

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType

ModuleType = type(sys)

try:
    raise TypeError
except TypeError:
#    tb = sys.exc_info()[2]
    TracebackType = None  # TODO: Add better sentinel which can't match anything
    FrameType = None  # TODO: Add better sentinel which can't match anything
    tb = None; del tb

# For Jython, the following two types are identical
GetSetDescriptorType = None  # TODO: Add better sentinel which can't match anything
MemberDescriptorType = None  # TODO: Add better sentinel which can't match anything

del sys, _f, _g, _C,                              # Not for export
+
+
+# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
    """Create a class object dynamically using the appropriate metaclass."""
    meta, ns, extra_kwds = prepare_class(name, bases, kwds)
    if exec_body is not None:
        # Let the caller populate the (possibly metaclass-prepared) namespace.
        exec_body(ns)
    cls = meta(name, bases, ns, **extra_kwds)
    return cls
+
def prepare_class(name, bases=(), kwds=None):
    """Call the __prepare__ method of the appropriate metaclass.

    Returns (metaclass, namespace, kwds) as a 3-tuple

    *metaclass* is the appropriate metaclass
    *namespace* is the prepared class namespace
    *kwds* is an updated copy of the passed in kwds argument with any
    'metaclass' entry removed. If no kwds argument is passed in, this will
    be an empty dict.
    """
    kwds = {} if kwds is None else dict(kwds)  # never mutate the caller's mapping
    missing = object()
    meta = kwds.pop('metaclass', missing)
    if meta is missing:
        # No explicit metaclass: derive it from the first base, or use type.
        meta = type(bases[0]) if bases else type
    if isinstance(meta, type):
        # when meta is a type, we first determine the most-derived metaclass
        # instead of invoking the initial candidate directly
        meta = _calculate_meta(meta, bases)
    ns = meta.__prepare__(name, bases, **kwds) if hasattr(meta, '__prepare__') else {}
    return meta, ns, kwds
+
+def _calculate_meta(meta, bases):
+    """Calculate the most derived metaclass."""
+    winner = meta
+    for base in bases:
+        base_meta = type(base)
+        if issubclass(winner, base_meta):
+            continue
+        if issubclass(base_meta, winner):
+            winner = base_meta
+            continue
+        # else:
+        raise TypeError("metaclass conflict: "
+                        "the metaclass of a derived class "
+                        "must be a (non-strict) subclass "
+                        "of the metaclasses of all its bases")
+    return winner
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uaiohttpclient.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uaiohttpclient.py
new file mode 100644
index 00000000..cf7fe19e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uaiohttpclient.py
@@ -0,0 +1,96 @@
+import uasyncio as asyncio
+
+
class ClientResponse:
    """Plain (non-chunked) HTTP response wrapping a stream reader.

    .status and .headers are attached by request() after header parsing.
    """

    def __init__(self, reader):
        self.content = reader

    def read(self, sz=-1):
        """Coroutine: read up to *sz* bytes (or everything) from the body."""
        data = yield from self.content.read(sz)
        return data

    def __repr__(self):
        return "<ClientResponse %d %s>" % (self.status, self.headers)
+
+
class ChunkedClientResponse(ClientResponse):
    # Response body decoder for "Transfer-Encoding: chunked".

    def __init__(self, reader):
        self.content = reader
        # Bytes still unread in the current chunk; 0 means "expect a new
        # chunk-size line next".
        self.chunk_size = 0

    def read(self, sz=4*1024*1024):
        """Coroutine: read up to *sz* bytes of de-chunked body data.

        Returns b'' at end of message (the zero-size terminating chunk).
        """
        if self.chunk_size == 0:
            # Parse the "<hex-size>[;extensions]\r\n" chunk header line.
            l = yield from self.content.readline()
            #print("chunk line:", l)
            l = l.split(b";", 1)[0]
            self.chunk_size = int(l, 16)
            #print("chunk size:", self.chunk_size)
            if self.chunk_size == 0:
                # End of message
                sep = yield from self.content.read(2)
                assert sep == b"\r\n"
                return b''
        data = yield from self.content.read(min(sz, self.chunk_size))
        self.chunk_size -= len(data)
        if self.chunk_size == 0:
            # Consume the CRLF that terminates every chunk.
            sep = yield from self.content.read(2)
            assert sep == b"\r\n"
        return data

    def __repr__(self):
        return "<ChunkedClientResponse %d %s>" % (self.status, self.headers)
+
+
def request_raw(method, url):
    """Coroutine: send a bare HTTP/1.0 request for *url*; return the reader."""
    pieces = url.split("/", 3)
    if len(pieces) == 4:
        proto, dummy, host, path = pieces
    else:
        proto, dummy, host = pieces
        path = ""
    if proto != "http:":
        raise ValueError("Unsupported protocol: " + proto)
    reader, writer = yield from asyncio.open_connection(host, 80)
    # Use protocol 1.0, because 1.1 always allows to use chunked transfer-encoding
    # But explicitly set Connection: close, even though this should be default for 1.0,
    # because some servers misbehave w/o it.
    query = "%s /%s HTTP/1.0\r\nHost: %s\r\nConnection: close\r\nUser-Agent: compat\r\n\r\n" % (method, path, host)
    yield from writer.awrite(query.encode('latin-1'))
    return reader
+
+
def request(method, url):
    """Coroutine: perform an HTTP request, following up to 2 redirects.

    Returns a ClientResponse (or ChunkedClientResponse when the server
    sends chunked transfer-encoding) with .status, .headers (raw header
    lines) and .content (stream reader) populated.
    """
    redir_cnt = 0
    redir_url = None  # NOTE(review): assigned but never used
    while redir_cnt < 2:
        reader = yield from request_raw(method, url)
        headers = []
        sline = yield from reader.readline()
        # Status line b"HTTP/1.0 200 OK" -> extract the numeric code.
        sline = sline.split(None, 2)
        status = int(sline[1])
        chunked = False
        while True:
            line = yield from reader.readline()
            if not line or line == b"\r\n":
                break
            headers.append(line)
            if line.startswith(b"Transfer-Encoding:"):
                if b"chunked" in line:
                    chunked = True
            elif line.startswith(b"Location:"):
                # Remember the redirect target; used on the next loop pass.
                url = line.rstrip().split(None, 1)[1].decode("latin-1")

        if 301 <= status <= 303:
            redir_cnt += 1
            yield from reader.aclose()
            continue
        break

    if chunked:
        resp = ChunkedClientResponse(reader)
    else:
        resp = ClientResponse(reader)
    resp.status = status
    resp.headers = headers
    return resp
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio.py
new file mode 100644
index 00000000..fc83456f
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio.py
@@ -0,0 +1,99 @@
+import inspect
+import asyncio
+import asyncio.futures as futures
+from asyncio import *
+
+
OrgTask = Task

class Task(OrgTask):
    # CPython asyncio Task subclass that additionally tolerates "bare yield"
    # and yielded generators, mimicking MicroPython uasyncio semantics.

    def _step(self, value=None, exc=None):
        assert not self.done(), \
            '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc)
        if self._must_cancel:
            if not isinstance(exc, futures.CancelledError):
                exc = futures.CancelledError()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None

        self.__class__._current_tasks[self._loop] = self
        # Call either coro.throw(exc) or coro.send(value).
        try:
            if exc is not None:
                result = coro.throw(exc)
            elif value is not None:
                result = coro.send(value)
            else:
                result = next(coro)
        except StopIteration as exc:
            self.set_result(exc.value)
        except futures.CancelledError as exc:
            super().cancel()  # I.e., Future.cancel(self).
        except Exception as exc:
            self.set_exception(exc)
        except BaseException as exc:
            self.set_exception(exc)
            raise
        else:
            if isinstance(result, futures.Future):
                # Yielded Future must come from Future.__iter__().
                if result._blocking:
                    result._blocking = False
                    result.add_done_callback(self._wakeup)
                    self._fut_waiter = result
                    if self._must_cancel:
                        if self._fut_waiter.cancel():
                            self._must_cancel = False
                else:
                    self._loop.call_soon(
                        self._step, None,
                        RuntimeError(
                            'yield was used instead of yield from '
                            'in task {!r} with {!r}'.format(self, result)))
            elif result is None:
                # Bare yield relinquishes control for one event loop iteration.
                self._loop.call_soon(self._step)
            elif inspect.isgenerator(result):
                #print("Scheduling", result)
                # Yielded generator: schedule it as a sibling task (uasyncio
                # style) and keep this task running.
                self._loop.create_task(result)
                self._loop.call_soon(self._step)
                # Yielding a generator is just wrong.
#                self._loop.call_soon(
#                    self._step, None,
#                    RuntimeError(
#                        'yield was used instead of yield from for '
#                        'generator in task {!r} with {}'.format(
#                            self, result)))
            else:
                # Yielding something else is an error.
                self._loop.call_soon(
                    self._step, None,
                    RuntimeError(
                        'Task got bad yield: {!r}'.format(result)))
        finally:
            self.__class__._current_tasks.pop(self._loop)
            self = None  # Needed to break cycles when an exception occurs.


# Patch the stdlib so loops create our tolerant Task subclass.
asyncio.tasks.Task = Task
+
+
OrgStreamWriter = StreamWriter

class StreamWriter(OrgStreamWriter):
    """asyncio StreamWriter extended with uasyncio's awrite()/aclose()."""

    def awrite(self, data):
        # Accept str for convenience; the wire always carries bytes.
        payload = data.encode("utf-8") if isinstance(data, str) else data
        self.write(payload)
        yield from self.drain()

    def aclose(self):
        self.close()
        # The unreachable `yield` keeps this a generator function that
        # finishes immediately after closing.
        return
        yield


# Patch the stdlib so stream helpers hand out the extended writer.
asyncio.streams.StreamWriter = StreamWriter
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/__init__.py
new file mode 100644
index 00000000..41fa5725
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/__init__.py
@@ -0,0 +1,258 @@
+import uerrno
+import uselect as select
+import usocket as _socket
+from uasyncio.core import *
+
+
+DEBUG = 0
+log = None
+
def set_debug(val):
    """Enable or disable debug logging for this module."""
    global DEBUG, log
    DEBUG = val
    if val:
        # Create the logger lazily so `logging` is only imported when needed.
        import logging
        log = logging.getLogger("uasyncio")
+
+
class PollEventLoop(EventLoop):
    # EventLoop subclass adding socket I/O scheduling via select.poll().

    def __init__(self, runq_len=16, waitq_len=16):
        EventLoop.__init__(self, runq_len, waitq_len)
        self.poller = select.poll()
        # Maps id(sock) -> callback (or (callback, args) tuple) to run when
        # the socket becomes ready.
        self.objmap = {}

    def add_reader(self, sock, cb, *args):
        """Register *cb* to be scheduled when *sock* is readable."""
        if DEBUG and __debug__:
            log.debug("add_reader%s", (sock, cb, args))
        if args:
            self.poller.register(sock, select.POLLIN)
            self.objmap[id(sock)] = (cb, args)
        else:
            self.poller.register(sock, select.POLLIN)
            self.objmap[id(sock)] = cb

    def remove_reader(self, sock):
        """Unregister *sock* and drop its pending callback."""
        if DEBUG and __debug__:
            log.debug("remove_reader(%s)", sock)
        self.poller.unregister(sock)
        del self.objmap[id(sock)]

    def add_writer(self, sock, cb, *args):
        """Register *cb* to be scheduled when *sock* is writable."""
        if DEBUG and __debug__:
            log.debug("add_writer%s", (sock, cb, args))
        if args:
            self.poller.register(sock, select.POLLOUT)
            self.objmap[id(sock)] = (cb, args)
        else:
            self.poller.register(sock, select.POLLOUT)
            self.objmap[id(sock)] = cb

    def remove_writer(self, sock):
        """Unregister *sock*; tolerate sockets that were never registered."""
        if DEBUG and __debug__:
            log.debug("remove_writer(%s)", sock)
        try:
            self.poller.unregister(sock)
            self.objmap.pop(id(sock), None)
        except OSError as e:
            # StreamWriter.awrite() first tries to write to a socket,
            # and if that succeeds, yield IOWrite may never be called
            # for that socket, and it will never be added to poller. So,
            # ignore such error.
            if e.args[0] != uerrno.ENOENT:
                raise

    def wait(self, delay):
        """Poll registered sockets for up to *delay* ms, scheduling callbacks."""
        if DEBUG and __debug__:
            log.debug("poll.wait(%d)", delay)
        # We need one-shot behavior (second arg of 1 to .poll())
        res = self.poller.ipoll(delay, 1)
        #log.debug("poll result: %s", res)
        # Remove "if res" workaround after
        # https://github.com/micropython/micropython/issues/2716 fixed.
        if res:
            for sock, ev in res:
                cb = self.objmap[id(sock)]
                if ev & (select.POLLHUP | select.POLLERR):
                    # These events are returned even if not requested, and
                    # are sticky, i.e. will be returned again and again.
                    # If the caller doesn't do proper error handling and
                    # unregister this sock, we'll busy-loop on it, so we
                    # as well can unregister it now "just in case".
                    self.remove_reader(sock)
                if DEBUG and __debug__:
                    log.debug("Calling IO callback: %r", cb)
                if isinstance(cb, tuple):
                    cb[0](*cb[1])
                else:
                    # Coroutine callback: clear any pending exception and
                    # put it back on the run queue.
                    cb.pend_throw(None)
                    self.call_soon(cb)
+
+
class StreamReader:
    # Coroutine-friendly reader over a non-blocking socket/stream.
    # *polls* is the object registered with the poller; *ios* is the object
    # actually read from.  They differ when a wrapper (e.g. SSL) is layered
    # on top of the raw socket.

    def __init__(self, polls, ios=None):
        if ios is None:
            ios = polls
        self.polls = polls
        self.ios = ios

    def read(self, n=-1):
        """Coroutine: read up to *n* bytes; returns b"" on EOF."""
        while True:
            yield IORead(self.polls)
            res = self.ios.read(n)
            if res is not None:
                break
            # This should not happen for real sockets, but can easily
            # happen for stream wrappers (ssl, websockets, etc.)
            #log.warn("Empty read")
        if not res:
            # EOF: unregister from the poller.
            yield IOReadDone(self.polls)
        return res

    def readexactly(self, n):
        """Coroutine: read exactly *n* bytes (short only on EOF)."""
        buf = b""
        while n:
            yield IORead(self.polls)
            res = self.ios.read(n)
            assert res is not None
            if not res:
                yield IOReadDone(self.polls)
                break
            buf += res
            n -= len(res)
        return buf

    def readline(self):
        """Coroutine: read until (and including) a newline, or EOF."""
        if DEBUG and __debug__:
            log.debug("StreamReader.readline()")
        buf = b""
        while True:
            yield IORead(self.polls)
            res = self.ios.readline()
            assert res is not None
            if not res:
                yield IOReadDone(self.polls)
                break
            buf += res
            if buf[-1] == 0x0a:
                break
        if DEBUG and __debug__:
            log.debug("StreamReader.readline(): %s", buf)
        return buf

    def aclose(self):
        """Coroutine: unregister from the poller and close the stream."""
        yield IOReadDone(self.polls)
        self.ios.close()

    def __repr__(self):
        return "<StreamReader %r %r>" % (self.polls, self.ios)
+
+
class StreamWriter:
    # Coroutine-friendly writer over a non-blocking socket/stream.

    def __init__(self, s, extra):
        self.s = s
        # Dict served by get_extra_info(), e.g. {"peername": addr}.
        self.extra = extra

    def awrite(self, buf, off=0, sz=-1):
        # This method is called awrite (async write) to not proliferate
        # incompatibility with original asyncio. Unlike original asyncio
        # whose .write() method is both not a coroutine and guaranteed
        # to return immediately (which means it has to buffer all the
        # data), this method is a coroutine.
        if sz == -1:
            sz = len(buf) - off
        if DEBUG and __debug__:
            log.debug("StreamWriter.awrite(): spooling %d bytes", sz)
        while True:
            res = self.s.write(buf, off, sz)
            # If we spooled everything, return immediately
            if res == sz:
                if DEBUG and __debug__:
                    log.debug("StreamWriter.awrite(): completed spooling %d bytes", res)
                return
            if res is None:
                # Non-blocking write could not accept anything right now.
                res = 0
            if DEBUG and __debug__:
                log.debug("StreamWriter.awrite(): spooled partial %d bytes", res)
            assert res < sz
            off += res
            sz -= res
            # Suspend until the stream is writable again.
            yield IOWrite(self.s)
            #assert s2.fileno() == self.s.fileno()
            if DEBUG and __debug__:
                log.debug("StreamWriter.awrite(): can write more")

    # Write piecewise content from iterable (usually, a generator)
    def awriteiter(self, iterable):
        for buf in iterable:
            yield from self.awrite(buf)

    def aclose(self):
        """Coroutine: unregister from the poller and close the stream."""
        yield IOWriteDone(self.s)
        self.s.close()

    def get_extra_info(self, name, default=None):
        """Return transport info recorded at construction (e.g. "peername")."""
        return self.extra.get(name, default)

    def __repr__(self):
        return "<StreamWriter %r>" % self.s
+
+
def open_connection(host, port, ssl=False):
    """Coroutine: open a TCP (optionally SSL) connection.

    Returns a (StreamReader, StreamWriter) pair.
    """
    if DEBUG and __debug__:
        log.debug("open_connection(%s, %s)", host, port)
    ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM)
    ai = ai[0]
    s = _socket.socket(ai[0], ai[1], ai[2])
    s.setblocking(False)
    try:
        s.connect(ai[-1])
    except OSError as e:
        # Non-blocking connect reports EINPROGRESS; completion is signalled
        # by the socket becoming writable (the IOWrite yield below).
        if e.args[0] != uerrno.EINPROGRESS:
            raise
    if DEBUG and __debug__:
        log.debug("open_connection: After connect")
    yield IOWrite(s)
#    if __debug__:
#        assert s2.fileno() == s.fileno()
    if DEBUG and __debug__:
        log.debug("open_connection: After iowait: %s", s)
    if ssl:
        print("Warning: uasyncio SSL support is alpha")
        import ussl
        s.setblocking(True)
        s2 = ussl.wrap_socket(s)
        s.setblocking(False)
        # Poll on the raw socket but read/write through the SSL wrapper.
        return StreamReader(s, s2), StreamWriter(s2, {})
    return StreamReader(s), StreamWriter(s, {})
+
+
def start_server(client_coro, host, port, backlog=10):
    """Coroutine: accept connections on host:port forever.

    For each accepted client, yields a new *client_coro* coroutine
    constructed with that client's (StreamReader, StreamWriter) pair,
    which the event loop schedules as a task.
    """
    if DEBUG and __debug__:
        log.debug("start_server(%s, %s)", host, port)
    ai = _socket.getaddrinfo(host, port, 0, _socket.SOCK_STREAM)
    ai = ai[0]
    s = _socket.socket(ai[0], ai[1], ai[2])
    s.setblocking(False)

    s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
    s.bind(ai[-1])
    s.listen(backlog)
    while True:
        if DEBUG and __debug__:
            log.debug("start_server: Before accept")
        yield IORead(s)
        if DEBUG and __debug__:
            log.debug("start_server: After iowait")
        s2, client_addr = s.accept()
        s2.setblocking(False)
        if DEBUG and __debug__:
            log.debug("start_server: After accept: %s", s2)
        extra = {"peername": client_addr}
        yield client_coro(StreamReader(s2), StreamWriter(s2, extra))


# Make uasyncio.core's get_event_loop() construct poll-based loops.
import uasyncio.core
uasyncio.core._event_loop_class = PollEventLoop
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/core.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/core.py
new file mode 100644
index 00000000..77fdb7a2
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/core.py
@@ -0,0 +1,315 @@
+import utime as time
+import utimeq
+import ucollections
+
+
+type_gen = type((lambda: (yield))())
+
+DEBUG = 0
+log = None
+
+def set_debug(val):
+    """Enable (truthy val) or disable debug logging for uasyncio.core.
+
+    The logging module is only imported when debugging is turned on,
+    to avoid its cost on constrained targets.
+    """
+    global DEBUG, log
+    DEBUG = val
+    if val:
+        import logging
+        log = logging.getLogger("uasyncio.core")
+
+
+class CancelledError(Exception):
+    """Raised inside a coroutine that was cancelled via cancel()."""
+    pass
+
+
+class TimeoutError(CancelledError):
+    """Raised inside a coroutine whose wait_for()/wait_for_ms() expired."""
+    pass
+
+
+class EventLoop:
+    """Cooperative scheduler.
+
+    Immediately-runnable tasks live in ``runq`` (a fixed-size deque);
+    time-delayed tasks live in ``waitq`` (a ``utimeq`` priority queue keyed
+    by ``ticks_ms`` deadlines).  Subclasses add I/O multiplexing by
+    overriding wait()/add_reader()/add_writer() etc.
+    """
+
+    def __init__(self, runq_len=16, waitq_len=16):
+        self.runq = ucollections.deque((), runq_len, True)
+        self.waitq = utimeq.utimeq(waitq_len)
+        # Current task being run. Task is a top-level coroutine scheduled
+        # in the event loop (sub-coroutines executed transparently by
+        # yield from/await, event loop "doesn't see" them).
+        self.cur_task = None
+
+    def time(self):
+        # Monotonic millisecond clock used for all deadline arithmetic.
+        return time.ticks_ms()
+
+    def create_task(self, coro):
+        # CPython 3.4.2
+        self.call_later_ms(0, coro)
+        # CPython asyncio incompatibility: we don't return Task object
+
+    def call_soon(self, callback, *args):
+        if __debug__ and DEBUG:
+            log.debug("Scheduling in runq: %s", (callback, args))
+        # Coroutines occupy one runq slot; plain callbacks occupy two
+        # consecutive slots (callback, then its args tuple).
+        self.runq.append(callback)
+        if not isinstance(callback, type_gen):
+            self.runq.append(args)
+
+    def call_later(self, delay, callback, *args):
+        # delay is in seconds (may be float); converted to integer ms.
+        self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args)
+
+    def call_later_ms(self, delay, callback, *args):
+        if not delay:
+            return self.call_soon(callback, *args)
+        self.call_at_(time.ticks_add(self.time(), delay), callback, args)
+
+    def call_at_(self, time, callback, args=()):
+        # NOTE: parameter `time` shadows the module-level `utime` alias
+        # inside this method.
+        if __debug__ and DEBUG:
+            log.debug("Scheduling in waitq: %s", (time, callback, args))
+        self.waitq.push(time, callback, args)
+
+    def wait(self, delay):
+        # Default wait implementation, to be overridden in subclasses
+        # with IO scheduling
+        if __debug__ and DEBUG:
+            log.debug("Sleeping for: %s", delay)
+        time.sleep_ms(delay)
+
+    def run_forever(self):
+        # Preallocated 3-slot buffer filled in place by waitq.pop()
+        # with (deadline, callback, args) -- avoids per-pop allocation.
+        cur_task = [0, 0, 0]
+        while True:
+            # Expire entries in waitq and move them to runq
+            tnow = self.time()
+            while self.waitq:
+                t = self.waitq.peektime()
+                delay = time.ticks_diff(t, tnow)
+                if delay > 0:
+                    break
+                self.waitq.pop(cur_task)
+                if __debug__ and DEBUG:
+                    log.debug("Moving from waitq to runq: %s", cur_task[1])
+                self.call_soon(cur_task[1], *cur_task[2])
+
+            # Process runq
+            l = len(self.runq)
+            if __debug__ and DEBUG:
+                log.debug("Entries in runq: %d", l)
+            # Only entries present at this point are processed; tasks
+            # scheduled while running wait for the next outer iteration.
+            while l:
+                cb = self.runq.popleft()
+                l -= 1
+                args = ()
+                if not isinstance(cb, type_gen):
+                    # Plain callback: its args tuple was queued right after it.
+                    args = self.runq.popleft()
+                    l -= 1
+                    if __debug__ and DEBUG:
+                        log.info("Next callback to run: %s", (cb, args))
+                    cb(*args)
+                    continue
+
+                if __debug__ and DEBUG:
+                    log.info("Next coroutine to run: %s", (cb, args))
+                self.cur_task = cb
+                delay = 0
+                try:
+                    # Identity check against the empty-tuple singleton is an
+                    # intentional micro-optimization here.
+                    if args is ():
+                        ret = next(cb)
+                    else:
+                        ret = cb.send(*args)
+                    if __debug__ and DEBUG:
+                        log.info("Coroutine %s yield result: %s", cb, ret)
+                    if isinstance(ret, SysCall1):
+                        arg = ret.arg
+                        if isinstance(ret, SleepMs):
+                            delay = arg
+                        elif isinstance(ret, IORead):
+                            # Park the coroutine until the fd is readable;
+                            # pend_throw(False) marks it as I/O-blocked.
+                            cb.pend_throw(False)
+                            self.add_reader(arg, cb)
+                            continue
+                        elif isinstance(ret, IOWrite):
+                            cb.pend_throw(False)
+                            self.add_writer(arg, cb)
+                            continue
+                        elif isinstance(ret, IOReadDone):
+                            self.remove_reader(arg)
+                        elif isinstance(ret, IOWriteDone):
+                            self.remove_writer(arg)
+                        elif isinstance(ret, StopLoop):
+                            return arg
+                        else:
+                            assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret))
+                    elif isinstance(ret, type_gen):
+                        # Yielding a generator schedules it as a new task.
+                        self.call_soon(ret)
+                    elif isinstance(ret, int):
+                        # Delay
+                        delay = ret
+                    elif ret is None:
+                        # Just reschedule
+                        pass
+                    elif ret is False:
+                        # Don't reschedule
+                        continue
+                    else:
+                        assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret))
+                except StopIteration as e:
+                    if __debug__ and DEBUG:
+                        log.debug("Coroutine finished: %s", cb)
+                    continue
+                except CancelledError as e:
+                    if __debug__ and DEBUG:
+                        log.debug("Coroutine cancelled: %s", cb)
+                    continue
+                # Currently all syscalls don't return anything, so we don't
+                # need to feed anything to the next invocation of coroutine.
+                # If that changes, need to pass that value below.
+                if delay:
+                    self.call_later_ms(delay, cb)
+                else:
+                    self.call_soon(cb)
+
+            # Wait until next waitq task or I/O availability
+            delay = 0
+            if not self.runq:
+                # -1 means "wait indefinitely" unless a waitq deadline exists.
+                delay = -1
+                if self.waitq:
+                    tnow = self.time()
+                    t = self.waitq.peektime()
+                    delay = time.ticks_diff(t, tnow)
+                    if delay < 0:
+                        delay = 0
+            self.wait(delay)
+
+    def run_until_complete(self, coro):
+        # Wrap coro so the loop stops once it finishes.
+        def _run_and_stop():
+            yield from coro
+            yield StopLoop(0)
+        self.call_soon(_run_and_stop())
+        self.run_forever()
+
+    def stop(self):
+        # Schedule a throwaway coroutine whose only action is StopLoop.
+        self.call_soon((lambda: (yield StopLoop(0)))())
+
+    def close(self):
+        pass
+
+
+class SysCall:
+    """Base class for scheduler requests yielded by coroutines."""
+
+    def __init__(self, *args):
+        self.args = args
+
+    def handle(self):
+        raise NotImplementedError
+
+# Optimized syscall with 1 arg
+class SysCall1(SysCall):
+
+    def __init__(self, arg):
+        # Single positional payload stored directly (no *args tuple).
+        self.arg = arg
+
+# Marker syscall types; the event loop dispatches on isinstance().
+class StopLoop(SysCall1):
+    # Stop run_forever() and return the argument from it.
+    pass
+
+class IORead(SysCall1):
+    # Suspend until the given stream is readable.
+    pass
+
+class IOWrite(SysCall1):
+    # Suspend until the given stream is writable.
+    pass
+
+class IOReadDone(SysCall1):
+    # Unregister the stream from read polling.
+    pass
+
+class IOWriteDone(SysCall1):
+    # Unregister the stream from write polling.
+    pass
+
+
+# Singleton event loop; the concrete class may be swapped by I/O-capable
+# front-end modules (see uasyncio's PollEventLoop).
+_event_loop = None
+_event_loop_class = EventLoop
+def get_event_loop(runq_len=16, waitq_len=16):
+    """Return the process-wide event loop, creating it on first call.
+
+    Queue sizes are honored only by the first call that creates the loop.
+    """
+    global _event_loop
+    if _event_loop is None:
+        _event_loop = _event_loop_class(runq_len, waitq_len)
+    return _event_loop
+
+def sleep(secs):
+    """Coroutine: suspend for secs seconds (yielding an int means delay in ms)."""
+    yield int(secs * 1000)
+
+# Implementation of sleep_ms awaitable with zero heap memory usage
+class SleepMs(SysCall1):
+    # A single module-level instance (`sleep_ms`) acts both as the awaitable
+    # factory (__call__) and the syscall object itself, so awaiting it
+    # allocates nothing.
+
+    def __init__(self):
+        self.v = None
+        self.arg = None
+
+    def __call__(self, arg):
+        self.v = arg
+        #print("__call__")
+        return self
+
+    def __iter__(self):
+        #print("__iter__")
+        return self
+
+    def __next__(self):
+        if self.v is not None:
+            # First resumption: hand the delay to the scheduler.
+            #print("__next__ syscall enter")
+            self.arg = self.v
+            self.v = None
+            return self
+        # Second resumption: finish by raising a pre-allocated StopIteration.
+        #print("__next__ syscall exit")
+        _stop_iter.__traceback__ = None
+        raise _stop_iter
+
+# Reused exception instance, again to avoid per-sleep allocation.
+_stop_iter = StopIteration()
+sleep_ms = SleepMs()
+
+
+def cancel(coro):
+    """Cancel a top-level coroutine by injecting CancelledError into it.
+
+    If the coroutine was blocked on I/O (pend_throw returned False), it is
+    rescheduled so the exception is actually delivered.
+    """
+    prev = coro.pend_throw(CancelledError())
+    if prev is False:
+        _event_loop.call_soon(coro)
+
+
+class TimeoutObj:
+    """Mutable holder linking a timeout callback to its target coroutine.
+
+    ``coro`` is set to None once the wrapped coroutine completes, which
+    disarms the pending timeout callback.
+    """
+    def __init__(self, coro):
+        self.coro = coro
+
+
+def wait_for_ms(coro, timeout):
+    """Run coro with a timeout in milliseconds.
+
+    If the timeout fires first, TimeoutError is thrown into the current
+    task; otherwise coro's result is returned and the timeout is disarmed.
+    """
+
+    def waiter(coro, timeout_obj):
+        res = yield from coro
+        if __debug__ and DEBUG:
+            log.debug("waiter: cancelling %s", timeout_obj)
+        # Completed in time: disarm the scheduled timeout callback.
+        timeout_obj.coro = None
+        return res
+
+    def timeout_func(timeout_obj):
+        if timeout_obj.coro:
+            if __debug__ and DEBUG:
+                log.debug("timeout_func: cancelling %s", timeout_obj.coro)
+            prev = timeout_obj.coro.pend_throw(TimeoutError())
+            #print("prev pend", prev)
+            if prev is False:
+                # Target was I/O-blocked; reschedule so the throw is delivered.
+                _event_loop.call_soon(timeout_obj.coro)
+
+    timeout_obj = TimeoutObj(_event_loop.cur_task)
+    _event_loop.call_later_ms(timeout, timeout_func, timeout_obj)
+    return (yield from waiter(coro, timeout_obj))
+
+
+def wait_for(coro, timeout):
+    """Run coro with a timeout given in seconds (delegates to wait_for_ms)."""
+    return wait_for_ms(coro, int(timeout * 1000))
+
+
+def coroutine(f):
+    # No-op decorator kept for CPython asyncio source compatibility:
+    # any generator function already works as a coroutine here.
+    return f
+
+#
+# The functions below are deprecated in uasyncio, and provided only
+# for compatibility with CPython asyncio
+#
+
+def ensure_future(coro, loop=_event_loop):
+    """Schedule coro on the event loop and return it (deprecated shim).
+
+    NOTE(review): the ``loop`` default is bound at import time (likely None)
+    and the parameter is ignored -- the global _event_loop is used instead.
+    """
+    _event_loop.call_soon(coro)
+    # CPython asyncio incompatibility: we don't return Task object
+    return coro
+
+
+# CPython asyncio incompatibility: Task is a function, not a class (for efficiency)
+def Task(coro, loop=_event_loop):
+    # Same as async()
+    # NOTE(review): as in ensure_future(), the `loop` argument is ignored.
+    _event_loop.call_soon(coro)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/queues.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/queues.py
new file mode 100644
index 00000000..04918ae5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/queues.py
@@ -0,0 +1,94 @@
+from collections.deque import deque
+from uasyncio.core import sleep
+
+
+class QueueEmpty(Exception):
+    """Exception raised by get_nowait() when the queue holds no items."""
+
+
+class QueueFull(Exception):
+    """Exception raised by put_nowait() when the queue is at maxsize."""
+
+
+class Queue:
+    """A queue, useful for coordinating producer and consumer coroutines.
+
+    If maxsize is less than or equal to zero, the queue size is infinite. If it
+    is an integer greater than 0, then "yield from put()" will block when the
+    queue reaches maxsize, until an item is removed by get().
+
+    Unlike the standard library Queue, you can reliably know this Queue's size
+    with qsize(), since your single-threaded uasyncio application won't be
+    interrupted between calling qsize() and doing an operation on the Queue.
+    """
+    # Polling interval (seconds) used by get()/put() while blocked; the
+    # blocking variants busy-poll via sleep() rather than using wakeups.
+    _attempt_delay = 0.1
+
+    def __init__(self, maxsize=0):
+        self.maxsize = maxsize
+        self._queue = deque()
+
+    def _get(self):
+        # FIFO removal; override point for alternative disciplines.
+        return self._queue.popleft()
+
+    def get(self):
+        """Returns generator, which can be used for getting (and removing)
+        an item from a queue.
+
+        Usage::
+
+            item = yield from queue.get()
+        """
+        while not self._queue:
+            yield from sleep(self._attempt_delay)
+        return self._get()
+
+    def get_nowait(self):
+        """Remove and return an item from the queue.
+
+        Return an item if one is immediately available, else raise QueueEmpty.
+        """
+        if not self._queue:
+            raise QueueEmpty()
+        return self._get()
+
+    def _put(self, val):
+        # FIFO insertion; override point for alternative disciplines.
+        self._queue.append(val)
+
+    def put(self, val):
+        """Returns generator which can be used for putting item in a queue.
+
+        Usage::
+
+            yield from queue.put(item)
+        """
+        while self.qsize() >= self.maxsize and self.maxsize:
+            yield from sleep(self._attempt_delay)
+        self._put(val)
+
+    def put_nowait(self, val):
+        """Put an item into the queue without blocking.
+
+        If no free slot is immediately available, raise QueueFull.
+        """
+        if self.qsize() >= self.maxsize and self.maxsize:
+            raise QueueFull()
+        self._put(val)
+
+    def qsize(self):
+        """Number of items in the queue."""
+        return len(self._queue)
+
+    def empty(self):
+        """Return True if the queue is empty, False otherwise."""
+        return not self._queue
+
+    def full(self):
+        """Return True if there are maxsize items in the queue.
+
+        Note: if the Queue was initialized with maxsize=0 (the default),
+        then full() is never True.
+        """
+        if self.maxsize <= 0:
+            return False
+        else:
+            return self.qsize() >= self.maxsize
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/synchro.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/synchro.py
new file mode 100644
index 00000000..62cd93cd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/synchro.py
@@ -0,0 +1,28 @@
+from uasyncio import core
+
+class Lock:
+    """Cooperative (single-threaded) lock for uasyncio coroutines.
+
+    acquire() is a coroutine; release() is a plain function that wakes the
+    first waiter, if any.
+    """
+
+    def __init__(self):
+        self.locked = False
+        # FIFO of coroutines waiting for the lock.
+        self.wlist = []
+
+    def release(self):
+        assert self.locked
+        self.locked = False
+        if self.wlist:
+            #print(self.wlist)
+            # Wake the earliest waiter; it re-checks `locked` when resumed.
+            coro = self.wlist.pop(0)
+            core.get_event_loop().call_soon(coro)
+
+    def acquire(self):
+        # As release() is not coro, assume we just released and going to acquire again
+        # so, yield first to let someone else to acquire it first
+        yield
+        #print("acquire:", self.locked)
+        while 1:
+            if not self.locked:
+                self.locked = True
+                return True
+            #print("putting", core.get_event_loop().cur_task, "on waiting list")
+            self.wlist.append(core.get_event_loop().cur_task)
+            # Yielding False tells the loop not to reschedule us; release()
+            # will requeue this task explicitly.
+            yield False
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/udp.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/udp.py
new file mode 100644
index 00000000..5987bf7d
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/udp.py
@@ -0,0 +1,57 @@
+import usocket
+from uasyncio import core
+
+
+DEBUG = 0
+log = None
+
+def set_debug(val):
+    """Enable (truthy val) or disable debug logging for uasyncio.udp."""
+    global DEBUG, log
+    DEBUG = val
+    if val:
+        import logging
+        log = logging.getLogger("uasyncio.udp")
+
+def socket(af=usocket.AF_INET):
+    """Create a non-blocking UDP (SOCK_DGRAM) socket for the given family."""
+    s = usocket.socket(af, usocket.SOCK_DGRAM)
+    s.setblocking(False)
+    return s
+
+def recv(s, n):
+    """Coroutine: wait until s is readable, then return up to n bytes.
+
+    On any exception (including cancellation) the socket is first
+    unregistered from the poller via IOReadDone, then the exception is
+    re-raised -- the bare except is deliberate cleanup, not swallowing.
+    """
+    try:
+        yield core.IORead(s)
+        return s.recv(n)
+    except:
+        #print("recv: exc, cleaning up")
+        #print(uasyncio.core._event_loop.objmap, uasyncio.core._event_loop.poller)
+        #uasyncio.core._event_loop.poller.dump()
+        yield core.IOReadDone(s)
+        #print(uasyncio.core._event_loop.objmap)
+        #uasyncio.core._event_loop.poller.dump()
+        raise
+
+def recvfrom(s, n):
+    """Coroutine: wait until s is readable, then return (data, addr).
+
+    Mirrors recv(): on exception the socket is unregistered from the
+    poller (IOReadDone) before the exception propagates.
+    """
+    try:
+        yield core.IORead(s)
+        return s.recvfrom(n)
+    except:
+        #print("recv: exc, cleaning up")
+        #print(uasyncio.core._event_loop.objmap, uasyncio.core._event_loop.poller)
+        #uasyncio.core._event_loop.poller.dump()
+        yield core.IOReadDone(s)
+        #print(uasyncio.core._event_loop.objmap)
+        #uasyncio.core._event_loop.poller.dump()
+        raise
+
+def sendto(s, buf, addr=None):
+    """Coroutine: send all of buf to addr, waiting for writability as needed.
+
+    Loops until the full buffer was accepted by a single sendto() call.
+    NOTE(review): the print() below looks like leftover debug output.
+    """
+    while 1:
+        res = s.sendto(buf, addr)
+        #print("send res:", res)
+        if res == len(buf):
+            return
+        print("sendto: IOWrite")
+        yield core.IOWrite(s)
+
+def close(s):
+    """Coroutine: unregister s from the event loop's poller, then close it."""
+    yield core.IOReadDone(s)
+    s.close()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/websocket/server.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/websocket/server.py
new file mode 100644
index 00000000..046b071e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uasyncio/websocket/server.py
@@ -0,0 +1,63 @@
+import uasyncio
+import uhashlib, ubinascii
+import websocket
+
+
+def make_respkey(webkey):
+    """Compute the Sec-WebSocket-Accept value for a client's key.
+
+    SHA-1 over key + fixed GUID, base64-encoded; b2a_base64 leaves a
+    trailing "\n" which the handshake writer relies on.
+    """
+    d = uhashlib.sha1(webkey)
+    d.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+    respkey = d.digest()
+    respkey = ubinascii.b2a_base64(respkey) #[:-1]
+    # Return with trailing "\n".
+    return respkey
+
+
+class WSWriter:
+    """Minimal WebSocket frame writer over a stream writer.
+
+    Only single unfragmented text frames (opcode 0x81) with payloads
+    shorter than 126 bytes are supported -- hence the assert.
+    """
+
+    def __init__(self, reader, writer):
+        # Reader is passed for symmetry with WSReader() and ignored.
+        self.s = writer
+
+    async def awrite(self, data):
+        assert len(data) < 126
+        # FIN + text-frame opcode, then 7-bit payload length, then payload.
+        await self.s.awrite(b"\x81")
+        await self.s.awrite(bytes([len(data)]))
+        await self.s.awrite(data)
+
+
+def WSReader(reader, writer):
+
+        webkey = None
+        while 1:
+            l = yield from reader.readline()
+            print(l)
+            if not l:
+                raise ValueError()
+            if l == b"\r\n":
+                break
+            if l.startswith(b'Sec-WebSocket-Key'):
+                webkey = l.split(b":", 1)[1]
+                webkey = webkey.strip()
+
+        if not webkey:
+            raise ValueError("Not a websocker request")
+
+        respkey = make_respkey(webkey)
+
+        await writer.awrite(b"""\
+HTTP/1.1 101 Switching Protocols\r
+Upgrade: websocket\r
+Connection: Upgrade\r
+Sec-WebSocket-Accept: """)
+        await writer.awrite(respkey)
+        # This will lead to "<key>\n\r\n" being written. Not exactly
+        # "\r\n\r\n", but browsers seem to eat it.
+        await writer.awrite("\r\n")
+        #await writer.awrite("\r\n\r\n")
+
+        print("Finished webrepl handshake")
+
+        ws = websocket.websocket(reader.ios)
+        rws = uasyncio.StreamReader(reader.ios, ws)
+
+        return rws
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucontextlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucontextlib.py
new file mode 100644
index 00000000..29445a02
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucontextlib.py
@@ -0,0 +1,106 @@
+"""Utilities for with-statement contexts.  See PEP 343.
+
+Original source code: https://hg.python.org/cpython/file/3.4/Lib/contextlib.py
+
+Not implemented:
+ - redirect_stdout;
+ - ExitStack.
+ - closing
+ - suppress
+"""
+
+class ContextDecorator(object):
+    "A base class or mixin that enables context managers to work as decorators."
+
+    def _recreate_cm(self):
+        """Return a recreated instance of self.
+
+        Allows an otherwise one-shot context manager like
+        _GeneratorContextManager to support use as
+        a decorator via implicit recreation.
+
+        This is a private interface just for _GeneratorContextManager.
+        See issue #11647 for details.
+        """
+        return self
+
+    def __call__(self, func):
+        # Decorator protocol: wrap each call of func in a fresh `with` block.
+        def inner(*args, **kwds):
+            with self._recreate_cm():
+                return func(*args, **kwds)
+        return inner
+
+
+class _GeneratorContextManager(ContextDecorator):
+    """Helper for @contextmanager decorator.
+
+    Drives the wrapped generator: __enter__ runs it to the first yield;
+    __exit__ resumes it (or throws the in-flight exception into it) and
+    verifies it stops.
+    """
+
+    def __init__(self, func, *args, **kwds):
+        self.gen = func(*args, **kwds)
+        # Kept so _recreate_cm() can build a fresh generator per call.
+        self.func, self.args, self.kwds = func, args, kwds
+
+    def _recreate_cm(self):
+        # _GCM instances are one-shot context managers, so the
+        # CM must be recreated each time a decorated function is
+        # called
+        return self.__class__(self.func, *self.args, **self.kwds)
+
+    def __enter__(self):
+        try:
+            return next(self.gen)
+        except StopIteration:
+            raise RuntimeError("generator didn't yield") from None
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            # Normal exit: the generator must yield exactly once.
+            try:
+                next(self.gen)
+            except StopIteration:
+                return
+            else:
+                raise RuntimeError("generator didn't stop")
+        else:
+            if value is None:
+                # Need to force instantiation so we can reliably
+                # tell if we get the same exception back
+                value = type()
+            try:
+                self.gen.throw(type, value, traceback)
+                raise RuntimeError("generator didn't stop after throw()")
+            except StopIteration as exc:
+                # Suppress the exception *unless* it's the same exception that
+                # was passed to throw().  This prevents a StopIteration
+                # raised inside the "with" statement from being suppressed
+                return exc is not value
+
+
+def contextmanager(func):
+    """@contextmanager decorator.
+
+    Typical usage:
+
+        @contextmanager
+        def some_generator(<arguments>):
+            <setup>
+            try:
+                yield <value>
+            finally:
+                <cleanup>
+
+    This makes this:
+
+        with some_generator(<arguments>) as <variable>:
+            <body>
+
+    equivalent to this:
+
+        <setup>
+        try:
+            <variable> = <value>
+            <body>
+        finally:
+            <cleanup>
+
+    """
+    # Defer generator creation until the CM is actually entered/called.
+    def helper(*args, **kwds):
+        return _GeneratorContextManager(func, *args, **kwds)
+    return helper
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucurses/__init__.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucurses/__init__.py
new file mode 100644
index 00000000..98cb9230
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/ucurses/__init__.py
@@ -0,0 +1,326 @@
+import os
+import tty, termios
+import select
+
+COLOR_BLACK    = 0
+COLOR_RED      = 1
+COLOR_GREEN    = 2
+COLOR_YELLOW   = 3
+COLOR_BLUE     = 4
+COLOR_MAGENTA  = 5
+COLOR_CYAN     = 6
+COLOR_WHITE    = 7
+
+A_NORMAL = 0
+A_BOLD = 1
+A_UNDERLINE = 2
+A_REVERSE = 4
+A_STANDOUT = A_REVERSE
+
+ATTRMAP = {
+A_NORMAL: b"\x1b[0m",
+A_BOLD: b"\x1b[1m",  # Some terminal emulators don't render bold by default, then use 4m for underline
+A_REVERSE: b"\x1b[7m",
+}
+
+# Use http://www.utf8-chartable.de/unicode-utf8-table.pl
+# for utf-8 pseudographic reference
+# "─"
+ACS_HLINE = b"\xe2\x94\x80"
+# "│"
+ACS_VLINE = b"\xe2\x94\x82"
+# "┌"
+ACS_ULCORNER = b"\xe2\x94\x8c"
+# "┐"
+ACS_URCORNER = b"\xe2\x94\x90"
+# "└"
+ACS_LLCORNER = b"\xe2\x94\x94"
+# "┘"
+ACS_LRCORNER = b"\xe2\x94\x98"
+
+KEY_F1 = 1031
+KEY_RESIZE = 1100
+KEY_MOUSE = 1101
+KEY_BTAB = 1090
+
+KEY_UP = 1001
+KEY_DOWN = 1002
+KEY_LEFT = 1003
+KEY_RIGHT = 1004
+KEY_HOME = 1005
+KEY_END = 1006
+KEY_PGUP = 1007
+KEY_PGDN = 1008
+KEY_QUIT = 1009
+KEY_ENTER = 1010
+KEY_BACKSPACE = 1011
+KEY_DELETE = 1012
+KEY_ESC = 0x1b
+
+KEY_DC = KEY_DELETE
+KEY_PPAGE = KEY_PGUP
+KEY_NPAGE = KEY_PGDN
+
+KEYMAP = {
+b"\x1b[A": KEY_UP,
+b"\x1b[B": KEY_DOWN,
+b"\x1b[D": KEY_LEFT,
+b"\x1b[C": KEY_RIGHT,
+b"\x1bOH": KEY_HOME,
+b"\x1bOF": KEY_END,
+b"\x1b[1~": KEY_HOME,
+b"\x1b[4~": KEY_END,
+b"\x1b[5~": KEY_PGUP,
+b"\x1b[6~": KEY_PGDN,
+b"\x03": KEY_QUIT,
+b"\r": KEY_ENTER,
+b"\x7f": KEY_BACKSPACE,
+b"\x1b[3~": KEY_DELETE,
+
+b"\x1bOA": KEY_UP,
+b"\x1bOB": KEY_DOWN,
+b"\x1bOD": KEY_LEFT,
+b"\x1bOC": KEY_RIGHT,
+b"\x1bOP": KEY_F1,
+b"\x1b": KEY_ESC,
+
+b"\x1b[Z": KEY_BTAB,
+}
+
+ALL_MOUSE_EVENTS = 0xff
+
+
+def _wr(s):
+    """Write s (str or bytes) raw to stdout (fd 1), encoding str as UTF-8."""
+    # TODO: When Python is 3.5, update this to use only bytes
+    if isinstance(s, str):
+        s = bytes(s, "utf-8")
+    os.write(1, s)
+
+def _move(row, col):
+    """Move the terminal cursor to 0-based (row, col) via ANSI CUP (1-based)."""
+    # TODO: When Python is 3.5, update this to use bytes
+    _wr("\x1b[%d;%dH" % (row + 1, col + 1))
+
+# Clear specified number of positions
+def _clear_num_pos(num):
+    # ANSI ECH: erase `num` characters from the cursor without moving it.
+    if num > 0:
+        _wr("\x1b[%dX" % num)
+
+def _draw_box(left, top, width, height):
+    """Draw a box with UTF-8 line-drawing characters at the given geometry."""
+    bottom = top + height - 1
+    # Top edge with corners.
+    _move(top, left)
+    _wr(ACS_ULCORNER)
+    hor = ACS_HLINE * (width - 2)
+    _wr(hor)
+    _wr(ACS_URCORNER)
+
+    # Bottom edge with corners.
+    _move(bottom, left)
+    _wr(ACS_LLCORNER)
+    _wr(hor)
+    _wr(ACS_LRCORNER)
+
+    # Vertical sides, one row at a time.
+    top += 1
+    while top < bottom:
+        _move(top, left)
+        _wr(ACS_VLINE)
+        _move(top, left + width - 1)
+        _wr(ACS_VLINE)
+        top += 1
+        top += 1
+
+
+class error(Exception):
+    """Generic ucurses error (mirrors curses.error)."""
+    pass
+
+
+class Window:
+    """A rectangular screen region addressed with window-relative coords.
+
+    Output goes straight to the terminal via ANSI escapes; refresh() is a
+    no-op because nothing is buffered.  getch() reads raw bytes from stdin
+    and maps escape sequences through KEYMAP.
+    """
+
+    def __init__(self, lines, cols, y, x):
+        self.lines = lines
+        self.cols = cols
+        # Top-left corner in absolute screen coordinates.
+        self.y = y
+        self.x = x
+        self.bkgattr = A_NORMAL
+        # Buffer of bytes read ahead by getch() and index of next byte.
+        self.keybuf = None
+        self.keyi = 0
+        # getch() timeout in ms; -1 means block indefinitely.
+        self.keydelay = -1
+
+    def _goto(self, row, col):
+        # Translate window-relative coords to absolute and move cursor.
+        _move(self.y + row, self.x + col)
+
+    def move(self, y, x):
+        # Maybe need to cache coords?
+        self._goto(y, x)
+
+    def getmaxyx(self):
+        return (self.lines, self.cols)
+
+    def addstr(self, y, x, str, attr=A_NORMAL):
+        self._goto(y, x)
+        # TODO: Should be "ORed"
+        if attr == A_NORMAL:
+            attr = self.bkgattr
+        if attr != A_NORMAL:
+            # Wrap the text in attribute-on / attribute-reset sequences.
+            _wr(ATTRMAP[attr])
+            _wr(str)
+            _wr(ATTRMAP[A_NORMAL])
+        else:
+            _wr(str)
+
+    def addnstr(self, y, x, str, n, attr=A_NORMAL):
+        self.addstr(y, x, str[:n], attr)
+
+    def addch(self, y, x, ch, attr=A_NORMAL):
+        if isinstance(ch, int):
+            ch = chr(ch)
+        self.addstr(y, x, ch, attr)
+
+    def attron(self, attr):
+        pass
+
+    def attroff(self, attr):
+        pass
+
+    def attrset(self, attr):
+        pass
+
+    def bkgdset(self, ch, attr=A_NORMAL):
+        # Only the attribute is honored; the fill character is ignored.
+        self.bkgattr = attr
+
+    def erase(self):
+        for i in range(self.lines):
+            self._goto(i, 0)
+            _clear_num_pos(self.cols)
+
+    def border(self):
+        _draw_box(self.x, self.y, self.cols, self.lines)
+
+    def hline(self, y, x, ch, n):
+        self.move(y, x)
+        _wr(ch * n)
+
+    def vline(self, y, x, ch, n):
+        for i in range(n):
+            self.move(y + i, x)
+            _wr(ch)
+
+    def refresh(self):
+        # Unbuffered output: nothing to flush.
+        pass
+
+    def redrawwin(self):
+        pass
+
+    def keypad(self, yes):
+        pass
+
+    def timeout(self, delay):
+        self.keydelay = delay
+
+    def nodelay(self, yes):
+        if yes:
+            self.keydelay = 0
+        else:
+            self.keydelay = -1
+
+    def getch(self):
+        """Return the next key: an int byte value, a KEY_* code, or -1 on timeout."""
+        # Drain bytes read ahead on a previous call first.
+        if self.keybuf and self.keyi < len(self.keybuf):
+            c = self.keybuf[self.keyi]
+            self.keyi += 1
+            return c
+
+        if self.keydelay >= 0:
+            # Wait for stdin readability up to keydelay ms.
+            USE_EPOLL = 1
+            if USE_EPOLL:
+                poll = select.epoll()
+                poll.register(0, select.EPOLLIN)
+                res = poll.poll(self.keydelay / 1000)
+                poll.unregister(0)
+                poll.close()
+            else:
+                res = select.select([0], [], [], self.keydelay / 1000)[0]
+            if not res:
+                return -1
+
+        key = os.read(0, 32)
+        if key[0] != 0x1b:
+            # Plain bytes: return the first, buffer the rest for later calls.
+            self.keybuf = key
+            self.keyi = 1
+            key = key[0]
+
+        else:
+            # Escape sequence: must match KEYMAP in full.
+            if key in KEYMAP:
+                key = KEYMAP[key]
+            else:
+                assert False, repr(key)
+        return key
+
+
+SCREEN = Window(24, 80, 0, 0)
+org_termios = None
+
+
+def wrapper(func):
+    global org_termios
+    org_termios = termios.tcgetattr(0)
+    res = func()
+    endwin()
+    return res
+
+def initscr():
+    """Save current terminal attributes and return the full-screen Window."""
+    global org_termios
+    org_termios = termios.tcgetattr(0)
+    return SCREEN
+
+def doupdate():
+    # No buffering in this implementation, so nothing to update.
+    pass
+
+def endwin():
+    """Restore the terminal attributes saved by initscr()/wrapper()."""
+    global org_termios
+    _wr(b"\r")
+    termios.tcsetattr(0, termios.TCSANOW, org_termios)
+
+def raw():
+    """Put stdin into raw mode (no echo, no line buffering, no signals)."""
+    tty.setraw(0)
+
+def cbreak():
+    #TODO
+    pass
+
+def nocbreak():
+    #TODO
+    pass
+
+def echo():
+    #TODO
+    pass
+
+def noecho():
+    #TODO
+    pass
+
+def meta(yes):
+    #TODO
+    pass
+
+def mousemask(mask):
+    # Mouse reporting - X10 compatibility mode; `mask` is ignored.
+    _wr(b"\x1b[?9h")
+
+def has_colors():
+    # Color support is not implemented.
+    return False
+
+def can_change_color():
+    return False
+
+def curs_set(visibility):
+    """Show (visibility > 0) or hide the cursor via DECTCEM escapes."""
+    if visibility > 0:
+        _wr(b"\x1b[?25h")
+    else:
+        _wr(b"\x1b[?25l")
+
+def beep():
+    # Terminal bell.
+    _wr(b"\x07")
+
+def newwin(lines, cols, y=0, x=0):
+    """Create a Window; 0 for lines/cols defaults to the full screen size."""
+    #print("newwin(%d, %d, %d, %d)" % (lines, cols, y, x))
+    cols = cols or SCREEN.cols
+    lines = lines or SCREEN.lines
+    return Window(lines, cols, y, x)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/udnspkt.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/udnspkt.py
new file mode 100644
index 00000000..4b798bf1
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/udnspkt.py
@@ -0,0 +1,78 @@
+import uio
+
+
+def write_fqdn(buf, name):
+    """Append `name` to buf in DNS wire format: length-prefixed labels,
+    terminated by a zero-length label."""
+    parts = name.split(".")
+    for p in parts:
+        buf.writebin("B", len(p))
+        buf.write(p)
+    buf.writebin("B", 0)
+
+
+def skip_fqdn(buf):
+    """Advance buf past one DNS name (labels or a 2-byte compression pointer)."""
+    while True:
+        sz = buf.readbin("B")
+        if not sz:
+            # Zero-length label: end of name.
+            break
+        if sz >= 0xc0:
+            # Top two bits set: compression pointer; consume its second byte.
+            buf.readbin("B")
+            break
+        buf.read(sz)
+
+
+def make_req(buf, fqdn, is_ipv6):
+    """Serialize a single-question DNS query for fqdn into buf.
+
+    Asks for an A record, or AAAA when is_ipv6 is true.
+    """
+    typ = 1  # A
+    if is_ipv6:
+        typ = 28  # AAAA
+
+    # Header: ID=0, flags=0x0100 (recursion desired).
+    buf.writebin(">H", 0)
+    buf.writebin(">H", 0x100)
+    # q count
+    buf.writebin(">H", 1)
+    buf.writebin(">H", 0)
+    # squashed together
+    # (NSCOUNT and ARCOUNT written as one 32-bit zero.)
+    buf.writebin(">I", 0)
+
+    # Question section: QNAME, QTYPE, QCLASS=IN.
+    write_fqdn(buf, fqdn)
+    buf.writebin(">H", typ)
+    buf.writebin(">H", 1)  # Class
+
+
+def parse_resp(buf, is_ipv6):
+    """Parse a DNS response in buf; return the first matching address.
+
+    Returns the raw RDATA bytes of the first A (or AAAA when is_ipv6)
+    answer record, or None if no record of that type is present.
+    """
+    typ = 1  # A
+    if is_ipv6:
+        typ = 28  # AAAA
+
+    id = buf.readbin(">H")
+    flags = buf.readbin(">H")
+    # QR bit must be set: this must be a response, not a query.
+    assert flags & 0x8000
+    qcnt = buf.readbin(">H")
+    acnt = buf.readbin(">H")
+    nscnt = buf.readbin(">H")
+    addcnt = buf.readbin(">H")
+    #print(qcnt, acnt, nscnt, addcnt)
+
+    # Skip the echoed question section (name, QTYPE, QCLASS).
+    skip_fqdn(buf)
+    v = buf.readbin(">H")
+    #print(v)
+    v = buf.readbin(">H")
+    #print(v)
+
+    for i in range(acnt):
+        #print("Resp #%d" % i)
+        #v = read_fqdn(buf)
+        #print(v)
+        skip_fqdn(buf)
+        t = buf.readbin(">H")
+        #print("Type", t)
+        v = buf.readbin(">H")
+        #print("Class", v)
+        v = buf.readbin(">I")
+        #print("TTL", v)
+        rlen = buf.readbin(">H")
+        #print("rlen", rlen)
+        rval = buf.read(rlen)
+        #print(rval)
+
+        if t == typ:
+            return rval
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/robust.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/robust.py
new file mode 100644
index 00000000..7ee40e02
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/robust.py
@@ -0,0 +1,43 @@
+import utime
+from . import simple
+
+class MQTTClient(simple.MQTTClient):
+    """MQTT client that transparently reconnects on OSError.
+
+    publish() and wait_msg() retry forever, reconnecting between attempts;
+    DELAY is the fixed pause (seconds) between reconnect attempts.
+    """
+
+    DELAY = 2
+    DEBUG = False
+
+    def delay(self, i):
+        # `i` (attempt number) is accepted for subclass backoff strategies
+        # but ignored here: the pause is a fixed DELAY seconds.
+        utime.sleep(self.DELAY)
+
+    def log(self, in_reconnect, e):
+        if self.DEBUG:
+            if in_reconnect:
+                print("mqtt reconnect: %r" % e)
+            else:
+                print("mqtt: %r" % e)
+
+    def reconnect(self):
+        i = 0
+        while 1:
+            try:
+                # clean_session=False so the broker keeps our subscriptions.
+                return super().connect(False)
+            except OSError as e:
+                self.log(True, e)
+                i += 1
+                self.delay(i)
+
+    def publish(self, topic, msg, retain=False, qos=0):
+        while 1:
+            try:
+                return super().publish(topic, msg, retain, qos)
+            except OSError as e:
+                self.log(False, e)
+            self.reconnect()
+
+    def wait_msg(self):
+        while 1:
+            try:
+                return super().wait_msg()
+            except OSError as e:
+                self.log(False, e)
+            self.reconnect()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/simple.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/simple.py
new file mode 100644
index 00000000..8216fa5e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/umqtt/simple.py
@@ -0,0 +1,204 @@
+import usocket as socket
+import ustruct as struct
+from ubinascii import hexlify
+
class MQTTException(Exception):
    # Raised with the raw MQTT return/error code as the exception argument.
    pass
+
class MQTTClient:
    """Minimal MQTT 3.1.1 client supporting QoS 0/1, last will and TLS.

    NOTE(review): several sock.write() calls pass a byte count as a second
    argument (write(buf, sz)); that is the MicroPython stream API, not
    CPython's.
    """

    def __init__(self, client_id, server, port=0, user=None, password=None, keepalive=0,
                 ssl=False, ssl_params={}):
        # Port 0 selects the standard port: 8883 for TLS, 1883 for plain TCP.
        if port == 0:
            port = 8883 if ssl else 1883
        self.client_id = client_id
        self.sock = None
        self.server = server
        self.port = port
        self.ssl = ssl
        self.ssl_params = ssl_params
        self.pid = 0    # packet id counter for QoS>0 publishes / subscribes
        self.cb = None  # message callback, invoked as cb(topic, msg)
        self.user = user
        self.pswd = password
        self.keepalive = keepalive
        self.lw_topic = None  # last-will parameters, see set_last_will()
        self.lw_msg = None
        self.lw_qos = 0
        self.lw_retain = False

    def _send_str(self, s):
        # MQTT strings are prefixed with a 16-bit big-endian length.
        self.sock.write(struct.pack("!H", len(s)))
        self.sock.write(s)

    def _recv_len(self):
        # Decode the MQTT "remaining length" variable-length integer:
        # 7 bits per byte, MSB set means another byte follows.
        n = 0
        sh = 0
        while 1:
            b = self.sock.read(1)[0]
            n |= (b & 0x7f) << sh
            if not b & 0x80:
                return n
            sh += 7

    def set_callback(self, f):
        """Set the callback invoked as f(topic, msg) for received publishes."""
        self.cb = f

    def set_last_will(self, topic, msg, retain=False, qos=0):
        """Register a will message the broker sends if this client dies."""
        assert 0 <= qos <= 2
        assert topic
        self.lw_topic = topic
        self.lw_msg = msg
        self.lw_qos = qos
        self.lw_retain = retain

    def connect(self, clean_session=True):
        """Open the socket and send CONNECT; return the session-present flag.

        Raises MQTTException with the CONNACK return code on refusal.
        """
        self.sock = socket.socket()
        addr = socket.getaddrinfo(self.server, self.port)[0][-1]
        self.sock.connect(addr)
        if self.ssl:
            import ussl
            self.sock = ussl.wrap_socket(self.sock, **self.ssl_params)
        # Fixed header (0x10 = CONNECT) plus room for 4 length bytes.
        premsg = bytearray(b"\x10\0\0\0\0\0")
        # Variable header: protocol name "MQTT", level 4, flags, keepalive.
        msg = bytearray(b"\x04MQTT\x04\x02\0\0")

        sz = 10 + 2 + len(self.client_id)
        msg[6] = clean_session << 1
        if self.user is not None:
            sz += 2 + len(self.user) + 2 + len(self.pswd)
            msg[6] |= 0xC0  # username + password flags
        if self.keepalive:
            assert self.keepalive < 65536
            msg[7] |= self.keepalive >> 8
            msg[8] |= self.keepalive & 0x00FF
        if self.lw_topic:
            sz += 2 + len(self.lw_topic) + 2 + len(self.lw_msg)
            # Will flag, will QoS and will-retain bits.
            msg[6] |= 0x4 | (self.lw_qos & 0x1) << 3 | (self.lw_qos & 0x2) << 3
            msg[6] |= self.lw_retain << 5

        # Encode the remaining length as a variable-length integer.
        i = 1
        while sz > 0x7f:
            premsg[i] = (sz & 0x7f) | 0x80
            sz >>= 7
            i += 1
        premsg[i] = sz

        self.sock.write(premsg, i + 2)
        self.sock.write(msg)
        #print(hex(len(msg)), hexlify(msg, ":"))
        self._send_str(self.client_id)
        if self.lw_topic:
            self._send_str(self.lw_topic)
            self._send_str(self.lw_msg)
        if self.user is not None:
            self._send_str(self.user)
            self._send_str(self.pswd)
        # CONNACK: 0x20, length 2, session-present flag, return code.
        resp = self.sock.read(4)
        assert resp[0] == 0x20 and resp[1] == 0x02
        if resp[3] != 0:
            raise MQTTException(resp[3])
        return resp[2] & 1

    def disconnect(self):
        """Send DISCONNECT and close the socket."""
        self.sock.write(b"\xe0\0")
        self.sock.close()

    def ping(self):
        """Send PINGREQ; the PINGRESP is consumed by wait_msg()."""
        self.sock.write(b"\xc0\0")

    def publish(self, topic, msg, retain=False, qos=0):
        """Publish msg to topic; blocks until PUBACK for qos=1 (qos=2 unsupported)."""
        pkt = bytearray(b"\x30\0\0\0")  # PUBLISH fixed header
        pkt[0] |= qos << 1 | retain
        sz = 2 + len(topic) + len(msg)
        if qos > 0:
            sz += 2  # room for the packet id
        assert sz < 2097152  # remaining length must fit in 3 bytes here
        i = 1
        while sz > 0x7f:
            pkt[i] = (sz & 0x7f) | 0x80
            sz >>= 7
            i += 1
        pkt[i] = sz
        #print(hex(len(pkt)), hexlify(pkt, ":"))
        self.sock.write(pkt, i + 1)
        self._send_str(topic)
        if qos > 0:
            self.pid += 1
            pid = self.pid
            # Reuse pkt's first two bytes for the big-endian packet id.
            struct.pack_into("!H", pkt, 0, pid)
            self.sock.write(pkt, 2)
        self.sock.write(msg)
        if qos == 1:
            # Wait for the matching PUBACK (0x40).
            while 1:
                op = self.wait_msg()
                if op == 0x40:
                    sz = self.sock.read(1)
                    assert sz == b"\x02"
                    rcv_pid = self.sock.read(2)
                    rcv_pid = rcv_pid[0] << 8 | rcv_pid[1]
                    if pid == rcv_pid:
                        return
        elif qos == 2:
            assert 0  # QoS 2 not implemented

    def subscribe(self, topic, qos=0):
        """Send SUBSCRIBE and block until the matching SUBACK arrives."""
        assert self.cb is not None, "Subscribe callback is not set"
        pkt = bytearray(b"\x82\0\0\0")
        self.pid += 1
        struct.pack_into("!BH", pkt, 1, 2 + 2 + len(topic) + 1, self.pid)
        #print(hex(len(pkt)), hexlify(pkt, ":"))
        self.sock.write(pkt)
        self._send_str(topic)
        self.sock.write(qos.to_bytes(1, "little"))
        while 1:
            op = self.wait_msg()
            if op == 0x90:  # SUBACK
                resp = self.sock.read(4)
                #print(resp)
                # Packet id must match what we sent (bytes 2-3 of pkt).
                assert resp[1] == pkt[2] and resp[2] == pkt[3]
                if resp[3] == 0x80:
                    raise MQTTException(resp[3])
                return

    # Wait for a single incoming MQTT message and process it.
    # Subscribed messages are delivered to a callback previously
    # set by .set_callback() method. Other (internal) MQTT
    # messages processed internally.
    def wait_msg(self):
        res = self.sock.read(1)
        self.sock.setblocking(True)
        if res is None:
            return None
        if res == b"":
            raise OSError(-1)
        if res == b"\xd0":  # PINGRESP
            sz = self.sock.read(1)[0]
            assert sz == 0
            return None
        op = res[0]
        if op & 0xf0 != 0x30:
            # Not a PUBLISH: return the opcode for publish()/subscribe().
            return op
        sz = self._recv_len()
        topic_len = self.sock.read(2)
        topic_len = (topic_len[0] << 8) | topic_len[1]
        topic = self.sock.read(topic_len)
        sz -= topic_len + 2
        if op & 6:  # QoS > 0: a packet id follows the topic
            pid = self.sock.read(2)
            pid = pid[0] << 8 | pid[1]
            sz -= 2
        msg = self.sock.read(sz)
        self.cb(topic, msg)
        if op & 6 == 2:
            # QoS 1: acknowledge with PUBACK carrying the same packet id.
            pkt = bytearray(b"\x40\x02\0\0")
            struct.pack_into("!H", pkt, 2, pid)
            self.sock.write(pkt)
        elif op & 6 == 4:
            assert 0  # QoS 2 not implemented

    # Checks whether a pending message from server is available.
    # If not, returns immediately with None. Otherwise, does
    # the same processing as wait_msg.
    def check_msg(self):
        self.sock.setblocking(False)
        return self.wait_msg()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unicodedata.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unicodedata.py
new file mode 100644
index 00000000..2b6cfd7e
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unicodedata.py
@@ -0,0 +1,6 @@
def east_asian_width(c):
    # Stub: report every character as width 1.
    # NOTE(review): CPython's unicodedata returns a category string
    # ('Na', 'W', ...); callers of this port appear to expect a numeric
    # width instead - confirm before reusing elsewhere.
    return 1
+
+
def normalize(form, unistr):
    # Stub: return the string unchanged regardless of the requested form.
    return unistr
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unittest.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unittest.py
new file mode 100644
index 00000000..0361c864
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/unittest.py
@@ -0,0 +1,224 @@
+import sys
+
+
class SkipTest(Exception):
    # Raised inside a test to mark it as skipped; args[0] is the reason.
    pass
+
+
class AssertRaisesContext:
    """Context manager asserting that the expected exception type is raised."""

    def __init__(self, exc):
        self.expected = exc

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # No exception at all is a test failure.
        assert exc_type is not None, "%r not raised" % self.expected
        # Suppress the exception only when it matches the expectation;
        # anything else propagates.
        return issubclass(exc_type, self.expected)
+
+
class TestCase:
    """Minimal TestCase with the common assert* helpers.

    Failures are reported by raising AssertionError via plain ``assert``,
    so running under ``python -O`` would disable all checks.
    """

    def fail(self, msg=''):
        """Unconditionally fail with msg."""
        assert False, msg

    def assertEqual(self, x, y, msg=''):
        if not msg:
            msg = "%r vs (expected) %r" % (x, y)
        assert x == y, msg

    def assertNotEqual(self, x, y, msg=''):
        if not msg:
            msg = "%r not expected to be equal %r" % (x, y)
        assert x != y, msg

    def assertAlmostEqual(self, x, y, places=None, msg='', delta=None):
        """Assert x ~= y within delta, or within `places` decimal places (default 7)."""
        if x == y:
            return
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")

        if delta is not None:
            if abs(x - y) <= delta:
                return
            if not msg:
                msg = '%r != %r within %r delta' % (x, y, delta)
        else:
            if places is None:
                places = 7
            if round(abs(y-x), places) == 0:
                return
            if not msg:
                msg = '%r != %r within %r places' % (x, y, places)

        assert False, msg

    def assertNotAlmostEqual(self, x, y, places=None, msg='', delta=None):
        """Inverse of assertAlmostEqual."""
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")

        if delta is not None:
            if not (x == y) and abs(x - y) > delta:
                return
            if not msg:
                msg = '%r == %r within %r delta' % (x, y, delta)
        else:
            if places is None:
                places = 7
            if not (x == y) and round(abs(y-x), places) != 0:
                return
            if not msg:
                msg = '%r == %r within %r places' % (x, y, places)

        assert False, msg

    def assertIs(self, x, y, msg=''):
        if not msg:
            msg = "%r is not %r" % (x, y)
        assert x is y, msg

    def assertIsNot(self, x, y, msg=''):
        if not msg:
            msg = "%r is %r" % (x, y)
        assert x is not y, msg

    def assertIsNone(self, x, msg=''):
        if not msg:
            msg = "%r is not None" % x
        assert x is None, msg

    def assertIsNotNone(self, x, msg=''):
        if not msg:
            msg = "%r is None" % x
        assert x is not None, msg

    def assertTrue(self, x, msg=''):
        if not msg:
            msg = "Expected %r to be True" % x
        assert x, msg

    def assertFalse(self, x, msg=''):
        if not msg:
            msg = "Expected %r to be False" % x
        assert not x, msg

    def assertIn(self, x, y, msg=''):
        if not msg:
            msg = "Expected %r to be in %r" % (x, y)
        assert x in y, msg

    def assertIsInstance(self, x, y, msg=''):
        assert isinstance(x, y), msg

    def assertRaises(self, exc, func=None, *args, **kwargs):
        """Assert that func(*args, **kwargs) raises exc.

        Without func, return a context manager for `with` usage.
        """
        if func is None:
            return AssertRaisesContext(exc)

        try:
            func(*args, **kwargs)
        except Exception as e:
            if isinstance(e, exc):
                return
            raise
        # Raised OUTSIDE the try block: the original implementation asserted
        # inside it, so the "not raised" AssertionError was silently swallowed
        # whenever exc was AssertionError itself.
        assert False, "%r not raised" % exc
+
+
+
def skip(msg):
    """Decorator factory: unconditionally skip the decorated test with reason msg."""
    def _decor(fun):
        # The original test body is discarded; the replacement only raises.
        def _replacement(self):
            raise SkipTest(msg)
        return _replacement
    return _decor
+
def skipIf(cond, msg):
    """Decorator: skip the test when cond is truthy, otherwise leave it as-is."""
    return skip(msg) if cond else (lambda x: x)
+
def skipUnless(cond, msg):
    """Decorator: run the test only when cond is truthy, otherwise skip it."""
    return (lambda x: x) if cond else skip(msg)
+
+
class TestSuite:
    """Plain container collecting test case classes for a TestRunner."""
    def __init__(self):
        self.tests = []
    def addTest(self, cls):
        self.tests.append(cls)
+
class TestRunner:
    """Runs every test class in a suite and prints a unittest-style summary."""

    def run(self, suite):
        result = TestResult()
        for case_cls in suite.tests:
            run_class(case_cls, result)

        print("Ran %d tests\n" % result.testsRun)
        if result.failuresNum or result.errorsNum:
            print("FAILED (failures=%d, errors=%d)" % (result.failuresNum, result.errorsNum))
        else:
            summary = "OK"
            if result.skippedNum:
                summary += " (%d skipped)" % result.skippedNum
            print(summary)

        return result
+
class TestResult:
    """Aggregated counters for a test run."""

    def __init__(self):
        self.errorsNum = 0
        self.failuresNum = 0
        self.skippedNum = 0
        self.testsRun = 0

    def wasSuccessful(self):
        # Success means no errors and no failures; skips are acceptable.
        return not (self.errorsNum or self.failuresNum)
+
# TODO: Uncompliant
def run_class(c, test_result):
    """Instantiate test class c and run every test* method, updating test_result."""
    o = c()
    # setUp/tearDown are optional on the test class.
    set_up = getattr(o, "setUp", lambda: None)
    tear_down = getattr(o, "tearDown", lambda: None)
    for name in dir(o):
        if name.startswith("test"):
            print("%s (%s) ..." % (name, c.__qualname__), end="")
            m = getattr(o, name)
            set_up()
            try:
                test_result.testsRun += 1
                m()
                print(" ok")
            except SkipTest as e:
                print(" skipped:", e.args[0])
                test_result.skippedNum += 1
            except:
                # Deliberately broad: any other exception counts as a failure.
                print(" FAIL")
                test_result.failuresNum += 1
                # Uncomment to investigate failure in detail
                #raise
                continue
            finally:
                tear_down()
+
+
def main(module="__main__"):
    """Discover TestCase subclasses in *module*, run them, exit non-zero on failure."""
    def test_cases(m):
        # Yield every TestCase subclass defined in module m.
        # (isinstance(c, object) is always true; kept as-is from upstream.)
        for tn in dir(m):
            c = getattr(m, tn)
            if isinstance(c, object) and isinstance(c, type) and issubclass(c, TestCase):
                yield c

    m = __import__(module)
    suite = TestSuite()
    for c in test_cases(m):
        suite.addTest(c)
    runner = TestRunner()
    result = runner.run(suite)
    # Terminate with non zero return code in case of failures
    sys.exit(result.failuresNum > 0)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip.py
new file mode 100644
index 00000000..a400c317
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip.py
@@ -0,0 +1,314 @@
+#
+# upip - Package manager for MicroPython
+#
+# Copyright (c) 2015-2018 Paul Sokolovsky
+#
+# Licensed under the MIT license.
+#
+import sys
+import gc
+import uos as os
+import uerrno as errno
+import ujson as json
+import uzlib
+import upip_utarfile as tarfile
+gc.collect()
+
+
+debug = False
+install_path = None
+cleanup_files = []
+gzdict_sz = 16 + 15
+
+file_buf = bytearray(512)
+
class NotFoundError(Exception):
    # Raised when PyPI answers 404/301 for a requested package URL.
    pass
+
def op_split(path):
    """Split a '/'-separated path into (head, tail), like os.path.split."""
    if not path:
        return ("", "")
    parts = path.rsplit("/", 1)
    if len(parts) == 1:
        # No separator: the whole path is the tail.
        return ("", path)
    # A path like "/name" keeps the root as its head.
    return (parts[0] or "/", parts[1])
+
def op_basename(path):
    """Return the final component of a '/'-separated path."""
    return op_split(path)[1]
+
+# Expects *file* name
+def _makedirs(name, mode=0o777):
+    ret = False
+    s = ""
+    comps = name.rstrip("/").split("/")[:-1]
+    if comps[0] == "":
+        s = "/"
+    for c in comps:
+        if s and s[-1] != "/":
+            s += "/"
+        s += c
+        try:
+            os.mkdir(s)
+            ret = True
+        except OSError as e:
+            if e.args[0] != errno.EEXIST and e.args[0] != errno.EISDIR:
+                raise
+            ret = False
+    return ret
+
+
def save_file(fname, subf):
    """Copy stream subf to file fname using the shared 512-byte buffer."""
    global file_buf
    with open(fname, "wb") as outf:
        while True:
            sz = subf.readinto(file_buf)
            if not sz:
                break
            # MicroPython stream API: write(buf, sz) writes only the
            # first sz bytes of the buffer.
            outf.write(file_buf, sz)
+
def install_tar(f, prefix):
    """Extract package files from TarFile f under prefix, skipping metadata.

    Returns a dict which may contain 'deps': the raw bytes of the package's
    requires.txt, if present.
    """
    meta = {}
    for info in f:
        #print(info)
        fname = info.name
        try:
            # Drop the leading "<pkg>-<version>/" directory component.
            fname = fname[fname.index("/") + 1:]
        except ValueError:
            fname = ""

        save = True
        for p in ("setup.", "PKG-INFO", "README"):
                #print(fname, p)
                if fname.startswith(p) or ".egg-info" in fname:
                    if fname.endswith("/requires.txt"):
                        # Keep the dependency list for the caller.
                        meta["deps"] = f.extractfile(info).read()
                    save = False
                    if debug:
                        print("Skipping", fname)
                    break

        if save:
            outfname = prefix + fname
            if info.type != tarfile.DIRTYPE:
                if debug:
                    print("Extracting " + outfname)
                _makedirs(outfname)
                subf = f.extractfile(info)
                save_file(outfname, subf)
    return meta
+
def expandhome(s):
    """Expand every '~/' occurrence in s using the HOME environment variable."""
    if "~/" not in s:
        return s
    home = os.getenv("HOME")
    return s.replace("~/", home + "/")
+
+import ussl
+import usocket
+warn_ussl = True
def url_open(url):
    """Open an HTTP(S) GET for url; return the socket positioned at the body.

    Raises NotFoundError on 404/301 responses, ValueError on any other
    non-200 status or malformed headers. Calls fatal() on DNS failure.
    """
    global warn_ussl

    if debug:
        print(url)

    proto, _, host, urlpath = url.split('/', 3)
    try:
        # NOTE(review): port 443 is hardcoded even for http:// URLs; upip
        # only ever fetches https://pypi.org, so the http path is unused.
        ai = usocket.getaddrinfo(host, 443, 0, usocket.SOCK_STREAM)
    except OSError as e:
        fatal("Unable to resolve %s (no Internet?)" % host, e)
    #print("Address infos:", ai)
    ai = ai[0]

    s = usocket.socket(ai[0], ai[1], ai[2])
    try:
        #print("Connect address:", addr)
        s.connect(ai[-1])

        if proto == "https:":
            s = ussl.wrap_socket(s, server_hostname=host)
            if warn_ussl:
                # Warn only once per process.
                print("Warning: %s SSL certificate is not validated" % host)
                warn_ussl = False

        # MicroPython rawsocket module supports file interface directly
        s.write("GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n" % (urlpath, host))
        l = s.readline()
        protover, status, msg = l.split(None, 2)
        if status != b"200":
            if status == b"404" or status == b"301":
                raise NotFoundError("Package not found")
            raise ValueError(status)
        # Consume and discard the remaining response headers.
        while 1:
            l = s.readline()
            if not l:
                raise ValueError("Unexpected EOF in HTTP headers")
            if l == b'\r\n':
                break
    except Exception as e:
        s.close()
        raise e

    return s
+
+
def get_pkg_metadata(name):
    """Fetch and parse the PyPI JSON metadata for package *name*."""
    f = url_open("https://pypi.org/pypi/%s/json" % name)
    try:
        return json.load(f)
    finally:
        f.close()
+
+
def fatal(msg, exc=None):
    """Print an error message and exit(1); re-raise exc instead when debugging."""
    print("Error:", msg)
    if exc and debug:
        raise exc
    sys.exit(1)
+
def install_pkg(pkg_spec, install_path):
    """Download and unpack the latest release of pkg_spec into install_path.

    Returns the metadata dict produced by install_tar (may contain 'deps').
    """
    data = get_pkg_metadata(pkg_spec)

    latest_ver = data["info"]["version"]
    packages = data["releases"][latest_ver]
    del data
    gc.collect()
    # Assumes exactly one distribution file per release
    # (the micropython-* sdist convention).
    assert len(packages) == 1
    package_url = packages[0]["url"]
    print("Installing %s %s from %s" % (pkg_spec, latest_ver, package_url))
    package_fname = op_basename(package_url)
    f1 = url_open(package_url)
    try:
        # Pipeline: gzip stream -> tar stream -> extracted files.
        f2 = uzlib.DecompIO(f1, gzdict_sz)
        f3 = tarfile.TarFile(fileobj=f2)
        meta = install_tar(f3, install_path)
    finally:
        f1.close()
    del f3
    del f2
    gc.collect()
    return meta
+
def install(to_install, install_path=None):
    """Install a package (or list of packages) plus their dependencies.

    to_install may be a single name or a list; install_path defaults to
    get_install_path(). On error, packages may be partially installed.
    """
    # Calculate gzip dictionary size to use
    global gzdict_sz
    sz = gc.mem_free() + gc.mem_alloc()
    if sz <= 65536:
        # Low-memory boards: use a smaller decompression window.
        gzdict_sz = 16 + 12

    if install_path is None:
        install_path = get_install_path()
    if install_path[-1] != "/":
        install_path += "/"
    if not isinstance(to_install, list):
        to_install = [to_install]
    print("Installing to: " + install_path)
    # sets would be perfect here, but don't depend on them
    installed = []
    try:
        while to_install:
            if debug:
                print("Queue:", to_install)
            pkg_spec = to_install.pop(0)
            if pkg_spec in installed:
                continue
            meta = install_pkg(pkg_spec, install_path)
            installed.append(pkg_spec)
            if debug:
                print(meta)
            # 'deps' holds the raw requires.txt bytes, one package per line.
            deps = meta.get("deps", "").rstrip()
            if deps:
                deps = deps.decode("utf-8").split("\n")
                to_install.extend(deps)
    except Exception as e:
        print("Error installing '{}': {}, packages may be partially installed".format(
                pkg_spec, e),
            file=sys.stderr)
+
def get_install_path():
    """Return (and cache in the module global) the effective install directory."""
    global install_path
    if install_path is None:
        # sys.path[0] is current module's path
        install_path = sys.path[1]
    install_path = expandhome(install_path)
    return install_path
+
def cleanup():
    """Best-effort removal of every file recorded in cleanup_files."""
    for fname in cleanup_files:
        try:
            os.unlink(fname)
        except OSError:
            print("Warning: Cannot delete " + fname)
+
def help():
    """Print usage information. (Intentionally shadows the help() builtin here.)"""
    print("""\
upip - Simple PyPI package manager for MicroPython
Usage: micropython -m upip install [-p <path>] <package>... | -r <requirements.txt>
import upip; upip.install(package_or_list, [<path>])

If <path> is not given, packages will be installed into sys.path[1]
(can be set from MICROPYPATH environment variable, if current system
supports that).""")
    print("Current value of sys.path[1]:", sys.path[1])
    print("""\

Note: only MicroPython packages (usually, named micropython-*) are supported
for installation, upip does not support arbitrary code in setup.py.
""")
+
def main():
    """Command-line entry point: parse sys.argv and run the install command."""
    global debug
    global install_path
    install_path = None

    if len(sys.argv) < 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
        help()
        return

    if sys.argv[1] != "install":
        fatal("Only 'install' command supported")

    to_install = []

    i = 2
    # Options must precede package names.
    while i < len(sys.argv) and sys.argv[i][0] == "-":
        opt = sys.argv[i]
        i += 1
        if opt == "-h" or opt == "--help":
            help()
            return
        elif opt == "-p":
            install_path = sys.argv[i]
            i += 1
        elif opt == "-r":
            # Requirements file: one package per line, '#' starts a comment.
            list_file = sys.argv[i]
            i += 1
            with open(list_file) as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    if l[0] == "#":
                        continue
                    to_install.append(l.rstrip())
        elif opt == "--debug":
            debug = True
        else:
            fatal("Unknown/unsupported option: " + opt)

    # Remaining arguments are package names.
    to_install.extend(sys.argv[i:])
    if not to_install:
        help()
        return

    install(to_install)

    if not debug:
        cleanup()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip_utarfile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip_utarfile.py
new file mode 100644
index 00000000..460ca2cd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upip_utarfile.py
@@ -0,0 +1,94 @@
+import uctypes
+
+# http://www.gnu.org/software/tar/manual/html_node/Standard.html
+TAR_HEADER = {
+    "name": (uctypes.ARRAY | 0, uctypes.UINT8 | 100),
+    "size": (uctypes.ARRAY | 124, uctypes.UINT8 | 11),
+}
+
+DIRTYPE = "dir"
+REGTYPE = "file"
+
def roundup(val, align):
    """Round val up to the next multiple of align (align must be a power of two)."""
    mask = align - 1
    return (val + mask) & ~mask
+
class FileSection:
    """Bounded read-only view over a region of an underlying stream.

    Exposes at most content_len bytes of f; skip() also consumes the
    trailing padding (aligned_len - content_len bytes).
    """

    def __init__(self, f, content_len, aligned_len):
        self.f = f
        self.content_len = content_len
        self.align = aligned_len - content_len

    def read(self, sz=65536):
        if not self.content_len:
            return b""
        # Never read past the end of this section.
        data = self.f.read(min(sz, self.content_len))
        self.content_len -= len(data)
        return data

    def readinto(self, buf):
        if not self.content_len:
            return 0
        if len(buf) > self.content_len:
            # Shrink the target so we stop at the section boundary.
            buf = memoryview(buf)[:self.content_len]
        n = self.f.readinto(buf)
        self.content_len -= n
        return n

    def skip(self):
        # Consume the unread remainder plus alignment padding in 16-byte steps.
        remaining = self.content_len + self.align
        if remaining:
            chunk = bytearray(16)
            while remaining:
                step = min(remaining, 16)
                self.f.readinto(chunk, step)
                remaining -= step
+
class TarInfo:
    """Metadata record for one archive member.

    Attributes (name, size, type, subf) are assigned by TarFile.next().
    """

    def __str__(self):
        return "TarInfo(%r, %s, %d)" % (self.name, self.type, self.size)
+
class TarFile:
    """Minimal streaming ustar reader; iterate to receive TarInfo records."""

    def __init__(self, name=None, fileobj=None):
        # Either an open binary stream or a path to open.
        if fileobj:
            self.f = fileobj
        else:
            self.f = open(name, "rb")
        self.subf = None  # FileSection of the member currently being read

    def next(self):
            # Finish (skip) the previous member before reading the next header.
            if self.subf:
                self.subf.skip()
            buf = self.f.read(512)
            if not buf:
                return None

            # Overlay the 512-byte header with the TAR_HEADER layout.
            h = uctypes.struct(uctypes.addressof(buf), TAR_HEADER, uctypes.LITTLE_ENDIAN)

            # Empty block means end of archive
            if h.name[0] == 0:
                return None

            d = TarInfo()
            d.name = str(h.name, "utf-8").rstrip("\0")
            # The size field is octal ASCII.
            d.size = int(bytes(h.size), 8)
            # A trailing slash in the name marks a directory entry.
            d.type = [REGTYPE, DIRTYPE][d.name[-1] == "/"]
            self.subf = d.subf = FileSection(self.f, d.size, roundup(d.size, 512))
            return d

    def __iter__(self):
        return self

    def __next__(self):
        v = self.next()
        if v is None:
            raise StopIteration
        return v

    def extractfile(self, tarinfo):
        """Return the bounded stream for reading tarinfo's content."""
        return tarinfo.subf
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upysh.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upysh.py
new file mode 100644
index 00000000..250c5747
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/upysh.py
@@ -0,0 +1,84 @@
+import sys
+import os
+
class LS:
    """Directory listing: evaluating `ls` or calling ls(path) prints entries."""

    def __repr__(self):
        # Typing `ls` at the REPL triggers __repr__; list the cwd and
        # return an empty string to keep the prompt clean.
        self.__call__()
        return ""

    def __call__(self, path="."):
        for name in sorted(os.listdir(path)):
            st = os.stat("%s/%s" % (path, name))
            if st[0] & 0x4000:  # stat.S_IFDIR
                print("   <dir> %s" % name)
            else:
                print("% 8d %s" % (st[6], name))
+
class PWD:
    """Evaluating `pwd` at the REPL shows the current working directory."""

    def __repr__(self):
        return os.getcwd()

    def __call__(self):
        # pwd() behaves exactly like evaluating pwd.
        return repr(self)
+
class CLEAR:
    """Evaluating `clear` emits the ANSI clear-screen + cursor-home sequence."""

    def __repr__(self):
        # ESC[2J clears the screen, ESC[H moves the cursor home.
        return "\x1b[2J\x1b[H"

    def __call__(self):
        return repr(self)
+
+
# Shell-style singletons: evaluating these at the REPL runs the command.
pwd = PWD()
ls = LS()
clear = CLEAR()

# Thin aliases over os functions for shell-like ergonomics.
cd = os.chdir
mkdir = os.mkdir
mv = os.rename
rm = os.remove
rmdir = os.rmdir
+
def head(f, n=10):
    """Print the first n lines of file f to stdout."""
    with open(f) as stream:
        for _ in range(n):
            line = stream.readline()
            if not line:
                break
            sys.stdout.write(line)
+
def cat(f):
    """Print the whole file (head with an effectively unlimited line count)."""
    head(f, 1 << 30)
+
def newfile(path):
    """Interactively create a text file: read stdin lines until EOF (Ctrl+D)."""
    print("Type file contents line by line, finish with EOF (Ctrl+D).")
    with open(path, "w") as f:
        while 1:
            try:
                l = input()
            except EOFError:
                break
            f.write(l)
            f.write("\n")
+            f.write("\n")
+
class Man():
    """Evaluating `man` at the REPL prints the upysh help text."""

    def __repr__(self):
        return("""
upysh is intended to be imported using:
from upysh import *

To see this help text again, type "man".

upysh commands:
pwd, cd("new_dir"), ls, ls(...), head(...), cat(...)
newfile(...), mv("old", "new"), rm(...), mkdir(...), rmdir(...),
clear
""")
+
man = Man()

# Show the help text once when the module is imported.
print(man)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urequests.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urequests.py
new file mode 100644
index 00000000..acb220e8
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urequests.py
@@ -0,0 +1,124 @@
+import usocket
+
class Response:
    """HTTP response wrapping a raw stream; the body is read lazily and cached."""

    def __init__(self, f):
        self.raw = f
        self.encoding = "utf-8"
        self._cached = None

    def close(self):
        # Detach the stream first, then close it and drop the cached body.
        stream, self.raw = self.raw, None
        self._cached = None
        if stream:
            stream.close()

    @property
    def content(self):
        """Body as bytes; the underlying stream is closed after the first read."""
        if self._cached is None:
            stream = self.raw
            try:
                self._cached = stream.read()
            finally:
                stream.close()
                self.raw = None
        return self._cached

    @property
    def text(self):
        """Body decoded with self.encoding (default utf-8)."""
        return str(self.content, self.encoding)

    def json(self):
        """Body parsed as JSON."""
        import ujson
        return ujson.loads(self.content)
+
+
def request(method, url, data=None, json=None, headers={}, stream=None):
    """Perform an HTTP/1.0 request; return a Response positioned at the body.

    Either data (raw payload) or json (object to serialize) may be given.
    Chunked transfer encoding and redirects are not supported and raise.
    """
    try:
        proto, dummy, host, path = url.split("/", 3)
    except ValueError:
        # URL without a path component, e.g. "http://host".
        proto, dummy, host = url.split("/", 2)
        path = ""
    if proto == "http:":
        port = 80
    elif proto == "https:":
        import ussl
        port = 443
    else:
        raise ValueError("Unsupported protocol: " + proto)

    if ":" in host:
        # Explicit host:port overrides the scheme default.
        host, port = host.split(":", 1)
        port = int(port)

    ai = usocket.getaddrinfo(host, port, 0, usocket.SOCK_STREAM)
    ai = ai[0]

    s = usocket.socket(ai[0], ai[1], ai[2])
    try:
        s.connect(ai[-1])
        if proto == "https:":
            s = ussl.wrap_socket(s, server_hostname=host)
        # NOTE(review): b"%s" % str relies on MicroPython's permissive bytes
        # formatting; CPython would raise TypeError here.
        s.write(b"%s /%s HTTP/1.0\r\n" % (method, path))
        if not "Host" in headers:
            s.write(b"Host: %s\r\n" % host)
        # Iterate over keys to avoid tuple alloc
        for k in headers:
            s.write(k)
            s.write(b": ")
            s.write(headers[k])
            s.write(b"\r\n")
        if json is not None:
            assert data is None
            import ujson
            data = ujson.dumps(json)
            s.write(b"Content-Type: application/json\r\n")
        if data:
            s.write(b"Content-Length: %d\r\n" % len(data))
        s.write(b"\r\n")
        if data:
            s.write(data)

        # Status line: HTTP/1.x <code> <reason>.
        l = s.readline()
        #print(l)
        l = l.split(None, 2)
        status = int(l[1])
        reason = ""
        if len(l) > 2:
            reason = l[2].rstrip()
        # Scan headers; bail out on features this client cannot handle.
        while True:
            l = s.readline()
            if not l or l == b"\r\n":
                break
            #print(l)
            if l.startswith(b"Transfer-Encoding:"):
                if b"chunked" in l:
                    raise ValueError("Unsupported " + l)
            elif l.startswith(b"Location:") and not 200 <= status <= 299:
                raise NotImplementedError("Redirects not yet supported")
    except OSError:
        s.close()
        raise

    resp = Response(s)
    resp.status_code = status
    resp.reason = reason
    return resp
+
+
def head(url, **kw):
    """Send a HEAD request; see request() for supported keyword arguments."""
    return request("HEAD", url, **kw)
+
def get(url, **kw):
    """Send a GET request; see request() for supported keyword arguments."""
    return request("GET", url, **kw)
+
def post(url, **kw):
    """Send a POST request; see request() for supported keyword arguments."""
    return request("POST", url, **kw)
+
def put(url, **kw):
    """Send a PUT request; see request() for supported keyword arguments."""
    return request("PUT", url, **kw)
+
def patch(url, **kw):
    """Send a PATCH request; see request() for supported keyword arguments."""
    return request("PATCH", url, **kw)
+
def delete(url, **kw):
    """Send a DELETE request; see request() for supported keyword arguments."""
    return request("DELETE", url, **kw)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/parse.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/parse.py
new file mode 100644
index 00000000..8e65d2bf
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/parse.py
@@ -0,0 +1,974 @@
+"""Parse (absolute and relative) URLs.
+
+urlparse module is based upon the following RFC specifications.
+
+RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
+and L.  Masinter, January 2005.
+
+RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
+and L.Masinter, December 1999.
+
+RFC 2396:  "Uniform Resource Identifiers (URI)": Generic Syntax by T.
+Berners-Lee, R. Fielding, and L. Masinter, August 1998.
+
+RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
+
+RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
+1995.
+
+RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
+McCahill, December 1994
+
+RFC 3986 is considered the current standard and any future changes to
+urlparse module should conform with it.  The urlparse module is
currently not entirely compliant with this RFC due to de facto
+scenarios for parsing, and for backward compatibility purposes, some
+parsing quirks from older RFCs are retained. The testcases in
+test_urlparse.py provides a good indicator of parsing behavior.
+"""
+
+import re
+import sys
+import collections
+
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "urlencode", "parse_qs",
           "parse_qsl", "quote", "quote_plus", "quote_from_bytes",
           "unquote", "unquote_plus", "unquote_to_bytes"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
                 'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
# Schemes whose last path segment may carry ';params' (see _splitparams).
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp', 'tel']

# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

# XXX: Consider replacing with functools.lru_cache
# urlsplit() memoises its results here; the cache is flushed wholesale
# once it exceeds MAX_CACHE_SIZE entries (see clear_cache()).
MAX_CACHE_SIZE = 20
_parse_cache = {}
+
def clear_cache():
    """Empty both module-level caches (parse results and quoters)."""
    for cache in (_parse_cache, _safe_quoters):
        cache.clear()
+
+
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
# Codec used when transparently decoding bytes inputs to str.
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
+
+def _noop(obj):
+    return obj
+
def _encode_result(obj, encoding=_implicit_encoding,
                   errors=_implicit_errors):
    """Coerce a str result back to bytes with the implicit (ASCII) codec."""
    return obj.encode(encoding, errors)
+
def _decode_args(args, encoding=_implicit_encoding,
                 errors=_implicit_errors):
    """Decode each bytes argument to str; falsy values become ''."""
    return tuple(arg.decode(encoding, errors) if arg else '' for arg in args)
+
def _coerce_args(*args):
    """Normalise *args* to str and return them plus a result coercer.

    str inputs pass through with a no-op coercer; non-str inputs are
    decoded and paired with an encoder so results match the input type.
    The first argument decides the expected type; mixing str and non-str
    raises TypeError, except that '' is always tolerated (it is the
    common default for the *scheme* parameter).
    """
    str_input = isinstance(args[0], str)
    if any(arg and isinstance(arg, str) != str_input for arg in args[1:]):
        raise TypeError("Cannot mix str and non-str arguments")
    if str_input:
        return args + (_noop,)
    return _decode_args(args) + (_encode_result,)
+
+# Result objects are more helpful than simple tuples
+class _ResultMixinStr(object):
+    """Standard approach to encoding parsed results from str to bytes"""
+    __slots__ = ()
+
+    def encode(self, encoding='ascii', errors='strict'):
+        return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
+
+
+class _ResultMixinBytes(object):
+    """Standard approach to decoding parsed results from bytes to str"""
+    __slots__ = ()
+
+    def decode(self, encoding='ascii', errors='strict'):
+        return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
+
+
+class _NetlocResultMixinBase(object):
+    """Shared methods for the parsed result objects containing a netloc element"""
+    __slots__ = ()
+
+    @property
+    def username(self):
+        return self._userinfo[0]
+
+    @property
+    def password(self):
+        return self._userinfo[1]
+
+    @property
+    def hostname(self):
+        hostname = self._hostinfo[0]
+        if not hostname:
+            hostname = None
+        elif hostname is not None:
+            hostname = hostname.lower()
+        return hostname
+
+    @property
+    def port(self):
+        port = self._hostinfo[1]
+        if port is not None:
+            port = int(port, 10)
+            # Return None on an illegal port
+            if not ( 0 <= port <= 65535):
+                return None
+        return port
+
+
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    """str flavour of the netloc accessors (splits on text delimiters)."""
    __slots__ = ()

    @property
    def _userinfo(self):
        # Everything before the last '@' is userinfo; absent -> (None, None).
        userinfo, present, _ = self.netloc.rpartition('@')
        if not present:
            return None, None
        username, sep, password = userinfo.partition(':')
        return username, (password if sep else None)

    @property
    def _hostinfo(self):
        _, _, hostinfo = self.netloc.rpartition('@')
        # Bracketed hosts ([IPv6]) may contain ':' inside the brackets.
        _, bracketed, rest = hostinfo.partition('[')
        if bracketed:
            hostname, _, tail = rest.partition(']')
            _, sep, port = tail.partition(':')
        else:
            hostname, sep, port = hostinfo.partition(':')
        return hostname, (port if sep else None)
+
+
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    """bytes flavour of the netloc accessors (splits on bytes delimiters)."""
    __slots__ = ()

    @property
    def _userinfo(self):
        # Everything before the last b'@' is userinfo; absent -> (None, None).
        userinfo, present, _ = self.netloc.rpartition(b'@')
        if not present:
            return None, None
        username, sep, password = userinfo.partition(b':')
        return username, (password if sep else None)

    @property
    def _hostinfo(self):
        _, _, hostinfo = self.netloc.rpartition(b'@')
        # Bracketed hosts ([IPv6]) may contain b':' inside the brackets.
        _, bracketed, rest = hostinfo.partition(b'[')
        if bracketed:
            hostname, _, tail = rest.partition(b']')
            _, sep, port = tail.partition(b':')
        else:
            hostname, sep, port = hostinfo.partition(b':')
        return hostname, (port if sep else None)
+
+
from collections import namedtuple

# Plain-tuple bases; the *Result classes below layer the mixin behaviour
# (encode/decode, netloc accessors, geturl) on top of these.
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')

# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
+
+# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    """str result of urldefrag(): (url, fragment)."""
    __slots__ = ()

    def geturl(self):
        # Re-attach the fragment only when one was present.
        if not self.fragment:
            return self.url
        return self.url + '#' + self.fragment
+
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    """str result of urlsplit(): (scheme, netloc, path, query, fragment)."""
    __slots__ = ()

    def geturl(self):
        # Reassemble the five components into a URL string.
        return urlunsplit(self)
+
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    """str result of urlparse(): six components including params."""
    __slots__ = ()

    def geturl(self):
        # Reassemble the six components into a URL string.
        return urlunparse(self)
+
+# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    """bytes result of urldefrag(): (url, fragment)."""
    __slots__ = ()

    def geturl(self):
        # Re-attach the fragment only when one was present.
        if not self.fragment:
            return self.url
        return self.url + b'#' + self.fragment
+
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    """bytes result of urlsplit()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble the five components into a bytes URL.
        return urlunsplit(self)
+
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    """bytes result of urlparse()."""
    __slots__ = ()

    def geturl(self):
        # Reassemble the six components into a bytes URL.
        return urlunparse(self)
+
+# Set up the encode/decode result pairs
def _fix_result_transcoding():
    """Cross-link each str result class with its bytes counterpart."""
    pairs = (
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    )
    for decoded_cls, encoded_cls in pairs:
        decoded_cls._encoded_counterpart = encoded_cls
        encoded_cls._decoded_counterpart = decoded_cls

_fix_result_transcoding()
del _fix_result_transcoding  # one-shot setup helper; keep the namespace clean
+
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, url, query, fragment = urlsplit(url, scheme, allow_fragments)
    params = ''
    # ';params' on the last segment is only split off for schemes that use it.
    if ';' in url and scheme in uses_params:
        url, params = _splitparams(url)
    return _coerce_result(
        ParseResult(scheme, netloc, url, params, query, fragment))
+
+def _splitparams(url):
+    if '/'  in url:
+        i = url.find(';', url.rfind('/'))
+        if i < 0:
+            return url, ''
+    else:
+        i = url.find(';')
+    return url[:i], url[i+1:]
+
+def _splitnetloc(url, start=0):
+    delim = len(url)   # position of end of domain part of url, default is end
+    for c in '/?#':    # look for delimiters; the order is NOT important
+        wdelim = url.find(c, start)        # find first of this delim
+        if wdelim >= 0:                    # if found
+            delim = min(delim, wdelim)     # use earliest delim position
+    return url[start:delim], url[delim:]   # return (domain, rest)
+
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # Cache key includes the input types so str and bytes calls don't collide.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            # Fast path: duplicates the general parse below for 'http' only.
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # An unbalanced bracket means a malformed IPv6 literal.
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        # Only treat the prefix as a scheme if every char is scheme-legal.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        # An unbalanced bracket means a malformed IPv6 literal.
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)
+
def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if params:
        # Fold the params back into the path before delegating.
        url = url + ';' + params
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
+
def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    # Emit '//netloc' when one is present, or when the scheme expects one.
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and not url.startswith('/'):
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url += '?' + query
    if fragment:
        url += '#' + fragment
    return _coerce_result(url)
+
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    # A different scheme (or one that isn't relative-capable) means *url*
    # stands on its own.
    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc
    # An absolute path discards the base path entirely.
    if path[:1] == '/':
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Empty path: keep the base path (and base query unless overridden).
    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Merge the base directory segments with the relative segments.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    # Repeatedly collapse the first resolvable '<dir>/..' pair until none
    # remain; the inner while/else breaks out when a full pass finds nothing.
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
                                      params, query, fragment)))
+
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' not in url:
        return _coerce_result(DefragResult(url, ''))
    scheme, netloc, path, params, query, frag = urlparse(url)
    defragged = urlunparse((scheme, netloc, path, params, query, ''))
    return _coerce_result(DefragResult(defragged, frag))
+
_hexdig = '0123456789ABCDEFabcdef'
# Precomputed map: two hex digits (as bytes, any case mix) -> decoded byte.
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
              for a in _hexdig for b in _hexdig}

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Raise AttributeError for non-string-like input, matching the
        # behaviour of the non-empty path.
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    chunks = string.split(b'%')
    if len(chunks) == 1:
        return string
    pieces = [chunks[0]]
    for chunk in chunks[1:]:
        decoded = _hextobyte.get(chunk[:2])
        if decoded is not None:
            pieces.append(decoded)
            pieces.append(chunk[2:])
        else:
            # Not a valid %XX escape: keep the percent sign literally.
            pieces.append(b'%')
            pieces.append(chunk)
    return b''.join(pieces)
+
# Matches maximal runs of ASCII characters; unquote() uses it so that
# percent-decoding is applied only to ASCII spans of the input.
_asciire = re.compile(r'([\x00-\x7f]+)')
+
def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent.

    *encoding* and *errors* (defaults 'utf-8' / 'replace') control how the
    percent-decoded byte sequences are turned back into text, as accepted
    by bytes.decode(); invalid sequences become replacement characters
    under the defaults.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Mirror the slow path's AttributeError for non-string input.
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # _asciire captures ASCII runs, so odd-indexed pieces are pure ASCII —
    # the only places a %xx escape can validly occur.
    pieces = _asciire.split(string)
    out = [pieces[0]]
    for idx in range(1, len(pieces), 2):
        out.append(unquote_to_bytes(pieces[idx]).decode(encoding, errors))
        out.append(pieces[idx + 1])
    return ''.join(out)
+
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
    """Parse a query string; return a dict mapping names to value lists.

    Arguments:

    qs: percent-encoded query string to be parsed.

    keep_blank_values: when true, blank values are kept as empty strings;
        when false (default), pairs with blank values are dropped.

    strict_parsing: when true, malformed fields raise ValueError;
        when false (default), they are silently ignored.

    encoding and errors: how percent-encoded sequences are decoded to
        text, as accepted by bytes.decode().
    """
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding, errors=errors):
        # Group repeated names into a single list, preserving order.
        result.setdefault(name, []).append(value)
    return result
+
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace'):
    """Parse a query string; return a list of (name, value) pairs.

    Arguments:

    qs: percent-encoded query string to be parsed.

    keep_blank_values: when true, blank values are kept as empty strings;
        when false (default), pairs with blank values are dropped.

    strict_parsing: when true, malformed fields raise ValueError;
        when false (default), they are silently ignored.

    encoding and errors: how percent-encoded sequences are decoded to
        text, as accepted by bytes.decode().
    """
    qs, _coerce_result = _coerce_args(qs)
    result = []
    # Both '&' and ';' are accepted as pair separators.
    for field in [piece for group in qs.split('&')
                  for piece in group.split(';')]:
        if not field:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            continue
        parts = field.split('=', 1)
        if len(parts) != 2:
            # A name with no '=': error under strict parsing, otherwise
            # either keep it with an empty value or drop it.
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            if not keep_blank_values:
                continue
            parts.append('')
        name, value = parts
        if value or keep_blank_values:
            name = _coerce_result(unquote(name.replace('+', ' '),
                                          encoding=encoding, errors=errors))
            value = _coerce_result(unquote(value.replace('+', ' '),
                                           encoding=encoding, errors=errors))
            result.append((name, value))
    return result
+
def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    return unquote(string.replace('+', ' '), encoding, errors)
+
# Characters that quoting never escapes: ASCII letters, digits, and '_.-'.
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
# Cache of Quoter.__getitem__ callables keyed by their extra "safe" bytes.
_safe_quoters = {}

class Quoter(collections.defaultdict):
    """A mapping from bytes (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128, and
    in the "safe" set (either the specified safe set, or default set).
    """
    # defaultdict gives C-speed lookups once a byte value has been seen.

    def __init__(self, safe):
        """safe: bytes object of extra characters to pass through unquoted."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        # Without this the repr would claim to be a plain defaultdict.
        return "<Quoter %r>" % dict(self)

    def __missing__(self, b):
        # Cache miss: compute the quoted form, remember it, and return it.
        quoted = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = quoted
        return quoted
+
def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Percent-encode the characters of *string* that are neither in the
    always-safe set (letters, digits, '_.-') nor in *safe*.  The default
    safe set is '/', because quote() is aimed at the path section of a
    URL, where existing slashes act as delimiters.

    *string* and *safe* may each be str or bytes.  For str input the
    optional *encoding* (default 'utf-8') and *errors* (default 'strict')
    select how the text is converted to bytes first, as accepted by
    str.encode(); they must not be given for bytes input.
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    else:
        # bytes input is quoted as-is; re-encoding options are meaningless.
        if encoding is not None:
            raise TypeError("quote() doesn't support 'encoding' for bytes")
        if errors is not None:
            raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)
+
def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but encode ' ' as '+', as used by HTML form values.

    Plus signs already present in the input are escaped unless listed in
    *safe*; unlike quote(), '/' is not safe by default.
    """
    # If there is no space, plain quote() already gives the right answer.
    if ((isinstance(string, str) and ' ' not in string) or
            (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    space = ' ' if isinstance(safe, str) else b' '
    # Let quote() keep spaces verbatim, then swap them for '+'.
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')
+
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Drop any non-ASCII characters from a str safe-set.
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes([b for b in safe if b < 128])
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        # Every byte is already safe: no quoting needed.
        return bs.decode()
    quoter = _safe_quoters.get(safe)
    if quoter is None:
        quoter = _safe_quoters[safe] = Quoter(safe).__getitem__
    return ''.join(map(quoter, bs))
+
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.
    When a component is a string, the safe, encoding and error parameters are
    sent to the quote_plus function for encoding.
    """
    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # Non-sequences fail len(); a non-empty string fails the
            # "first item is a tuple" check.  Zero-length sequences of any
            # type pass, preserving the historical acceptance of empty dicts.
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
        except TypeError:
            raise TypeError("not a valid non-string sequence "
                            "or mapping object")

    def _quote_component(value):
        # Quote one key or scalar value.  bytes skip the str() conversion
        # and the encoding parameters, matching quote_plus's bytes contract.
        if isinstance(value, bytes):
            return quote_plus(value, safe)
        return quote_plus(str(value), safe, encoding, errors)

    l = []
    if not doseq:
        for k, v in query:
            l.append(_quote_component(k) + '=' + _quote_component(v))
    else:
        for k, v in query:
            k = _quote_component(k)
            if isinstance(v, (bytes, str)):
                l.append(k + '=' + _quote_component(v))
            else:
                try:
                    # len() doubles as the sequence-ness test (the old code
                    # bound its result to an unused local here).
                    len(v)
                except TypeError:
                    # Not a sequence: treat it as a scalar value.
                    l.append(k + '=' + _quote_component(v))
                else:
                    # Emit one 'k=elt' pair per sequence element.
                    for elt in v:
                        l.append(k + '=' + _quote_component(elt))
    return '&'.join(l)
+
+# Utilities to parse URLs (most of these return None for missing parts):
+# unwrap('<URL:type://host/path>') --> 'type://host/path'
+# splittype('type:opaquestring') --> 'type', 'opaquestring'
+# splithost('//host[:port]/path') --> 'host[:port]', '/path'
+# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
+# splitpasswd('user:passwd') -> 'user', 'passwd'
+# splitport('host:port') --> 'host', 'port'
+# splitquery('/path?query') --> '/path', 'query'
+# splittag('/path#tag') --> '/path', 'tag'
+# splitattr('/path;attr1=value1;attr2=value2;...') ->
+#   '/path', ['attr1=value1', 'attr2=value2', ...]
+# splitvalue('attr=value') --> 'attr', 'value'
+# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def'
+
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'.

    Most URL schemes require ASCII; reject str URLs containing other
    characters.  Non-str inputs pass through unchanged.
    """
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        return url
    try:
        return url.encode("ASCII").decode()
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
+
+def unwrap(url):
+    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
+    url = str(url).strip()
+    if url[:1] == '<' and url[-1:] == '>':
+        url = url[1:-1].strip()
+    if url[:4] == 'URL:': url = url[4:].strip()
+    return url
+
+_typeprog = None
+def splittype(url):
+    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
+    global _typeprog
+    if _typeprog is None:
+        import re
+        _typeprog = re.compile('^([^/:]+):')
+
+    match = _typeprog.match(url)
+    if match:
+        scheme = match.group(1)
+        return scheme.lower(), url[len(scheme) + 1:]
+    return None, url
+
+_hostprog = None
+def splithost(url):
+    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
+    global _hostprog
+    if _hostprog is None:
+        import re
+        _hostprog = re.compile('^//([^/?]*)(.*)$')
+
+    match = _hostprog.match(url)
+    if match:
+        host_port = match.group(1)
+        path = match.group(2)
+        if path and not path.startswith('/'):
+            path = '/' + path
+        return host_port, path
+    return None, url
+
+_userprog = None
+def splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    global _userprog
+    if _userprog is None:
+        import re
+        _userprog = re.compile('^(.*)@(.*)$')
+
+    match = _userprog.match(host)
+    if match: return match.group(1, 2)
+    return None, host
+
+_passwdprog = None
+def splitpasswd(user):
+    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
+    global _passwdprog
+    if _passwdprog is None:
+        import re
+        _passwdprog = re.compile('^([^:]*):(.*)$',re.S)
+
+    match = _passwdprog.match(user)
+    if match: return match.group(1, 2)
+    return user, None
+
+# splitport('host:port') --> 'host', 'port'
+_portprog = None
+def splitport(host):
+    """splitport('host:port') --> 'host', 'port'."""
+    global _portprog
+    if _portprog is None:
+        import re
+        _portprog = re.compile('^(.*):([0-9]+)$')
+
+    match = _portprog.match(host)
+    if match: return match.group(1, 2)
+    return host, None
+
+_nportprog = None
+def splitnport(host, defport=-1):
+    """Split host and port, returning numeric port.
+    Return given default port if no ':' found; defaults to -1.
+    Return numerical port if a valid number are found after ':'.
+    Return None if ':' but not a valid number."""
+    global _nportprog
+    if _nportprog is None:
+        import re
+        _nportprog = re.compile('^(.*):(.*)$')
+
+    match = _nportprog.match(host)
+    if match:
+        host, port = match.group(1, 2)
+        try:
+            if not port: raise ValueError("no digits")
+            nport = int(port)
+        except ValueError:
+            nport = None
+        return host, nport
+    return host, defport
+
+_queryprog = None
+def splitquery(url):
+    """splitquery('/path?query') --> '/path', 'query'."""
+    global _queryprog
+    if _queryprog is None:
+        import re
+        _queryprog = re.compile('^(.*)\?([^?]*)$')
+
+    match = _queryprog.match(url)
+    if match: return match.group(1, 2)
+    return url, None
+
+_tagprog = None
+def splittag(url):
+    """splittag('/path#tag') --> '/path', 'tag'."""
+    global _tagprog
+    if _tagprog is None:
+        import re
+        _tagprog = re.compile('^(.*)#([^#]*)$')
+
+    match = _tagprog.match(url)
+    if match: return match.group(1, 2)
+    return url, None
+
+def splitattr(url):
+    """splitattr('/path;attr1=value1;attr2=value2;...') ->
+        '/path', ['attr1=value1', 'attr2=value2', ...]."""
+    words = url.split(';')
+    return words[0], words[1:]
+
+_valueprog = None
+def splitvalue(attr):
+    """splitvalue('attr=value') --> 'attr', 'value'."""
+    global _valueprog
+    if _valueprog is None:
+        import re
+        _valueprog = re.compile('^([^=]*)=(.*)$')
+
+    match = _valueprog.match(attr)
+    if match: return match.group(1, 2)
+    return attr, None
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/urequest.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/urequest.py
new file mode 100644
index 00000000..fd52721b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/urllib/urequest.py
@@ -0,0 +1,65 @@
+import usocket
+
+def urlopen(url, data=None, method="GET"):
+    if data is not None and method == "GET":
+        method = "POST"
+    try:
+        proto, dummy, host, path = url.split("/", 3)
+    except ValueError:
+        proto, dummy, host = url.split("/", 2)
+        path = ""
+    if proto == "http:":
+        port = 80
+    elif proto == "https:":
+        import ussl
+        port = 443
+    else:
+        raise ValueError("Unsupported protocol: " + proto)
+
+    if ":" in host:
+        host, port = host.split(":", 1)
+        port = int(port)
+
+    ai = usocket.getaddrinfo(host, port, 0, usocket.SOCK_STREAM)
+    ai = ai[0]
+
+    s = usocket.socket(ai[0], ai[1], ai[2])
+    try:
+        s.connect(ai[-1])
+        if proto == "https:":
+            s = ussl.wrap_socket(s, server_hostname=host)
+
+        s.write(method)
+        s.write(b" /")
+        s.write(path)
+        s.write(b" HTTP/1.0\r\nHost: ")
+        s.write(host)
+        s.write(b"\r\n")
+
+        if data:
+            s.write(b"Content-Length: ")
+            s.write(str(len(data)))
+            s.write(b"\r\n")
+        s.write(b"\r\n")
+        if data:
+            s.write(data)
+
+        l = s.readline()
+        l = l.split(None, 2)
+        #print(l)
+        status = int(l[1])
+        while True:
+            l = s.readline()
+            if not l or l == b"\r\n":
+                break
+            #print(l)
+            if l.startswith(b"Transfer-Encoding:"):
+                if b"chunked" in l:
+                    raise ValueError("Unsupported " + l)
+            elif l.startswith(b"Location:"):
+                raise NotImplementedError("Redirects not yet supported")
+    except OSError:
+        s.close()
+        raise
+
+    return s
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/utarfile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/utarfile.py
new file mode 100644
index 00000000..460ca2cd
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/utarfile.py
@@ -0,0 +1,94 @@
+import uctypes
+
+# http://www.gnu.org/software/tar/manual/html_node/Standard.html
+TAR_HEADER = {
+    "name": (uctypes.ARRAY | 0, uctypes.UINT8 | 100),
+    "size": (uctypes.ARRAY | 124, uctypes.UINT8 | 11),
+}
+
+DIRTYPE = "dir"
+REGTYPE = "file"
+
+def roundup(val, align):
+    return (val + align - 1) & ~(align - 1)
+
+class FileSection:
+    """Read-only view over one tar member's byte range inside the archive.
+
+    Tracks how many content bytes remain unread and how much padding
+    follows up to the 512-byte block boundary.
+    """
+
+    def __init__(self, f, content_len, aligned_len):
+        self.f = f
+        self.content_len = content_len
+        self.align = aligned_len - content_len
+
+    def read(self, sz=65536):
+        # Never read past the member's end; returns b"" once exhausted.
+        if self.content_len == 0:
+            return b""
+        if sz > self.content_len:
+            sz = self.content_len
+        data = self.f.read(sz)
+        sz = len(data)
+        self.content_len -= sz
+        return data
+
+    def readinto(self, buf):
+        # Fill buf from the member, clamping the view to the bytes left.
+        if self.content_len == 0:
+            return 0
+        if len(buf) > self.content_len:
+            buf = memoryview(buf)[:self.content_len]
+        sz = self.f.readinto(buf)
+        self.content_len -= sz
+        return sz
+
+    def skip(self):
+        # Consume the unread remainder plus block padding, 16 bytes at a time.
+        # NOTE(review): two-argument readinto(buf, n) is a MicroPython
+        # extension -- confirm the underlying stream supports it.
+        sz = self.content_len + self.align
+        if sz:
+            buf = bytearray(16)
+            while sz:
+                s = min(sz, 16)
+                self.f.readinto(buf, s)
+                sz -= s
+
+class TarInfo:
+    """Per-member metadata; name/type/size/subf are filled in by TarFile.next()."""
+
+    def __str__(self):
+        return "TarInfo(%r, %s, %d)" % (self.name, self.type, self.size)
+
+class TarFile:
+
+    def __init__(self, name=None, fileobj=None):
+        if fileobj:
+            self.f = fileobj
+        else:
+            self.f = open(name, "rb")
+        self.subf = None
+
+    def next(self):
+            if self.subf:
+                self.subf.skip()
+            buf = self.f.read(512)
+            if not buf:
+                return None
+
+            h = uctypes.struct(uctypes.addressof(buf), TAR_HEADER, uctypes.LITTLE_ENDIAN)
+
+            # Empty block means end of archive
+            if h.name[0] == 0:
+                return None
+
+            d = TarInfo()
+            d.name = str(h.name, "utf-8").rstrip("\0")
+            d.size = int(bytes(h.size), 8)
+            d.type = [REGTYPE, DIRTYPE][d.name[-1] == "/"]
+            self.subf = d.subf = FileSection(self.f, d.size, roundup(d.size, 512))
+            return d
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        v = self.next()
+        if v is None:
+            raise StopIteration
+        return v
+
+    def extractfile(self, tarinfo):
+        return tarinfo.subf
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uu.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uu.py
new file mode 100644
index 00000000..d68d2937
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uu.py
@@ -0,0 +1,199 @@
+#! /usr/bin/env python3
+
+# Copyright 1994 by Lance Ellinghouse
+# Cathedral City, California Republic, United States of America.
+#                        All Rights Reserved
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Lance Ellinghouse
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
+# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
+# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# Modified by Jack Jansen, CWI, July 1995:
+# - Use binascii module to do the actual line-by-line conversion
+#   between ascii and binary. This results in a 1000-fold speedup. The C
+#   version is still 5 times faster, though.
+# - Arguments more compliant with python standard
+
+"""Implementation of the UUencode and UUdecode functions.
+
+encode(in_file, out_file [,name, mode])
+decode(in_file [, out_file, mode])
+"""
+
+import binascii
+import os
+import sys
+
+__all__ = ["Error", "encode", "decode"]
+
+class Error(Exception):
+    """Base exception for uuencode/uudecode failures."""
+    pass
+
+def encode(in_file, out_file, name=None, mode=None):
+    """Uuencode file.
+
+    in_file/out_file may be '-' (std streams), a pathname, or a binary
+    file object.  name and mode default to the input's basename and
+    permission bits (0o666 when unknown).
+    """
+    #
+    # If in_file is a pathname open it and change defaults
+    #
+    opened_files = []
+    try:
+        if in_file == '-':
+            in_file = sys.stdin.buffer
+        elif isinstance(in_file, str):
+            if name is None:
+                name = os.path.basename(in_file)
+            if mode is None:
+                try:
+                    mode = os.stat(in_file).st_mode
+                except AttributeError:
+                    # Stat result may lack st_mode on this port; keep default.
+                    pass
+            in_file = open(in_file, 'rb')
+            opened_files.append(in_file)
+        #
+        # Open out_file if it is a pathname
+        #
+        if out_file == '-':
+            out_file = sys.stdout.buffer
+        elif isinstance(out_file, str):
+            out_file = open(out_file, 'wb')
+            opened_files.append(out_file)
+        #
+        # Set defaults for name and mode
+        #
+        if name is None:
+            name = '-'
+        if mode is None:
+            mode = 0o666
+        #
+        # Write the data
+        #
+        out_file.write(('begin %o %s\n' % ((mode & 0o777), name)).encode("ascii"))
+        data = in_file.read(45)
+        while len(data) > 0:
+            out_file.write(binascii.b2a_uu(data))
+            data = in_file.read(45)
+        out_file.write(b' \nend\n')
+    finally:
+        # Close only the files this function itself opened.
+        for f in opened_files:
+            f.close()
+
+
+def decode(in_file, out_file=None, mode=None, quiet=False):
+    """Decode uuencoded file"""
+    #
+    # Open the input file, if needed.
+    #
+    opened_files = []
+    if in_file == '-':
+        in_file = sys.stdin.buffer
+    elif isinstance(in_file, str):
+        in_file = open(in_file, 'rb')
+        opened_files.append(in_file)
+
+    try:
+        #
+        # Read until a begin is encountered or we've exhausted the file
+        #
+        while True:
+            hdr = in_file.readline()
+            if not hdr:
+                raise Error('No valid begin line found in input file')
+            if not hdr.startswith(b'begin'):
+                continue
+            hdrfields = hdr.split(b' ', 2)
+            if len(hdrfields) == 3 and hdrfields[0] == b'begin':
+                try:
+                    int(hdrfields[1], 8)
+                    break
+                except ValueError:
+                    pass
+        if out_file is None:
+            # If the filename isn't ASCII, what's up with that?!?
+            out_file = hdrfields[2].rstrip(b' \t\r\n\f').decode("ascii")
+            if os.path.exists(out_file):
+                raise Error('Cannot overwrite existing file: %s' % out_file)
+        if mode is None:
+            mode = int(hdrfields[1], 8)
+        #
+        # Open the output file
+        #
+        if out_file == '-':
+            out_file = sys.stdout.buffer
+        elif isinstance(out_file, str):
+            fp = open(out_file, 'wb')
+            try:
+                os.path.chmod(out_file, mode)
+            except AttributeError:
+                pass
+            out_file = fp
+            opened_files.append(out_file)
+        #
+        # Main decoding loop
+        #
+        s = in_file.readline()
+        while s and s.strip(b' \t\r\n\f') != b'end':
+            try:
+                data = binascii.a2b_uu(s)
+            except binascii.Error as v:
+                # Workaround for broken uuencoders by /Fredrik Lundh
+                nbytes = (((s[0]-32) & 63) * 4 + 5) // 3
+                data = binascii.a2b_uu(s[:nbytes])
+                if not quiet:
+                    sys.stderr.write("Warning: %s\n" % v)
+            out_file.write(data)
+            s = in_file.readline()
+        if not s:
+            raise Error('Truncated input file')
+    finally:
+        for f in opened_files:
+            f.close()
+
+def test():
+    """uuencode/uudecode main program"""
+
+    import optparse
+    parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
+    parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
+    parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
+
+    (options, args) = parser.parse_args()
+    if len(args) > 2:
+        # NOTE(review): parser.error() already exits, so the sys.exit(1)
+        # below is unreachable.
+        parser.error('incorrect number of arguments')
+        sys.exit(1)
+
+    # Use the binary streams underlying stdin/stdout
+    input = sys.stdin.buffer
+    output = sys.stdout.buffer
+    if len(args) > 0:
+        input = args[0]
+    if len(args) > 1:
+        output = args[1]
+
+    if options.decode:
+        if options.text:
+            if isinstance(output, str):
+                output = open(output, 'wb')
+            else:
+                print(sys.argv[0], ': cannot do -t to stdout')
+                sys.exit(1)
+        decode(input, output)
+    else:
+        if options.text:
+            if isinstance(input, str):
+                input = open(input, 'rb')
+            else:
+                print(sys.argv[0], ': cannot do -t from stdin')
+                sys.exit(1)
+        encode(input, output)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uuid.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/uuid.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/venv.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/venv.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/warnings.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/warnings.py
new file mode 100644
index 00000000..1cb31b53
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/warnings.py
@@ -0,0 +1,2 @@
+def warn(msg, cat=None, stacklevel=1):
+    print("%s: %s" % ("Warning" if cat is None else cat.__name__, msg))
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/weakref.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/weakref.py
new file mode 100644
index 00000000..76aabfa3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/weakref.py
@@ -0,0 +1,7 @@
+#
+# This is completely dummy implementation, which does not
+# provide real weak references, and thus will hoard memory!
+#
+
+def proxy(obj, cb=None):
+    return obj
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/xmltok.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/xmltok.py
new file mode 100644
index 00000000..c46f2bd4
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/xmltok.py
@@ -0,0 +1,142 @@
+TEXT = "TEXT"
+START_TAG = "START_TAG"
+#START_TAG_DONE = "START_TAG_DONE"
+END_TAG = "END_TAG"
+PI = "PI"
+#PI_DONE = "PI_DONE"
+ATTR = "ATTR"
+#ATTR_VAL = "ATTR_VAL"
+
+class XMLSyntaxError(Exception):
+    """Raised when the input does not have the expected XML structure."""
+    pass
+
+class XMLTokenizer:
+    """Pull-based XML lexer over a character stream with 1-char lookahead.
+
+    Characters come from f.read(1); self.c always holds the current,
+    not-yet-consumed character.
+    """
+
+    def __init__(self, f):
+        self.f = f
+        self.nextch()
+
+    def curch(self):
+        # Current character, without consuming it.
+        return self.c
+
+    def getch(self):
+        # Consume and return the current character.
+        c = self.c
+        self.nextch()
+        return c
+
+    def eof(self):
+        return self.c == ""
+
+    def nextch(self):
+        # Advance the lookahead.
+        # NOTE(review): raising StopIteration from inside the tokenize()
+        # generator becomes RuntimeError under PEP 479 (CPython 3.7+) --
+        # confirm intended behavior on the target runtime.
+        self.c = self.f.read(1)
+        if not self.c:
+            raise StopIteration
+        return self.c
+
+    def skip_ws(self):
+        while self.curch().isspace():
+            self.nextch()
+
+    def isident(self):
+        # True if the next non-space character can start an identifier.
+        self.skip_ws()
+        return self.curch().isalpha()
+
+    def getident(self):
+        # Read an identifier: letters, digits, '_', '-', '.'.
+        self.skip_ws()
+        ident = ""
+        while True:
+            c = self.curch()
+            if not(c.isalpha() or c.isdigit() or c in "_-."):
+                break
+            ident += self.getch()
+        return ident
+
+    def getnsident(self):
+        # Identifier with optional namespace prefix -> (ns, name).
+        ns = ""
+        ident = self.getident()
+        if self.curch() == ":":
+            self.nextch()
+            ns = ident
+            ident = self.getident()
+        return (ns, ident)
+
+    def match(self, c):
+        # Consume c if it is next (after whitespace); report success.
+        self.skip_ws()
+        if self.curch() == c:
+            self.nextch()
+            return True
+        return False
+
+    def expect(self, c):
+        if not self.match(c):
+            raise XMLSyntaxError
+
+    def lex_attrs_till(self):
+        # Yield (ATTR, (ns, name), value) for each attribute before tag end.
+        while self.isident():
+            attr = self.getnsident()
+            #yield (ATTR, attr)
+            self.expect("=")
+            self.expect('"')
+            val = ""
+            while self.curch() != '"':
+                val += self.getch()
+            #yield (ATTR_VAL, val)
+            self.expect('"')
+            yield (ATTR, attr, val)
+
+    def tokenize(self):
+        # Main generator: yields START_TAG/END_TAG/PI/ATTR/TEXT tuples.
+        while not self.eof():
+            if self.match("<"):
+                if self.match("/"):
+                    yield (END_TAG, self.getnsident())
+                    self.expect(">")
+                elif self.match("?"):
+                    yield (PI, self.getident())
+                    yield from self.lex_attrs_till()
+                    self.expect("?")
+                    self.expect(">")
+                elif self.match("!"):
+                    # Comment: skip everything up to the closing "-->".
+                    self.expect("-")
+                    self.expect("-")
+                    last3 = ''
+                    while True:
+                        last3 = last3[-2:] + self.getch()
+                        if last3 == "-->":
+                            break
+                else:
+                    tag = self.getnsident()
+                    yield (START_TAG, tag)
+                    yield from self.lex_attrs_till()
+                    if self.match("/"):
+                        # Self-closing tag: emit the matching END_TAG now.
+                        yield (END_TAG, tag)
+                    self.expect(">")
+            else:
+                text = ""
+                while self.curch() != "<":
+                    text += self.getch()
+                if text:
+                    yield (TEXT, text)
+
+
+def gfind(gen, pred):
+    for i in gen:
+        if pred(i):
+            return i
+
+def text_of(gen, tag):
+    # Return text content of a leaf tag
+    def match_tag(t):
+        if t[0] != START_TAG:
+            return False
+        if isinstance(tag, ()):
+            return t[1] == tag
+        return t[1][1] == tag
+
+    gfind(gen, match_tag)
+    # Assumes no attributes
+    t, val = next(gen)
+    assert t == TEXT
+    return val
+
+def tokenize(file):
+    return XMLTokenizer(file).tokenize()
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/zipfile.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/zipfile.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/zlib.py b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/zlib.py
new file mode 100644
index 00000000..e803341c
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/micropython/zlib.py
@@ -0,0 +1 @@
+from uzlib import *
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/monitor.py b/src/main/resources/assets/openpython/opos/v1.1/lib/monitor.py
new file mode 100644
index 00000000..b034c737
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/monitor.py
@@ -0,0 +1,52 @@
+from component import Component
+
+__all__ = ["Monitor", "monitor", "_set_monitor"]
+
+
+class Monitor:
+    """Cursor-tracking character output onto an OpenComputers GPU component."""
+
+    def __init__(self, gpu: Component):
+        self.gpu = gpu
+        # Fixed 80x25 text area; cursor coordinates are 1-based.
+        self.w = 80
+        self.h = 25
+        self.x = 1
+        self.y = 1
+        # Count of pending characters to swallow after a backspace.
+        # NOTE(review): the constant 3 looks tied to the caller's echo
+        # sequence after "\b" -- confirm against the terminal driver.
+        self.bc = 0
+
+    def scroll(self):
+        # Shift the whole screen up one row and blank the bottom line.
+        self.gpu.copy(1, 2, self.w, self.h, 0, - 1)
+        self.gpu.fill(1, self.h, self.w, 1, " ")
+
+    def put(self, char: str):
+        """Render one character, handling newline, wrap, scroll, backspace."""
+        assert len(char) == 1
+        if char == "\n":
+            self.x = 1
+            self.y += 1
+
+        # Wrap to the next line when past the right edge.
+        if self.x > self.w:
+            self.x = 1
+            self.y += 1
+
+        # Scroll when past the bottom row.
+        if self.y > self.h:
+            self.scroll()
+            self.y = self.h
+
+        # Control characters move the cursor but print nothing.
+        if char == "\r" or char == "\n":
+            return
+
+        if self.bc > 0:
+            self.bc -= 1
+        elif char == "\b":
+            # Erase the previous cell and start swallowing echoed chars.
+            self.x -= 1
+            self.gpu.set(self.x, self.y, " ")
+            self.bc = 3
+        else:
+            self.gpu.set(self.x, self.y, char)
+            self.x += 1
+
+
+monitor = None
+
+
+def _set_monitor(obj):
+    """Install *obj* as the module-level `monitor` singleton."""
+    global monitor
+    monitor = obj
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/ocpath.py b/src/main/resources/assets/openpython/opos/v1.1/lib/ocpath.py
new file mode 100644
index 00000000..63cea898
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/ocpath.py
@@ -0,0 +1,504 @@
+# https://github.com/python/cpython/blob/3.5/Lib/posixpath.py
+"""Common operations on Posix pathnames.
+Instead of importing this module directly, import os and refer to
+this module as os.path.  The "os.path" name is an alias for this
+module on Posix systems; on other systems (e.g. Mac, Windows),
+os.path provides the same operations in a manner specific to that
+platform, and is an alias to another module (e.g. macpath, ntpath).
+Some of this can actually be useful on non-Posix systems too, e.g.
+for manipulation of the pathname component of URLs.
+"""
+
+import genericpath
+import stat
+from genericpath import *
+
+__all__ = ["normcase", "isabs", "join", "splitdrive", "split", "splitext",
+           "basename", "dirname", "commonprefix", "getsize", "getmtime",
+           "getatime", "getctime", "islink", "exists", "lexists", "isdir", "isfile",
+           "ismount", "expanduser", "expandvars", "normpath", "abspath",
+           "samefile", "sameopenfile", "samestat",
+           "curdir", "pardir", "sep", "pathsep", "defpath", "altsep", "extsep",
+           "devnull", "realpath", "relpath",
+           "commonpath"]
+
+# Strings representing various path-related bits and pieces.
+# These are primarily for export; internally, they are hardcoded.
+curdir = '.'
+pardir = '..'
+extsep = '.'
+sep = '/'
+pathsep = ':'
+defpath = ':/bin:/usr/bin'
+altsep = None
+devnull = '/dev/null'
+
+import os
+
+
+def _get_sep(path):
+    if isinstance(path, bytes):
+        return b'/'
+    else:
+        return '/'
+
+
+# Normalize the case of a pathname.  Trivial in Posix, string.lower on Mac.
+# On MS-DOS this may also turn slashes into backslashes; however, other
+# normalizations (such as optimizing '../' away) are not allowed
+# (another function should be defined to do that).
+
+def normcase(s):
+    """Normalize case of pathname.  Has no effect under Posix"""
+    if not isinstance(s, (bytes, str)):
+        raise TypeError("normcase() argument must be str or bytes, "
+                        "not '{}'".format(s.__class__.__name__))
+    return s
+
+
+# Return whether a path is absolute.
+# Trivial in Posix, harder on the Mac or MS-DOS.
+
+def isabs(s):
+    """Test whether a path is absolute"""
+    sep = _get_sep(s)
+    return s.startswith(sep)
+
+
+# Join pathnames.
+# Ignore the previous parts if a part is absolute.
+# Insert a '/' unless the first part is empty or already ends in '/'.
+
+def join(a, *p):
+    """Join two or more pathname components, inserting '/' as needed.
+    If any component is an absolute path, all previous path components
+    will be discarded.  An empty last part will result in a path that
+    ends with a separator."""
+    sep = _get_sep(a)
+    path = a
+    try:
+        if not p:
+            path[:0] + sep  # 23780: Ensure compatible data type even if p is null.
+        for b in p:
+            if b.startswith(sep):
+                path = b
+            elif not path or path.endswith(sep):
+                path += b
+            else:
+                path += sep + b
+    except (TypeError, AttributeError, BytesWarning):
+        genericpath._check_arg_types('join', a, *p)
+        raise
+    return path
+
+
+# Split a path in head (everything up to the last '/') and tail (the
+# rest).  If the path ends in '/', tail will be empty.  If there is no
+# '/' in the path, head  will be empty.
+# Trailing '/'es are stripped from head unless it is the root.
+
+def split(p):
+    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
+    everything after the final slash.  Either part may be empty."""
+    sep = _get_sep(p)
+    i = p.rfind(sep) + 1
+    head, tail = p[:i], p[i:]
+    if head and head != sep * len(head):
+        head = head.rstrip(sep)
+    return head, tail
+
+
+# Split a path in root and extension.
+# The extension is everything starting at the last dot in the last
+# pathname component; the root is everything before that.
+# It is always true that root + ext == p.
+
+def splitext(p):
+    if isinstance(p, bytes):
+        sep = b'/'
+        extsep = b'.'
+    else:
+        sep = '/'
+        extsep = '.'
+    return genericpath._splitext(p, sep, None, extsep)
+
+
+# splitext.__doc__ = genericpath._splitext.__doc__
+
+
+# Split a pathname into a drive specification and the rest of the
+# path.  Useful on DOS/Windows/NT; on Unix, the drive is always empty.
+
+def splitdrive(p):
+    """Split a pathname into drive and path. On Posix, drive is always
+    empty."""
+    return p[:0], p
+
+
+# Return the tail (basename) part of a path, same as split(path)[1].
+
+def basename(p):
+    """Returns the final component of a pathname"""
+    sep = _get_sep(p)
+    i = p.rfind(sep) + 1
+    return p[i:]
+
+
+# Return the head (dirname) part of a path, same as split(path)[0].
+
+def dirname(p):
+    """Returns the directory component of a pathname"""
+    sep = _get_sep(p)
+    i = p.rfind(sep) + 1
+    head = p[:i]
+    if head and head != sep * len(head):
+        head = head.rstrip(sep)
+    return head
+
+
+# Is a path a symbolic link?
+# This will always return false on systems where os.stat doesn't exist.
+
+def islink(path):
+    """Test whether a path is a symbolic link"""
+    try:
+        st = os.stat(path)
+    except (OSError, AttributeError):
+        return False
+    return stat.S_ISLNK(st[stat.ST_MODE])
+
+
+# Being true for dangling symbolic links is also useful.
+
+def lexists(path):
+    """Test whether a path exists.  Returns True for broken symbolic links"""
+    try:
+        os.stat(path)
+    except OSError:
+        return False
+    return True
+
+
+# Is a path a mount point?
+# (Does this work for all UNIXes?  Is it even guaranteed to work by Posix?)
+
+def ismount(path):
+    """Test whether a path is a mount point"""
+    try:
+        s1 = os.stat(path)
+    except OSError:
+        # It doesn't exist -- so not a mount point. :-)
+        return False
+    else:
+        # A symlink can never be a mount point
+        if stat.S_ISLNK(s1[stat.ST_MODE]):
+            return False
+
+    # Compare device and inode of path against those of its parent.
+    if isinstance(path, bytes):
+        parent = join(path, b'..')
+    else:
+        parent = join(path, '..')
+    parent = realpath(parent)
+    try:
+        s2 = os.stat(parent)
+    except OSError:
+        return False
+
+    dev1 = s1[stat.ST_DEV]
+    dev2 = s2[stat.ST_DEV]
+    if dev1 != dev2:
+        return True  # path/.. on a different device as path
+    ino1 = s1[stat.ST_INO]
+    ino2 = s2[stat.ST_INO]
+    if ino1 == ino2:
+        return True  # path/.. is the same i-node as path
+    return False
+
+
+# Expand paths beginning with '~' or '~user'.
+# '~' means $HOME; '~user' means that user's home directory.
+# If the path doesn't begin with '~', or if the user or $HOME is unknown,
+# the path is returned unchanged (leaving error reporting to whatever
+# function is called with the expanded path as argument).
+# See also module 'glob' for expansion of *, ? and [...] in pathnames.
+# (A function should also be defined to do full *sh-style environment
+# variable expansion.)
+
+def expanduser(path):
+    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
+    do nothing."""
+    if isinstance(path, bytes):
+        tilde = b'~'
+    else:
+        tilde = '~'
+    if not path.startswith(tilde):
+        return path
+    sep = _get_sep(path)
+    # i marks the end of the '~user' prefix.
+    i = path.find(sep, 1)
+    if i < 0:
+        i = len(path)
+    if i == 1:
+        # Bare '~': use $HOME.
+        # NOTE(review): CPython falls back to pwd when HOME is unset; this
+        # version raises KeyError instead -- confirm that is acceptable.
+        userhome = os.environ['HOME']
+    else:
+        import pwd
+        name = path[1:i]
+        if isinstance(name, bytes):
+            name = str(name, 'ASCII')
+        try:
+            pwent = pwd.getpwnam(name)
+        except KeyError:
+            # Unknown user: leave the path untouched.
+            return path
+        userhome = pwent.pw_dir
+    if isinstance(path, bytes):
+        userhome = os.fsencode(userhome)
+        root = b'/'
+    else:
+        root = '/'
+    userhome = userhome.rstrip(root)
+    # If userhome was just '/', the sum would be empty -- return root.
+    return (userhome + path[i:]) or root
+
+
+# Expand paths containing shell variable substitutions.
+# This expands the forms $variable and ${variable} only.
+# Non-existent variables are left unchanged.
+
# Lazily-compiled patterns for $var / ${var} (str and bytes variants).
_varprog = None
_varprogb = None


def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog, _varprogb
    if isinstance(path, bytes):
        if b'$' not in path:
            return path
        if not _varprogb:
            import re
            _varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprogb.search
        brace_open, brace_close = b'{', b'}'
        environ = getattr(os, 'environb', None)
    else:
        if '$' not in path:
            return path
        if not _varprog:
            import re
            _varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
        search = _varprog.search
        brace_open, brace_close = '{', '}'
        environ = os.environ
    pos = 0
    while True:
        match = search(path, pos)
        if match is None:
            break
        begin, end = match.span(0)
        name = match.group(1)
        if name.startswith(brace_open) and name.endswith(brace_close):
            name = name[1:-1]
        try:
            if environ is None:
                # bytes path without os.environb: go through the str environ.
                value = os.fsencode(os.environ[os.fsdecode(name)])
            else:
                value = environ[name]
        except KeyError:
            # Unknown variable: leave the reference in place, keep scanning.
            pos = end
        else:
            # Splice the value in and resume scanning *after* it, so the
            # substituted text is never itself expanded.
            tail = path[end:]
            path = path[:begin] + value
            pos = len(path)
            path += tail
    return path
+
+
+# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
+# It should be understood that this may change the meaning of the path
+# if it contains symbolic links!
+
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if isinstance(path, bytes):
        sep, empty, dot, dotdot = b'/', b'', b'.', b'..'
    else:
        sep, empty, dot, dotdot = '/', '', '.', '..'
    if path == empty:
        return dot
    # POSIX: exactly two leading slashes are significant; one, or three or
    # more, collapse to a single slash.
    slashes = 1 if path.startswith(sep) else 0
    if slashes and path.startswith(sep * 2) and not path.startswith(sep * 3):
        slashes = 2
    stack = []
    for part in path.split(sep):
        if part == empty or part == dot:
            continue
        if part != dotdot:
            stack.append(part)
        elif not slashes and not stack:
            # Relative path climbing above its start: keep the '..'.
            stack.append(part)
        elif stack and stack[-1] == dotdot:
            # Already climbing: keep accumulating '..'.
            stack.append(part)
        elif stack:
            stack.pop()
        # else: '..' directly under the root is dropped.
    normalized = sep.join(stack)
    if slashes:
        normalized = sep * slashes + normalized
    return normalized or dot
+
+
def abspath(path):
    """Return an absolute path."""
    if isabs(path):
        return path
    cwd = os.getcwd()
    if isinstance(path, bytes):
        # Match the argument's type before joining.
        cwd = cwd.encode()
    return join(cwd, path)
+
+
+# Return a canonical path (i.e. the absolute location of a file on the
+# filesystem).
+
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
    # filename[:0] is the empty str/bytes matching filename's type, so the
    # resolution starts from an empty base of the right flavour.
    resolved, _ = _joinrealpath(filename[:0], filename, {})
    return abspath(resolved)
+
+
+# Join two paths, normalizing and eliminating any symbolic links
+# encountered in the second path.
def _joinrealpath(path, rest, seen):
    """Resolve *rest* against the already-resolved prefix *path*.

    *seen* caches resolved symlink targets; a value of None marks a link
    whose resolution is still in progress (loop detection).  Returns
    (path, ok) where ok is False when a symlink loop forced the remainder
    to be returned unresolved.
    """
    if isinstance(path, bytes):
        sep = b'/'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '/'
        curdir = '.'
        pardir = '..'

    if isabs(rest):
        rest = rest[1:]
        path = sep

    while rest:
        name, _, rest = rest.partition(sep)
        if not name or name == curdir:
            # current dir
            continue
        if name == pardir:
            # parent dir
            if path:
                path, name = split(path)
                if name == pardir:
                    # The prefix itself ends in an unresolved '..': keep it
                    # and append another instead of cancelling it out.
                    path = join(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = join(path, name)
        if not islink(newpath):
            path = newpath
            continue
        # Resolve the symbolic link
        if newpath in seen:
            # Already seen this path
            path = seen[newpath]
            if path is not None:
                # use cached value
                continue
            # The symlink is not resolved, so we must have a symlink loop.
            # Return already resolved part + rest of the path unchanged.
            return join(newpath, rest), False
        seen[newpath] = None  # not resolved symlink
        path, ok = _joinrealpath(path, os.readlink(newpath), seen)
        if not ok:
            return join(path, rest), False
        seen[newpath] = path  # resolved symlink

    return path, True
+
+
def relpath(path, start=None):
    """Return a relative version of a path"""

    if not path:
        raise ValueError("no path specified")

    if isinstance(path, bytes):
        curdir = b'.'
        sep = b'/'
        pardir = b'..'
    else:
        curdir = '.'
        sep = '/'
        pardir = '..'

    # Default start is the current directory.
    if start is None:
        start = curdir

    try:
        start_list = [x for x in abspath(start).split(sep) if x]
        path_list = [x for x in abspath(path).split(sep) if x]
        # Work out how much of the filepath is shared by start and path.
        i = len(commonprefix([start_list, path_list]))

        # Climb out of the unshared tail of start, then descend into path.
        rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
        if not rel_list:
            return curdir
        return join(*rel_list)
    except (TypeError, AttributeError, BytesWarning, DeprecationWarning):
        # Produce a clearer error when str and bytes arguments were mixed.
        genericpath._check_arg_types('relpath', path, start)
        raise
+
+
+# Return the longest common sub-path of the sequence of paths given as input.
+# The paths are not normalized before comparing them (this is the
+# responsibility of the caller). Any trailing separator is stripped from the
+# returned path.
+
def commonpath(paths):
    """Given a sequence of path names, returns the longest common sub-path."""

    if not paths:
        raise ValueError('commonpath() arg is an empty sequence')

    if isinstance(paths[0], bytes):
        sep = b'/'
        curdir = b'.'
    else:
        sep = '/'
        curdir = '.'

    try:
        split_paths = [path.split(sep) for path in paths]

        try:
            # All paths must agree on absoluteness: a mixed set yields two
            # distinct values and the 1-tuple unpack raises ValueError.
            isabs, = set(p[:1] == sep for p in paths)
        except ValueError:
            raise ValueError("Can't mix absolute and relative paths") from None

        # Drop empty and '.' components before comparing.
        split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
        # The lexicographic min and max bracket the whole set: their common
        # prefix is the common prefix of every path.
        s1 = min(split_paths)
        s2 = max(split_paths)
        common = s1
        for i, c in enumerate(s1):
            if c != s2[i]:
                common = s1[:i]
                break

        prefix = sep if isabs else sep[:0]
        return prefix + sep.join(common)
    except (TypeError, AttributeError):
        genericpath._check_arg_types('commonpath', *paths)
        raise
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/colors.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/colors.py
new file mode 100644
index 00000000..79791b09
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/colors.py
@@ -0,0 +1,44 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/colors.lua
+
from micropython import const

__all__ = ["WHITE", "ORANGE", "MAGENTA", "LIGHTBLUE",
           "YELLOW", "LIME", "PINK", "GRAY",
           "SILVER", "CYAN", "PURPLE", "BLUE",
           "BROWN", "GREEN", "RED", "BLACK"]

# The 16 palette indices used by OpenComputers (mirrors colors.lua).
WHITE = const(0)
ORANGE = const(1)
MAGENTA = const(2)
LIGHTBLUE = const(3)
YELLOW = const(4)
LIME = const(5)
PINK = const(6)
GRAY = const(7)
SILVER = const(8)
CYAN = const(9)
PURPLE = const(10)
BLUE = const(11)
BROWN = const(12)
GREEN = const(13)
RED = const(14)
BLACK = const(15)

# alias
# Lowercase aliases matching the Lua API's naming.
white = WHITE
orange = ORANGE
magenta = MAGENTA
lightblue = LIGHTBLUE
yellow = YELLOW
lime = LIME
pink = PINK
gray = GRAY
silver = SILVER
cyan = CYAN
purple = PURPLE
blue = BLUE
brown = BROWN
green = GREEN
red = RED
black = BLACK
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/component.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/component.py
new file mode 100644
index 00000000..ae31cd79
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/component.py
@@ -0,0 +1,184 @@
+from ucomponent import invoke, invokes, get_methods, get_doc, get_list, get_type, get_slot
+
+import event
+import sys
+
+_list = list
+
+__all__ = ['Component', 'is_available', 'get_primary', 'get_primary_checked', 'set_primary', 'guard']
+
+primaries = {}
+
+
# Bound callable for one method of a remote component; calling the object
# invokes the method through ucomponent.invoke on the owning component.
# (No class docstring on purpose: __doc__ is redefined as a property below.)
class ComponentMethod:
    __slots__ = "component", "name"

    def __init__(self, component, name):
        self.component = component
        self.name = name

    def __call__(self, *args):
        # Plain invocation on the owning component.
        return invoke(self.component.address, self.name, *args)

    def call(self, *args):
        return self(*args)  # alias

    def _call(self, *args):
        # invokes() variant -- presumably the multi-result form; TODO confirm
        # against the ucomponent API.
        return invokes(self.component.address, self.name, *args)

    def _guard_call(self, count, *args):
        # Shape the result to exactly *count* values (None-padded; see guard()).
        return guard(self._call(*args), count)

    @property
    def __doc__(self):
        # Replace the normal docstring lookup with the component-provided
        # documentation string for this method.
        return doc(self.component.address, self.name)

    def __repr__(self):
        doc = self.__doc__
        if doc:
            doc = "\n" + doc.replace(" -- ", "\n")
        else:
            doc = ""

        return "ComponentMethod<{0!r}, {1!r}>{2}".format(self.component, self.name, doc)
+
+
class Component:
    """Proxy for a component identified by its address.

    Any unknown attribute access yields a ComponentMethod, so component
    methods can be called as attributes of the proxy.
    """

    __slots__ = "address",

    def __init__(self, address):
        self.address = address

    def __bool__(self):
        # Truthy only while the address still resolves to a live component.
        return self.type is not None

    @property
    def type(self):
        return get_type(self.address)

    @property
    def slot(self):
        return get_slot(self.address)

    def __getattr__(self, name):
        # Only called for names not covered by __slots__/properties.
        return ComponentMethod(self, name)

    def __dir__(self):
        return dir(object()) + ["address", "type", "slot"] + methods(self.address)

    def __repr__(self):
        if self:
            return "Component<{0}:{1}>".format(self.type, self.address)
        else:
            return "Component<INVALID:{0}>".format(self.address)
+
+
def doc(address: str, method: str):
    """Return the documentation string for *method* of the component at *address*."""
    return get_doc(address, method)
+
+
# noinspection PyShadowingBuiltins
def list(filter: str = None, exact: bool = True):
    """Return Component proxies for known components.

    With no *filter*, every component is returned.  With *exact*, the filter
    is handed to get_list directly; otherwise it is matched as a substring of
    each component type.  (Shadows the builtin list, which is kept as _list.)
    """
    if filter is None:
        return [proxy(address) for address in get_list()]
    elif exact:
        return [proxy(address) for address in get_list(filter)]
    else:
        return [proxy(address)
                for address, component_type
                in get_list().items()
                if filter in component_type]
+
+
def methods(address: str) -> _list:
    """Return the method names exposed by the component at *address*."""
    return _list(get_methods(address))


def proxy(address: str):
    """Wrap *address* in a Component proxy."""
    return Component(address)


# Shadows the builtin type() at module level (mirrors the OpenOS API).
def type(address: str):
    """Return the component type string for *address*."""
    return get_type(address)


def slot(address: str) -> int:
    """Return the slot number for *address*, or -1 when unknown."""
    slot = get_slot(address)
    return slot if slot is not None else -1


def fields(address: str):
    """Field introspection is not implemented in this port."""
    raise NotImplementedError
+
+
def get(address: str, component_type: str):
    """Resolve an abbreviated *address* prefix to a Component proxy.

    Raises Exception when no component of *component_type* matches the prefix.
    """
    size = len(address)

    # get_list() returns a mapping of address -> component type (see list()
    # and setup()), so iterate its items; plain iteration would only yield
    # the address keys and break the 2-tuple unpack.
    for addr, _ctype in get_list(component_type).items():
        if addr[:size] == address:
            return proxy(addr)

    raise Exception("no such component")
+
+
def is_available(component_type: str):
    """True when a primary component of *component_type* is registered."""
    return primaries.get(component_type) is not None


def get_primary(component_type: str) -> Component:
    """Return the primary component for *component_type*, or None."""
    return primaries.get(component_type)


def get_primary_checked(component_type: str) -> Component:
    """Like get_primary(), but raise when no primary is available."""
    if not is_available(component_type):
        raise Exception("no primary {!r} available".format(component_type))

    return primaries[component_type]


def load_primary(component_type: str) -> Component:
    """Return the primary component for *component_type* (same as get_primary)."""
    return primaries.get(component_type)


def set_primary(component_type: str, address: str):
    """Install the component at *address* as primary for *component_type*."""
    primaries[component_type] = proxy(address)
+
+
def guard(result, count: int):
    """Normalize *result* to a tuple padded with None up to *count* values.

    A non-tuple result counts as a single value; results already holding
    *count* or more values are returned unpadded.
    """
    if not isinstance(result, tuple):
        result = (result,)
    padding = count - len(result)
    return result + (None,) * padding
+
+
@event.on("component_added")
def on_component_added(_, address, component_type):
    """Adopt a newly attached component as primary if its type has none yet."""
    prev = primaries.get(component_type)
    if prev is None:
        primaries[component_type] = proxy(address)


@event.on("component_removed")
def on_component_removed(_, address, component_type):
    """Drop the primary entry when the primary component itself is detached."""
    prev = primaries.get(component_type)
    if prev is not None and prev.address == address:
        del primaries[component_type]
+
+
def __getattr__(name: str) -> Component:
    """Module attribute hook (PEP 562): component.gpu etc. resolve to the
    primary component of that type (raising when none is available)."""
    return get_primary_checked(name)


def setup():
    """Pick an initial primary for every component type currently present."""
    for address, component_type in get_list().items():
        if not is_available(component_type):
            set_primary(component_type, address)


def import_component(component_type: str, module_name: str) -> Component:
    """Return the primary *component_type*, or abort the import of
    *module_name* (removing it from sys.modules) when it is absent."""
    component = get_primary(component_type)
    if component is None:
        del sys.modules[module_name]
        raise ImportError("component {!r} is missing; import {!r} failed".format(component_type, module_name))

    return component
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/computer.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/computer.py
new file mode 100644
index 00000000..19118160
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/computer.py
@@ -0,0 +1,88 @@
+# noinspection PyUnresolvedReferences
+import gc
+
+import ucomputer
+import utime
+
+
def address():
    """Return this computer's component address."""
    return ucomputer.get_computer_address()


def tmp_address():
    """Return the address of the computer's temporary filesystem."""
    return ucomputer.get_tmp_address()


def free_memory():
    """Return the number of free heap bytes as reported by gc."""
    return gc.mem_free()


def total_memory():
    """Return the total heap size (allocated + free bytes)."""
    return gc.mem_alloc() + gc.mem_free()


def uptime():
    """Return how long the computer has been running (via utime.time_up)."""
    return utime.time_up()


def shutdown(reboot: bool = False):
    """Power the machine off, or reboot it when *reboot* is True.

    The ucomputer call is apparently expected not to return; the raise below
    guards against it returning anyway.
    """
    if reboot:
        ucomputer.reboot()
    else:
        ucomputer.shutdown()

    raise NotImplementedError("invalid behavior")


def reboot():
    """Reboot the machine; ucomputer.reboot() is expected not to return."""
    ucomputer.reboot()
    raise NotImplementedError("invalid behavior")


def get_boot_address() -> str:
    """Read the boot filesystem address stored in the EEPROM data area."""
    import component

    # noinspection PyUnresolvedReferences
    eeprom = component.eeprom
    return eeprom.getData().decode()


def set_boot_address(address: str):
    """Store *address* in the EEPROM data area as the boot filesystem."""
    import component

    # noinspection PyUnresolvedReferences
    eeprom = component.eeprom
    eeprom.setData(address.encode())


def runlevel():
    """Always 1: this OS has no runlevel system."""
    return 1


def users():
    """Return the machine's registered users (as provided by ucomputer)."""
    return ucomputer.get_users()


def add_user(user: str):
    """Register *user* on this machine."""
    return ucomputer.add_user(user)


def remove_user(user: str):
    """Unregister *user* from this machine."""
    return ucomputer.remove_user(user)


def push_signal(name, *args):
    """Queue a signal on the machine's signal queue."""
    ucomputer.push_signal(name, *args)


def pull_signal(seconds):
    """Wait up to *seconds* (converted to 20-per-second ticks) for a signal.

    Returns a flat tuple (name, arg1, arg2, ...) or None on timeout.
    """
    signal = ucomputer.pop_signal(int(seconds * 20))
    if signal is None:
        return None

    name, args = signal
    return (name,) + args


def beep(frequency=None, duration=None):
    """Emit the machine beep; None arguments select the defaults."""
    return ucomputer.beep(frequency, duration)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/devfs.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/devfs.py
new file mode 100644
index 00000000..30e73a04
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/devfs.py
@@ -0,0 +1 @@
raise NotImplementedError  # devfs has not been ported to this OS yet
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/event.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/event.py
new file mode 100644
index 00000000..7390b717
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/event.py
@@ -0,0 +1,260 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/event.lua
+# - src/main/resources/assets/opencomputers/loot/openos/lib/core/full_event.lua
+
+import sys
+
+import computer
+import machine
+import process
+from computer import pull_signal
+
+__all__ = ["Handlers", "on", "listen", "pull", "pull_filtered", "pull_multiple", "cancel", "ignore"]
+
+INF = sys.maxsize
+ALWAYS = None
+TIMER = False
+EMPTY = ()
+
+last_interrupt = -1
+
+keyboard = None
+
+
class Handler:
    """One registered event callback.

    *key* selects which signals it receives (None = every signal, False =
    timer-only; see ALWAYS/TIMER above), *times* is the remaining invocation
    budget, and *interval*/*timeout* drive timer scheduling.
    """

    def __init__(self, key, callback, interval: float = None, times=None):
        self.key = key
        self.callback = callback
        self.times = times
        self.interval = interval  # timer period in seconds (None = not a timer)
        self.timeout = None if interval is None else computer.uptime() + interval  # next due time
+
+
class Handlers:
    """Registry of Handler objects, indexed both by id() token and by key."""

    def __init__(self):
        # key -> list of handlers interested in that key
        self.callback_by_event = {}
        # id(handler) -> handler; id() doubles as the public token
        self.registered = {}

    def register(self, handler: "Handler"):
        """Register *handler* and return its token (its id())."""
        self.registered[id(handler)] = handler

        seq = self.callback_by_event.setdefault(handler.key, [])
        seq.append(handler)

        return id(handler)

    def unregister(self, handler: "Handler"):
        """Remove *handler*; return True when it was registered, else False.

        (Fixed: this previously returned ``handler is None`` -- True on
        *failure* -- inverting the success flag reported through cancel().)
        """
        handler = self.registered.pop(id(handler), None)
        if handler is not None:
            key = handler.key
            seq = self.callback_by_event[key]  # type: list
            seq.remove(handler)
            if not seq:
                # Drop empty buckets so get_handlers() stays cheap.
                del self.callback_by_event[key]

        return handler is not None

    def iter_all_handlers(self):
        """Iterate over every registered handler."""
        return self.registered.values()

    def get_handlers(self, name):
        """Return the handlers registered for *name* (EMPTY when none)."""
        return self.callback_by_event.get(name) or EMPTY

    def unregister_by_id(self, timer_id):
        """Remove the handler registered under token *timer_id*."""
        handler = self.registered.get(timer_id)
        return self.unregister(handler)
+
+
+handlers = Handlers()
+
+
def register(key, callback, interval=None, times=None, opt_handlers: Handlers = None):
    """Create a Handler for *key* and register it; returns its token.

    Registers into the module-level registry unless *opt_handlers* is given.
    """
    opt_handlers = opt_handlers if opt_handlers is not None else handlers
    handler = Handler(key, callback, interval, times)
    return opt_handlers.register(handler)
+
+
def create_plain_filter(name, *args):
    """Build a predicate matching a signal by name and leading arguments.

    None entries in *args* act as wildcards, and signal arguments beyond
    the given pattern are ignored.
    """

    def matches(signal_name, *signal_args):
        if signal_name != name:
            return False
        # zip() truncates to the shorter sequence, so extra signal
        # arguments never fail the match.
        return all(expected is None or expected == actual
                   for expected, actual in zip(args, signal_args))

    return matches
+
+
def create_multiple_filter(*names):
    """Build a predicate accepting any signal whose name is one of *names*."""

    def matches(signal_name, *_):
        return signal_name in names

    return matches
+
+
def signal_handler(ticks):
    """Core dispatch loop step: wait up to *ticks* for one signal, run timer
    and event handlers, and return the raw signal tuple (or None)."""
    global last_interrupt
    current_time = computer.uptime()
    # Ctrl-C (rate-limited to once per second) produces an "interrupted"
    # pseudo-signal; Ctrl-Alt-C additionally signals the current process.
    interrupting = (
            current_time - last_interrupt > 1 and
            keyboard.is_control_down() and
            keyboard.is_key_down(keyboard.KEYS.c)
    ) if keyboard else False

    if interrupting:
        last_interrupt = current_time
        if keyboard and keyboard.is_alt_down():
            process.current_process().signal("INTERRUPTED")

        push("interrupted", current_time)

    removes = set()
    signal = pull_signal(ticks)

    def process_handler(etype, signal):
        # Run every handler registered under *etype*, honouring timer
        # deadlines and per-handler invocation budgets.
        for handler in handlers.get_handlers(etype):  # type: Handler
            is_timer = handler.timeout is not None
            if is_timer and current_time < handler.timeout:
                continue

            if handler.times is not None:
                handler.times -= 1
                if handler.times <= 0:
                    removes.add(handler)
                elif handler.interval is not None:
                    # Reschedule a repeating timer.
                    handler.timeout = current_time + handler.interval

            try:
                # NOTE(review): this 2-way unpack only succeeds when signal
                # is a (name, args) pair -- pull_signal() yields a *flat*
                # tuple and the timer pass receives None, both of which land
                # in on_error() instead.  Confirm intended behavior.
                name, args = signal
                result = handler.callback(name, *args)
            except BaseException as e:
                on_error(e)
            else:
                # A callback returning False unregisters itself.
                if result is False:
                    removes.add(handler)

    process_handler(TIMER, None)
    for handler in removes:
        handlers.unregister(handler)

    if signal is None:
        return

    removes = set()
    name, *args = signal
    # Catch-all handlers first, then handlers keyed on this signal's name.
    process_handler(ALWAYS, signal)
    process_handler(name, signal)

    for handler in removes:
        handlers.unregister(handler)

    return signal
+
+
def on(name):
    """Decorator form of listen(): register the function for signal *name*."""
    def wrapper(callback):
        listen(name, callback)
        return callback

    return wrapper


def listen(name, callback):
    """Register *callback* for signal *name*; False if already registered."""
    for handler in handlers.registered.values():
        if handler.key == name and handler.callback == callback:
            return False

    return register(name, callback)


def ignore(name, callback):
    """Remove the registration of *callback* for *name*.

    Returns True when a registration was removed.  At most one is removed;
    listen() prevents duplicates, so one match is all there can be.
    """
    removes = set()
    for handler in handlers.iter_all_handlers():
        if handler.key == name and handler.callback == callback:
            removes.add(handler)
            break  # listen() deduplicates, so the first match is the only one

    for handler in removes:
        handlers.unregister(handler)

    return len(removes) > 0
+
+
def timer(interval: float, callback, times: int):
    """Schedule *callback* every *interval* seconds, at most *times* times.

    Returns a token usable with cancel().
    """
    return register(TIMER, callback, interval, times)


def cancel(timer_id):
    """Cancel the timer registered under token *timer_id*."""
    return handlers.unregister_by_id(timer_id)


def push(name, *args):
    """Queue a signal on the computer's signal queue."""
    computer.push_signal(name, *args)
+
+
def pull(first, *args):
    """Wait for a signal matching a name/args pattern.

    *first* is either the signal name, or a timeout in seconds followed by
    the name/args pattern.
    """
    if isinstance(first, str):
        return pull_filtered(create_plain_filter(first, *args))
    else:
        return pull_filtered(first, create_plain_filter(*args))


def pull_filtered(first, second=None):
    """Wait for a signal accepted by a filter function.

    Call as pull_filtered(func) or pull_filtered(seconds, func).  Returns
    the matching signal tuple, or None once the deadline passes.
    """
    seconds = INF
    func = None

    if callable(first) and second is None:
        func = first
    elif callable(second):
        seconds = first
        func = second

    deadline = computer.uptime() + seconds
    while computer.uptime() < deadline:
        # Wake up no later than the nearest registered timer deadline.
        closest = deadline
        for handler in handlers.iter_all_handlers():
            if handler.timeout is not None:
                closest = min(closest, handler.timeout)

        signal = computer.pull_signal(closest - computer.uptime())
        if signal is None:
            continue

        if func(*signal):
            return signal


def pull_multiple(first, *args):
    """Wait for any signal whose name is among those given.

    *first* may be a timeout in seconds preceding the names.
    """
    if isinstance(first, int):
        return pull_filtered(first, create_multiple_filter(*args))
    else:
        return pull_filtered(create_multiple_filter(first, *args))
+
+
def on_error(e):
    """Last-resort handler: log a callback's exception to the machine debug."""
    machine.debug("signal_handler exc => %s: %s" % (type(e).__name__, e))


def setup():
    """Install the dispatcher: route computer.pull_signal and the machine
    signal hook through signal_handler."""
    global keyboard

    def pull_signal(seconds):
        # seconds -> machine ticks (20 per second); INF disables the timeout.
        ticks = sys.maxsize if seconds == INF else int(seconds * 20)
        return signal_handler(ticks)

    # Fills the module-level `keyboard` used by signal_handler's
    # interrupt detection.
    # noinspection PyUnresolvedReferences
    import keyboard

    computer.pull_signal = pull_signal
    machine.hook_signal(signal_handler)


def wait(ticks):
    """Pump the dispatcher once, waiting at most *ticks* machine ticks."""
    signal_handler(ticks)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/internet.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/internet.py
new file mode 100644
index 00000000..54a4d464
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/internet.py
@@ -0,0 +1,32 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# src/main/resources/assets/opencomputers/loot/openos/lib/internet.lua
+
+import component
+
# Fail the module import early when no internet card is available.
# (The original requested the "robot" component here -- a copy-paste slip
# from robot.py; every API in this module uses the "internet" component.)
internet = component.import_component("internet", __name__)
+
+
def request(url: str, data=None, headers: dict = None):
    """Issue an HTTP request through the primary internet card.

    *data* may be None (no body), a str/bytes body sent as-is, or a dict
    serialized as key=value pairs joined with '&'.
    """
    internet = component.get_primary("internet")

    post = None
    if data is None:
        pass
    elif isinstance(data, (str, bytes)):
        post = data
    elif isinstance(data, dict):
        # NOTE(review): keys/values are not URL-encoded -- confirm callers
        # escape them where needed.
        post = "&".join("{}={}".format(key, value) for key, value in data.items())
    else:
        raise TypeError

    return internet.request(url, post, headers)
+
+
def socket(address, port):
    """Open a socket to *address*:*port* via the primary internet card."""
    internet = component.get_primary("internet")
    return internet.socket(address, port)


# Shadows the builtin open() within this module (mirrors internet.lua).
def open(address, port):
    """Open a connection to *address*:*port* via the primary internet card."""
    internet = component.get_primary("internet")
    return internet.open(address, port)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/keyboard.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/keyboard.py
new file mode 100644
index 00000000..00e0d715
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/keyboard.py
@@ -0,0 +1,236 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/keyboard.lua
+# - src/main/resources/assets/opencomputers/loot/openos/lib/core/full_keyboard.lua
+
+import component
+from micropython import const
+
+keyboards_pressed_chars = {}
+keyboards_pressed_codes = {}
+
+__all__ = ["get_keyboard_address", "get_pressed_codes", "get_pressed_chars",
+           "is_alt_down", "is_control", "is_control_down", "is_key_down", "is_shift_down"]
+
+_LMENU = const(0x38)
+_RMENU = const(0xB8)
+_LCONTROL = const(0x1D)
+_RCONTROL = const(0x9D)
+_LSHIFT = const(0x2A)
+_RSHIFT = const(0x36)
+
+
class KEYS:
    """LWJGL-style key-code table (mirrors keyboard.lua's keys table).

    After instantiation, indexing works in both directions: KEYS['a'] /
    KEYS['A'] / KEYS['NAME'] yield codes, KEYS[code] yields the name.
    """

    N1 = 0x02
    N2 = 0x03
    N3 = 0x04
    N4 = 0x05
    N5 = 0x06
    N6 = 0x07
    N7 = 0x08
    N8 = 0x09
    N9 = 0x0A
    N0 = 0x0B
    A = 0x1E
    B = 0x30
    C = 0x2E
    D = 0x20
    E = 0x12
    F = 0x21
    G = 0x22
    H = 0x23
    I = 0x17
    J = 0x24
    K = 0x25
    L = 0x26
    M = 0x32
    N = 0x31
    O = 0x18
    P = 0x19
    Q = 0x10
    R = 0x13
    S = 0x1F
    T = 0x14
    U = 0x16
    V = 0x2F
    W = 0x11
    X = 0x2D
    Y = 0x15
    Z = 0x2C

    APOSTROPHE = 0x28
    AT = 0x91
    BACK = 0x0E  # BACKSPACE
    BACKSLASH = 0x2B
    CAPITAL = 0x3A  # CAPSLOCK
    COLON = 0x92
    COMMA = 0x33
    ENTER = 0x1C
    EQUALS = 0x0D
    GRAVE = 0x29  # ACCENT GRAVE
    LBRACKET = 0x1A
    LCONTROL = 0x1D
    LMENU = 0x38  # LEFT ALT
    LSHIFT = 0x2A
    MINUS = 0x0C
    NUMLOCK = 0x45
    PAUSE = 0xC5
    PERIOD = 0x34
    RBRACKET = 0x1B
    RCONTROL = 0x9D
    RMENU = 0xB8  # RIGHT ALT
    RSHIFT = 0x36
    SCROLL = 0x46  # SCROLL LOCK
    SEMICOLON = 0x27
    SLASH = 0x35  # / ON MAIN KEYBOARD
    SPACE = 0x39
    STOP = 0x95
    TAB = 0x0F
    UNDERLINE = 0x93

    # KEYPAD (AND NUMPAD WITH NUMLOCK OFF)
    UP = 0xC8
    DOWN = 0xD0
    LEFT = 0xCB
    RIGHT = 0xCD
    HOME = 0xC7
    END = 0xCF
    PAGEUP = 0xC9
    PAGEDOWN = 0xD1
    INSERT = 0xD2
    DELETE = 0xD3

    # FUNCTION KEYS
    F1 = 0x3B
    F2 = 0x3C
    F3 = 0x3D
    F4 = 0x3E
    F5 = 0x3F
    F6 = 0x40
    F7 = 0x41
    F8 = 0x42
    F9 = 0x43
    F10 = 0x44
    F11 = 0x57
    F12 = 0x58
    F13 = 0x64
    F14 = 0x65
    F15 = 0x66
    F16 = 0x67
    F17 = 0x68
    F18 = 0x69
    F19 = 0x71

    # JAPANESE KEYBOARDS
    KANA = 0x70
    KANJI = 0x94
    CONVERT = 0x79
    NOCONVERT = 0x7B
    YEN = 0x7D
    CIRCUMFLEX = 0x90
    AX = 0x96

    # NUMPAD
    NUMPAD0 = 0x52
    NUMPAD1 = 0x4F
    NUMPAD2 = 0x50
    NUMPAD3 = 0x51
    NUMPAD4 = 0x4B
    NUMPAD5 = 0x4C
    NUMPAD6 = 0x4D
    NUMPAD7 = 0x47
    NUMPAD8 = 0x48
    NUMPAD9 = 0x49
    NUMPADMUL = 0x37
    NUMPADDIV = 0xB5
    NUMPADSUB = 0x4A
    NUMPADADD = 0x4E
    NUMPADDECIMAL = 0x53
    NUMPADCOMMA = 0xB3
    NUMPADENTER = 0x9C
    NUMPADEQUALS = 0x8D

    def __init__(self):
        # Build the bidirectional lookup index over all int attributes.
        index = {}
        for name in dir(self):
            value = getattr(self, name)
            if isinstance(value, int):
                if name.startswith("N") and len(name) == 2:
                    # Digit keys N0..N9: also expose the bare digit string.
                    index[name[1:]] = value
                    index[name] = value
                    index[value] = name
                elif len(name) == 1:
                    # Letter keys: accept both cases.
                    index[name.upper()] = value
                    index[name.lower()] = value
                    index[value] = name
                else:
                    index[name] = value
                    index[value] = name

        self._index = index

    def __getitem__(self, item):
        # item may be a name (str) or a key code (int).
        return self._index[item]


# Replace the class with its (indexable) singleton instance.
KEYS = KEYS()

# Sanity checks: the module-level shortcut constants must agree with the table.
assert _LMENU == KEYS.LMENU
assert _RMENU == KEYS.RMENU
assert _LCONTROL == KEYS.LCONTROL
assert _RCONTROL == KEYS.RCONTROL
assert _LSHIFT == KEYS.LSHIFT
assert _RSHIFT == KEYS.RSHIFT
+
+
def get_keyboard_address(address=None):
    """Return *address* unchanged, or the primary keyboard's address.

    Returns None when no keyboard is available.
    """
    if address is not None:
        return address

    keyboard = component.get_primary("keyboard")
    if keyboard is not None:
        return keyboard.address

    return None


def get_pressed_codes(address=None):
    """Return the pressed-key-code state dict for the keyboard, or None."""
    address = get_keyboard_address(address)
    return keyboards_pressed_codes.get(address) if address else None


def get_pressed_chars(address=None):
    """Return the pressed-character state dict for the keyboard, or None."""
    address = get_keyboard_address(address)
    return keyboards_pressed_chars.get(address) if address else None
+
+
def is_alt_down(address=None) -> bool:
    """True while either Alt (menu) key is held on the given/primary keyboard.

    False when no keyboard state is known.
    """
    pressed_codes = get_pressed_codes(address)
    if pressed_codes is None:
        return False
    # bool() honours the annotated return type: .get() yields value-or-None.
    return bool(pressed_codes.get(_LMENU) or pressed_codes.get(_RMENU))
+
+
def is_control(char: int) -> bool:
    """Return True when *char* is a C0 (0x00-0x1F) or C1 (0x7F-0x9F) control
    code point; non-int arguments are never control characters."""
    if not isinstance(char, int):
        return False
    return char < 0x20 or 0x7F <= char <= 0x9F
+
+
def is_control_down(address=None) -> bool:
    """True while either Control key is held on the given/primary keyboard.

    False when no keyboard state is known.
    """
    pressed_codes = get_pressed_codes(address)
    if pressed_codes is None:
        return False
    # bool() honours the annotated return type: .get() yields value-or-None.
    return bool(pressed_codes.get(_LCONTROL) or pressed_codes.get(_RCONTROL))
+
+
def is_key_down(char_or_code, address=None) -> bool:
    """True while the given character (str) or key code (int) is held down."""
    if isinstance(char_or_code, str):
        pressed_chars = get_pressed_chars(address)
        return bool(pressed_chars.get(char_or_code)) \
            if pressed_chars is not None else False
    else:
        pressed_codes = get_pressed_codes(address)
        return bool(pressed_codes.get(char_or_code)) \
            if pressed_codes is not None else False
+
+
def is_shift_down(address=None) -> bool:
    """True while either Shift key is held on the given/primary keyboard.

    False when no keyboard state is known.
    """
    pressed_codes = get_pressed_codes(address)
    if pressed_codes is None:
        return False
    # bool() matches the sibling is_*_down() functions: .get() yields
    # value-or-None, not a strict boolean.
    return bool(pressed_codes.get(_LSHIFT) or pressed_codes.get(_RSHIFT))
new file mode 100644
index 00000000..4d3c64c3
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/note.py
@@ -0,0 +1,24 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/note.lua
+
note = {}
notes = {}  # The table that maps note names to their respective MIDI codes
reverseNotes = {}  # The reversed table "notes"

tempNotes = ["c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"]
sNotes = ["a0", "a#0", "b0"]
bNotes = ["bb0"]

# Extend the octave-0 seed with names for octaves 1..6; flat spellings exist
# for every natural except c and f (no black key directly below them).
for i in range(1, 6 + 1):
    for v in tempNotes:
        sNotes.append(v + str(i))
        if len(v) == 1 and v != "c" and v != "f":
            bNotes.append(v + "b" + str(i))

# Map MIDI codes 21..95 onto the sharp-name list.
# NOTE(review): notes[] stores the code as a *string* -- confirm against
# note.lua before relying on it.
for v in range(21, 95 + 1):
    k = sNotes[v - 20 - 1]
    notes[k] = str(v)
    reverseNotes[v] = k

# TODO: ?
raise NotImplementedError  # the port of note.lua is unfinished
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/pipe.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/pipe.py
new file mode 100644
index 00000000..da847436
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/pipe.py
@@ -0,0 +1,4 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/pipe.lua
+
raise NotImplementedError  # the port of pipe.lua is unfinished
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/process.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/process.py
new file mode 100644
index 00000000..96870e41
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/process.py
@@ -0,0 +1,104 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/process.lua
+
+import sys
+
+
class Process:
    """A single process record: identity, execution context and environment."""

    def __init__(self, pid, context, environ, parent, args):
        self.pid = pid          # numeric process id
        self.context = context  # globals dict the program executes in
        self.environ = environ  # per-process environment variables
        self.parent = parent    # parent Process, or None for init
        self.args = args        # argv-style argument list
        self.tcwd = None        # working directory saved around child spawns
+
+
+_pid = 1
+_callbacks = []
+_current_process = _init_process = Process(
+    pid=_pid,
+    context={'__name__': '<init.py>', '__path__': '/init.py'},
+    environ={},
+    parent=None,
+    args=['/init.py'],
+)
+
+
def spawn(path, *args, environ=None):
    """Run the program at *path* as a new child process.

    The child becomes the current process for the duration of the call and
    the registered enter/leave hooks fire around execution.  Returns the
    program's exit code (``SystemExit`` code, or 0 on normal completion).
    """
    global _current_process, _pid

    _pid += 1

    proc = Process(
        pid=_pid,
        context={'__name__': '__main__', '__path__': path},
        # BUG FIX: the original read "{} if environ is None else {}", which
        # silently discarded a caller-supplied environment; copy it instead.
        environ={} if environ is None else dict(environ),
        parent=_current_process,
        args=list(args),
    )

    _current_process = proc

    for enter_callback, _ in _callbacks:
        enter_callback(proc)

    try:
        # noinspection PyUnresolvedReferences
        execfile(path, proc.context)  # micropython builtin
    except SystemExit as e:
        return e.code
    finally:
        for _, leave_callback in _callbacks:
            leave_callback(proc)

        _current_process = proc.parent

    return 0
+
+
def init_process():
    """Return the bootstrap (init) process."""
    return _init_process


def current_process():
    """Return the process currently executing; never None."""
    assert _current_process is not None
    return _current_process


def walk_process(proc=None):
    """Yield *proc* (default: the current process) followed by each ancestor."""
    node = _current_process if proc is None else proc
    assert node is not None
    while node is not None and node.parent is not node:
        yield node
        node = node.parent
+
+
def _install_hook(enter_callback, leave_callback):
    # Register a callback pair invoked around every spawn():
    # enter_callback(proc) before the program runs, leave_callback(proc) after.
    _callbacks.append((enter_callback, leave_callback))


def _sys_getattr(name: str):
    # Module-level __getattr__ installed on `sys` (see setup()) so that
    # sys.argv always reflects the argument list of the *current* process.
    if name == "argv":
        return _current_process.args

    raise AttributeError("{!r} object has no attribute {!r}".format('module', name))
+
+
def setup():
    """Install process-aware hooks: sys.argv dispatch and cwd save/restore."""
    import os
    import filesystem
    sys.__getattr__ = _sys_getattr  # argv now follows the current process

    def enter_proc(proc):
        # Remember the parent's working directory so it can be restored
        # when this child process finishes.
        proc.parent.tcwd = os.getcwd()

    def leave_proc(proc):
        # Force the chdir even if it would normally be rejected (e.g. the
        # directory vanished while the child ran), then restore the flag.
        prev_forced_cd, filesystem.forced_cd = filesystem.forced_cd, True

        try:
            os.chdir(proc.parent.tcwd)
        finally:
            filesystem.forced_cd = prev_forced_cd

    _install_hook(enter_proc, leave_proc)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/rc.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/rc.py
new file mode 100644
index 00000000..79658ca1
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/rc.py
@@ -0,0 +1,4 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/rc.lua
+
+loaded = {}
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/robot.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/robot.py
new file mode 100644
index 00000000..991a8727
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/robot.py
@@ -0,0 +1,338 @@
+from micropython import const
+
+import component
+import random
+from sides import *
+
+robot = component.import_component("robot", __name__)
+
+__all__ = [
+    # General
+    "name", "get_light_color", "set_light_color",
+
+    # World
+    "detect",
+
+    # Inventory
+    "slot", "compare", "drop", "place", "suck",
+
+    # Tool
+    "durability", "swing", "use",
+
+    # Movement
+    "move", "forward", "back", "up", "down", "turn",
+
+    # Tank
+    "tank", "compare_fluid", "drain", "fill",
+]
+
+# check sides.py
+DOWN = const(0)
+UP = const(1)
+BACK = const(2)
+FRONT = const(3)
+
+
class SidedMethod:
    """Wrap a side-taking component call; invoke directly (front) or via .up()/.down()."""

    def __init__(self, func):
        self.func = func

    @property
    def __doc__(self):
        # BUG FIX: without @property, instance.__doc__ returned a bound
        # method object instead of the wrapped function's docstring.
        return self.func.__doc__

    # __str__, __repr__ later

    def __call__(self, *args):
        return self.front(*args)

    def front(self, *args):
        return self.func(FRONT, *args)

    def up(self, *args):
        return self.func(UP, *args)

    def down(self, *args):
        return self.func(DOWN, *args)
+
+
class SidedContextMethod:
    """Like SidedMethod, but runs each call inside context_func() (e.g. a slot selection)."""

    def __init__(self, func, context_func):
        self.func = func
        self.context_func = context_func

    @property
    def __doc__(self):
        # BUG FIX: without @property, instance.__doc__ returned a bound
        # method object instead of the wrapped function's docstring.
        return self.func.__doc__

    # __str__, __repr__ later

    def __call__(self, *args):
        return self.front(*args)

    def front(self, *args):
        with self.context_func():
            return self.func(FRONT, *args)

    def up(self, *args):
        with self.context_func():
            return self.func(UP, *args)

    def down(self, *args):
        with self.context_func():
            return self.func(DOWN, *args)
+
+
class RobotResult:
    """Wrap a (success, reason?) tuple returned by robot component calls."""

    def __init__(self, result):
        self.result = result

    @property
    def success(self):
        return self.result[0]

    @property
    def reason(self):
        # Failure reason, or None when the component gave no reason.
        return self.result[1] if len(self.result) >= 2 else None

    def unwrap(self):
        """Return True on success; raise Exception(reason) on failure."""
        if not self:
            raise Exception(self.reason)

        return True

    # Backward-compatible alias: the original method name was misspelled.
    unwarp = unwrap

    def __bool__(self):
        return bool(self.success)

    @classmethod
    def proxy(cls, method):
        """Wrap a component method so its tuple result becomes a RobotResult."""
        # noinspection PyProtectedMember
        func = method._call

        return lambda *args: cls(func(*args))

    def __repr__(self):
        if self:
            return "<Success>"
        if self.reason is None:
            return "<Failure>"
        return "<Failure: {!r}>".format(self.reason)
+
+
def all_slots():
    # TODO(review): stub -- not implemented in this port yet.
    pass


# General
def name():
    # Robot's display name as reported by the robot component.
    return robot.name()


def get_light_color() -> int:
    # Current status-light color as an integer.
    return robot.getLightColor()


def set_light_color(color: int):
    # Set the status light; returns whatever the component reports back.
    return robot.setLightColor(color)
+
+
+# World
+detect = SidedMethod(robot.detect)
+
+
+# Inventory
class SlotContext:
    """Context manager that selects *slot* on entry and restores the
    previously selected slot on exit."""

    def __init__(self, slot):
        self.slot = slot
        self.selected = None  # slot that was selected before __enter__

    def __enter__(self):
        self.selected = robot.select()
        robot.select(self.slot)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: the original re-selected self.slot here, so the slot
        # saved in __enter__ was never restored.
        robot.select(self.selected)
+
+
class Slot:
    """A view over one inventory slot of the robot."""

    @classmethod
    def selected(cls):
        """Return the index of the currently selected slot."""
        return robot.select()

    @classmethod
    def size(cls):
        """Return the total number of inventory slots."""
        return robot.inventorySize()

    def __init__(self, slot):
        self.slot = slot

    def __int__(self):
        return self.slot

    def select(self):
        """Select this slot; True when the component accepted the selection."""
        # BUG FIX: dropped the dead local assignment `_SELECTED_SLOT = ...`
        # the original made here -- it was never read anywhere.
        return robot.select(self.slot) == self.slot

    @property
    def count(self):
        # Number of items currently in this slot.
        return robot.count(self.slot)

    @property
    def space(self):
        # Remaining item capacity of this slot.
        return robot.space(self.slot)

    def context(self):
        """Context manager that temporarily selects this slot."""
        return SlotContext(self.slot)

    def compare_to(self, another_slot):
        with self.context():
            return robot.compareTo(another_slot)

    def transfer(self, new_slot, count: int):
        with self.context():
            return robot.transferTo(new_slot, count)

    @property
    def compare(self):
        return SidedContextMethod(robot.compare, self.context)

    @property
    def drop(self):
        return SidedContextMethod(robot.drop, self.context)

    @property
    def place(self):
        return SidedContextMethod(robot.place, self.context)

    @property
    def suck(self):
        return SidedContextMethod(robot.suck, self.context)
+
+
# Public alias: robot.slot(n) constructs a Slot view.
slot = Slot

# Inventory + World: side-dispatching wrappers over the raw component calls.
compare = SidedMethod(robot.compare)
drop = SidedMethod(robot.drop)
place = SidedMethod(RobotResult.proxy(robot.place))  # wraps (ok, reason) tuples
suck = SidedMethod(robot.suck)

# Tool
durability = robot.durability
swing = SidedMethod(RobotResult.proxy(robot.swing))
use = SidedMethod(RobotResult.proxy(robot.use))
+
+
+# Movement
+
+
class Move:
    """Directional movement helpers; each returns a RobotResult."""

    def __init__(self):
        self._move = RobotResult.proxy(robot.move)

    def _go(self, side):
        # Single dispatch point for all four directions.
        return self._move(side)

    def forward(self):
        return self._go(FRONT)

    def back(self):
        return self._go(BACK)

    def up(self):
        return self._go(UP)

    def down(self):
        return self._go(DOWN)
+
+
+move = Move()
+forward = move.forward
+back = move.back
+up = move.up
+down = move.down
+
+
class Turn:
    """Rotation helpers; each returns a RobotResult."""

    def __init__(self):
        self._turn = RobotResult.proxy(robot.turn)

    def left(self):
        return self._turn(False)

    def right(self):
        return self._turn(True)

    def around(self):
        # Pick a random direction, then turn the same way twice.
        clockwise = random.random() > 0.5
        return self._turn(clockwise) and self._turn(clockwise)
+
+
+turn = Turn()
+
+
+# Tank
class TankContext:
    """Context manager that selects *tank* on entry and restores the
    previously selected tank on exit."""

    def __init__(self, tank):
        self.tank = tank
        self.selected = None  # tank that was selected before __enter__

    def __enter__(self):
        self.selected = robot.selectTank()
        robot.selectTank(self.tank)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: the original re-selected self.tank here, so the tank
        # saved in __enter__ was never restored.
        robot.selectTank(self.selected)
+
+
class Tank:
    """A view over one internal fluid tank of the robot."""

    @classmethod
    def selected(cls):
        # Index of the currently selected tank.
        return robot.selectTank()

    def __init__(self, slot):
        # NOTE(review): the parameter is named "slot" but holds a tank index.
        self.tank = slot

    def __int__(self):
        return self.tank

    def select(self):
        # True when the component accepted the selection.
        return robot.selectTank(self.tank) == self.tank

    @property
    def level(self):
        # Current fluid amount in this tank.
        return robot.tankLevel(self.tank)

    @property
    def space(self):
        # Remaining fluid capacity of this tank.
        return robot.tankSpace(self.tank)

    def context(self):
        # Context manager that temporarily selects this tank.
        return TankContext(self.tank)

    def compare_to(self, another_tank):
        with self.context():
            return robot.compareFluidTo(another_tank)

    def transfer(self, new_tank, count: int):
        with self.context():
            return robot.transferFluidTo(new_tank, count)

    @property
    def compare(self):
        return SidedContextMethod(robot.compareFluid, self.context)

    @property
    def drain(self):
        return SidedContextMethod(robot.drain, self.context)

    @property
    def fill(self):
        return SidedContextMethod(robot.fill, self.context)
+
+
+tank = Tank
+
+compare_fluid = SidedMethod(robot.compareFluid)
+drain = SidedMethod(robot.drain)
+fill = SidedMethod(robot.fill)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/serialization.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/serialization.py
new file mode 100644
index 00000000..36c200ff
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/serialization.py
@@ -0,0 +1,15 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/serialization.lua
+
__all__ = ["serialize", "unserialize"]


def serialize(value, pretty=False):
    # TODO(review): stub -- the port of serialization.lua is not written yet.
    raise NotImplementedError


def unserialize(value):
    # TODO(review): stub.
    raise NotImplementedError


# Module is intentionally unusable until implemented: importing it raises.
raise NotImplementedError
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/shell.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/shell.py
new file mode 100644
index 00000000..b0b4b7ec
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/shell.py
@@ -0,0 +1,8 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/shell.lua
+# - src/main/resources/assets/opencomputers/loot/openos/lib/core/full_shell.lua
+
+
+from process import spawn
+
+__all__ = ["spawn"]
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/sides.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/sides.py
new file mode 100644
index 00000000..52637b6d
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/sides.py
@@ -0,0 +1,29 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/sides.lua
+
+from micropython import const
+
# Side indices passed to OpenComputers component calls.  Only the six
# canonical names are exported; the aliases below stay importable directly.
__all__ = ["BOTTOM", "TOP", "BACK", "FRONT", "RIGHT", "LEFT"]

BOTTOM = const(0)
TOP = const(1)
BACK = const(2)
FRONT = const(3)
RIGHT = const(4)
LEFT = const(5)

# alias (axis / compass names)
NEGY = DOWN = BOTTOM
POSY = UP = TOP
NEGZ = NORTH = BACK
POSZ = SOUTH = FORWARD = FRONT
NEGX = WEST = RIGHT
POSX = EAST = LEFT

# alias for lowercase
negy = down = bottom = BOTTOM
posy = up = top = TOP
negz = north = back = BACK
posz = south = forward = front = FRONT
negx = west = right = RIGHT
posx = east = left = LEFT
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/term.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/term.py
new file mode 100644
index 00000000..5826e715
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/term.py
@@ -0,0 +1,2 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/term.lua
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/text.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/text.py
new file mode 100644
index 00000000..d9e33f05
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/text.py
@@ -0,0 +1,2 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/text.lua
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/tty.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/tty.py
new file mode 100644
index 00000000..012d9e9b
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/tty.py
@@ -0,0 +1,254 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/tty.lua
+import component
+import computer
+import event
+import re
+
+__all__ = "window",
+
+screen_cache = {}
+gpu_intercept = {}
+
+
class Viewport:
    """Mutable record of the drawable area: size, screen offset and cursor."""

    def __init__(self, width, height, dx, dy, x, y):
        self.width = width    # columns
        self.height = height  # rows
        self.dx = dx          # horizontal offset of the window on screen
        self.dy = dy          # vertical offset of the window on screen
        self.x = x            # cursor column
        self.y = y            # cursor row

    def update_size(self, width, height):
        """Resize the viewport, leaving offsets and cursor untouched."""
        self.width, self.height = width, height

    def copy(self):
        """Return an independent Viewport with identical fields."""
        return Viewport(*self.unpack())

    def unpack(self):
        """Return (width, height, dx, dy, x, y)."""
        return self.width, self.height, self.dx, self.dy, self.x, self.y
+
+
class Window:
    """State of the terminal window: bound gpu, keyboard and viewport."""

    def __init__(self):
        self.gpu = None          # bound gpu component proxy, if any
        self.keyboard = None     # resolved keyboard address (filled lazily)
        self.viewport = Viewport(None, None, 0, 0, 0, 1)
        self.fullscreen = True   # track the gpu's full viewport size
        self.blink = True        # cursor blink flag
        self.output_buffer = ""  # text not yet flushed to the gpu
        self.nowarp = False      # suppress wrapping at the right edge

    @property
    def screen(self):
        """Address of the screen bound to the gpu, or None when unbound."""
        gpu = self.gpu
        return gpu.getScreen() if gpu else None
+
+
class Stream:
    """Terminal output stream bound to the module-level `window`."""

    class Cursor:
        # Tracks how far the screen scrolled during one write() call and
        # which rows received a trailing blank on wrap.
        def __init__(self, sy=0):
            self.sy = sy
            self.tails = {}

    def read(self):
        pass

    def write(self, value):
        """Flush *value* through the output buffer to the gpu, handling the
        control characters \\r \\n \\t \\a \\b ESC, line wrap and scrolling.
        Returns the number of lines scrolled."""
        segment_regex = re.compile("([\27\t\r\n\a\b])")

        update_viewport()

        gpu = window.gpu
        viewport = window.viewport
        window.output_buffer += value
        cursor = self.Cursor()
        beeped = False

        while True:
            cursor.sy += self.scroll()
            if not window.output_buffer:
                break

            ansi_print = ""
            x, y = get_cursor()

            segment = window.output_buffer[:viewport.width]

            # BUG FIX: the original searched an undefined name `buf`
            # (NameError at runtime) and, on a match, replaced the segment
            # with just the delimiter character, losing the text before it.
            # Print the text up to the delimiter; the delimiter itself is
            # consumed via `ei` and dispatched below.
            m = segment_regex.search(segment)
            segment = ansi_print + segment[:m.start()] if m else segment
            ei = m.end() if m else None
            delim = m.group(1) if m else None

            if segment:
                gpu_x = x + viewport.dx
                gpu_y = y + viewport.dy
                tail = ""
                wlen_needed = len(segment)
                wlen_remaining = viewport.width - x + 1
                if wlen_remaining < wlen_needed:
                    # Not enough room on this row: truncate and wrap.
                    segment = segment[:wlen_remaining]
                    wlen_needed = len(segment)
                    tail = " " if wlen_needed < wlen_remaining else ""
                    cursor.tails[gpu_y - cursor.sy] = tail
                    if not window.nowarp:
                        ei = len(segment)
                        delim = "\n"

                gpu.set(gpu_x, gpu_y, segment + tail)
                x += wlen_needed

            window.output_buffer = (
                window.output_buffer[ei:]
                if ei
                else window.output_buffer[viewport.width:]
            )

            if delim is None:
                pass
            elif delim == "\r":
                x = 1
            elif delim == "\n":
                x = 1
                y = y + 1
            elif delim == "\b":
                x = x - 1
            elif delim == "\a" and not beeped:
                # Only beep once per write() call.
                computer.beep()
                beeped = True
            elif delim == "\27":
                # NOTE(review): ANSI handling is unimplemented; the escape is
                # pushed back onto the buffer it was just consumed from, which
                # can loop -- confirm against the intended vt100 integration.
                window.output_buffer = delim + window.output_buffer

            set_cursor(x, y)

        return cursor.sy

    def clear(self):
        # TODO(review): stub -- clearing is not implemented yet.
        pass

    def scroll(self, lines=None):
        """Scroll by *lines* (default: just enough to bring the cursor back
        into range).  Returns the number of lines scrolled."""
        viewport = window.viewport
        if not lines:
            if viewport.y < 1:
                lines = viewport.y - 1
            elif viewport.y > viewport.height:
                lines = viewport.y - viewport.height
            else:
                return 0

        gpu = window.gpu
        if not gpu:
            return 0

        lines = max(min(lines, viewport.height), -viewport.height)

        abs_lines = abs(lines)
        box_height = viewport.height - abs_lines
        # BUG FIX: the Lua idiom `cond and 0 or box_height` does not survive
        # translation to Python (0 is falsy here), so the original always
        # cleared the bottom strip even when scrolling backwards.
        fill_top = viewport.dy + 1 + (0 if lines < 0 else box_height)

        gpu.copy(viewport.dx + 1, viewport.dy + 1 + max(0, lines), viewport.width, box_height, 0, -lines)
        gpu.fill(viewport.dx + 1, fill_top, viewport.width, abs_lines, ' ')

        set_cursor(viewport.x, max(1, min(viewport.y, viewport.height)))
        return lines
+
+
+window = Window()
+stream = Stream()
+
+
def update_viewport():
    # Refresh the viewport size from the gpu once per screen (cached in
    # screen_cache, keyed by screen address) while in fullscreen mode.
    screen = window.screen
    if window.fullscreen and screen and not screen_cache.get(screen):
        screen_cache[screen] = True
        width, height = window.gpu.getViewport()
        window.viewport.update_size(width, height)


def get_viewport():
    # Return an independent copy so callers cannot mutate window state.
    update_viewport()
    return window.viewport.copy()


def set_viewport(width, height, dx=0, dy=0, x=1, y=1):
    # Replace the window's viewport wholesale; the cursor moves to (x, y).
    window.viewport = Viewport(width, height, dx, dy, x, y)


def gpu():
    # The currently bound gpu proxy, or None.
    return window.gpu


def clear():
    # Clear the stream and home the cursor.
    stream.clear()
    set_cursor(1, 1)


def isAvailable():
    # True when a gpu with an attached screen is bound.
    return window.screen is not None


def get_cursor():
    # Cursor position as a (column, row) pair.
    viewport = window.viewport
    return viewport.x, viewport.y


def set_cursor(x, y):
    # Move the cursor; no bounds checking here (scroll() clamps the row).
    viewport = window.viewport
    viewport.x, viewport.y = x, y
+
+
def bind(gpu):
    """Bind *gpu* as the window's output device.

    The gpu's setResolution/setViewport are wrapped (once per gpu, tracked
    in gpu_intercept) so any resolution change invalidates the cached
    viewport size via screen_reset().
    """
    if not gpu_intercept.get(gpu):
        gpu_intercept[gpu] = True

        _setResolution, _setViewport = gpu.setResolution, gpu.setViewport

        def setResolution(*args):
            screen_reset(gpu)
            return _setResolution(*args)

        def setViewport(*args):
            screen_reset(gpu)
            return _setViewport(*args)

        gpu.setResolution = setResolution
        gpu.setViewport = setViewport

    if window.gpu is not gpu:
        window.gpu = gpu
        window.keyboard = None  # keyboard is re-resolved lazily per screen
        update_viewport()

    screen_reset(gpu)
+
+
def keyboard():
    """Resolve (and cache on the window) the keyboard for the current screen."""
    if window.keyboard:
        return window.keyboard

    system_keyboard = component.get_primary("keyboard").address if component.is_available("keyboard") else None
    screen = window.screen

    if not screen:
        # No screen bound at all; return a sentinel string.
        return "no_system_keyboard"

    if component.is_available("screen") and component.get_primary("screen").address == screen:
        window.keyboard = system_keyboard
    else:
        # NOTE(review): getKeyboards() presumably returns a *list* of
        # addresses; storing the list itself looks suspicious (the Lua
        # original takes the first entry) -- confirm before relying on this.
        window.keyboard = component.Component(screen).getKeyboards() or system_keyboard

    return window.keyboard
+
+
def screen():
    """Address of the screen bound to the current gpu, or None."""
    return window.screen


@event.on("screen_resized")
def screen_reset_event(_name, gpu_addr, screen_addr):
    # BUG FIX: the size cache is keyed by *screen* address (see
    # update_viewport), but the original invalidated the gpu address,
    # leaving stale viewport sizes in place.
    screen_cache[screen_addr] = None


def screen_reset(gpu):
    # BUG FIX: invalidate by screen address to match update_viewport's
    # cache key (the original used gpu.address, which never matches).
    screen_cache[gpu.getScreen()] = None
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/uuid.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/uuid.py
new file mode 100644
index 00000000..282a4c93
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/uuid.py
@@ -0,0 +1,2 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/uuid.lua
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/openos/vt100.py b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/vt100.py
new file mode 100644
index 00000000..6c8dec04
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/openos/vt100.py
@@ -0,0 +1,2 @@
+# https://github.com/MightyPirates/OpenComputers/blob/master-MC1.12/
+# - src/main/resources/assets/opencomputers/loot/openos/lib/vt100.lua
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/os.py b/src/main/resources/assets/openpython/opos/v1.1/lib/os.py
new file mode 100644
index 00000000..01c13274
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/os.py
@@ -0,0 +1,102 @@
+import uos
+from ucollections import namedtuple
+from uos import *
+
+import ocpath as path
+from ocpath import curdir, pardir, sep, extsep, altsep, pathsep, defpath, devnull
+from process import current_process, walk_process
+
+__all__ = [
+    # uos.*
+    "chdir", "getcwd", "ilistdir", "listdir", "mkdir", "remove", "rmdir", "rename", "stat", "statvfs", "sync",
+    "urandom", "dupterm", "mount", "umount",
+    # ocpath
+    # "path",
+    # ocpath.*
+    "curdir", "pardir", "sep", "extsep", "altsep", "pathsep", "defpath", "devnull",
+    # os
+    "name", "Environ", "environ", "getenv", "path"
+]
+
+stat_result = namedtuple("os.stat_result", (
+    "st_mode", "st_ino", "st_dev", "st_nlink",
+    "st_uid", "st_gid",
+    "st_size",
+    "st_atime", "st_mtime", "st_ctime"
+))
+
+vfs_result = namedtuple("os.vfs_result", (
+    "f_bsize", "f_frsize",
+    "f_blocks", "f_bfree",
+    "f_bavail", "f_files",
+    "f_ffree", "f_favail",
+    "f_flag",
+    "f_namemax",
+))
+
+
def stat(p):
    """Return uos.stat(p) wrapped in the named os.stat_result tuple."""
    return stat_result(*uos.stat(p))


def vfsstat(p):
    """Return uos.statvfs(p) wrapped in the named os.vfs_result tuple."""
    return vfs_result(*uos.statvfs(p))
+
+
+name = "oc"
+linesep = '\n'
+
+
def listdir(p=None):
    """List directory *p*, resolved against the current working directory."""
    target = getcwd() if p is None else path.join(getcwd(), p)
    return uos.listdir(target)
+
+
class Environ:
    """Mapping view over the per-process environment chain.

    Lookups walk from the current process up through its ancestors; the
    first process whose environ contains the key wins.  Deleting a key
    masks it by storing None on the *current* process, so ancestors keep
    their own value.
    """

    def __getitem__(self, item):
        value = self.get(item)
        if value is None:
            raise KeyError(item)

        return value

    def __setitem__(self, key, value):
        proc = current_process()
        proc.environ[key] = value

    def __delitem__(self, key):
        for proc in walk_process():
            if key in proc.environ:
                # Mask on the current process rather than mutating an
                # ancestor's environment.
                proc = current_process()
                proc.environ[key] = None
                break

    def __contains__(self, item):
        value = self.get(item)
        return value is not None

    def get(self, key, default=None):
        for proc in walk_process():
            if key in proc.environ:
                value = proc.environ[key]
                # BUG FIX: a deleted (None-masked) key used to return None
                # even when a default was supplied; honor the default while
                # still stopping the walk at the mask.
                return default if value is None else value

        return default
+
+
+environ = Environ()
+
+
def getenv(name, default=None):
    """Return the value of environment variable *name*, or *default* when unset.

    *default* is a new, backward-compatible parameter matching CPython's
    os.getenv signature; one-argument callers behave exactly as before.
    """
    return environ.get(name, default)
+
+
def setup():
    """Seed the init process with the default environment variables."""
    from process import init_process
    proc = init_process()
    # "_" conventionally holds the path of the running program.
    proc.environ = {
        "TMPDIR": "/tmp",
        "PATH": defpath,
        "HOME": "/home",
        "_": "/init.py",
    }
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/urllib/parse.py b/src/main/resources/assets/openpython/opos/v1.1/lib/urllib/parse.py
new file mode 100644
index 00000000..17734bb4
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/urllib/parse.py
@@ -0,0 +1,973 @@
+"""Parse (absolute and relative) URLs.
+
+urlparse module is based upon the following RFC specifications.
+
+RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
+and L.  Masinter, January 2005.
+
+RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
+and L.Masinter, December 1999.
+
+RFC 2396:  "Uniform Resource Identifiers (URI)": Generic Syntax by T.
+Berners-Lee, R. Fielding, and L. Masinter, August 1998.
+
+RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
+
+RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
+1995.
+
+RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
+McCahill, December 1994
+
+RFC 3986 is considered the current standard and any future changes to
+urlparse module should conform with it.  The urlparse module is
+currently not entirely compliant with this RFC due to defacto
+scenarios for parsing, and for backward compatibility purposes, some
+parsing quirks from older RFCs are retained. The testcases in
+test_urlparse.py provides a good indicator of parsing behavior.
+"""
+
+import re
+import collections
+
+__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
+           "urlsplit", "urlunsplit", "urlencode", "parse_qs",
+           "parse_qsl", "quote", "quote_plus", "quote_from_bytes",
+           "unquote", "unquote_plus", "unquote_to_bytes"]
+
+# A classification of schemes ('' means apply by default)
+uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
+                 'wais', 'file', 'https', 'shttp', 'mms',
+                 'prospero', 'rtsp', 'rtspu', '', 'sftp',
+                 'svn', 'svn+ssh']
+uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
+               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
+               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
+               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
+uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
+               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
+               'mms', '', 'sftp', 'tel']
+
+# These are not actually used anymore, but should stay for backwards
+# compatibility.  (They are undocumented, but have a public-looking name.)
+non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
+                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
+uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
+              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
+uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
+                 'nntp', 'wais', 'https', 'shttp', 'snews',
+                 'file', 'prospero', '']
+
+# Characters valid in scheme names
+scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                '0123456789'
+                '+-.')
+
+# XXX: Consider replacing with functools.lru_cache
+MAX_CACHE_SIZE = 20
+_parse_cache = {}
+
def clear_cache():
    """Clear the parse cache and the quoters cache."""
    # NOTE(review): _safe_quoters is presumably defined further down in the
    # full module (quoting section) -- it is not visible in this chunk.
    _parse_cache.clear()
    _safe_quoters.clear()
+
+
+# Helpers for bytes handling
+# For 3.2, we deliberately require applications that
+# handle improperly quoted URLs to do their own
+# decoding and encoding. If valid use cases are
+# presented, we may relax this by using latin-1
+# decoding internally for 3.3
+_implicit_encoding = 'ascii'
+_implicit_errors = 'strict'
+
+def _noop(obj):
+    return obj
+
+def _encode_result(obj, encoding=_implicit_encoding,
+                        errors=_implicit_errors):
+    return obj.encode(encoding, errors)
+
+def _decode_args(args, encoding=_implicit_encoding,
+                       errors=_implicit_errors):
+    return tuple(x.decode(encoding, errors) if x else '' for x in args)
+
+def _coerce_args(*args):
+    # Invokes decode if necessary to create str args
+    # and returns the coerced inputs along with
+    # an appropriate result coercion function
+    #   - noop for str inputs
+    #   - encoding function otherwise
+    str_input = isinstance(args[0], str)
+    for arg in args[1:]:
+        # We special-case the empty string to support the
+        # "scheme=''" default argument to some functions
+        if arg and isinstance(arg, str) != str_input:
+            raise TypeError("Cannot mix str and non-str arguments")
+    if str_input:
+        return args + (_noop,)
+    return _decode_args(args) + (_encode_result,)
+
+# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
    """Standard approach to encoding parsed results from str to bytes"""
    __slots__ = ()

    def encode(self, encoding='ascii', errors='strict'):
        # _encoded_counterpart is installed by _fix_result_transcoding().
        return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))


class _ResultMixinBytes(object):
    """Standard approach to decoding parsed results from bytes to str"""
    __slots__ = ()

    def decode(self, encoding='ascii', errors='strict'):
        # _decoded_counterpart is installed by _fix_result_transcoding().
        return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))


class _NetlocResultMixinBase(object):
    """Shared methods for the parsed result objects containing a netloc element"""
    __slots__ = ()

    @property
    def username(self):
        # User part of the "user:password@" prefix, or None.
        return self._userinfo[0]

    @property
    def password(self):
        # Password part of the "user:password@" prefix, or None.
        return self._userinfo[1]

    @property
    def hostname(self):
        # Lower-cased host name; an empty host becomes None.
        hostname = self._hostinfo[0]
        if not hostname:
            hostname = None
        elif hostname is not None:
            hostname = hostname.lower()
        return hostname

    @property
    def port(self):
        port = self._hostinfo[1]
        if port is not None:
            port = int(port, 10)
            # Return None on an illegal port
            if not ( 0 <= port <= 65535):
                return None
        return port


class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        # Split "user:password@host" into (user, password); both None when
        # the netloc carries no "@".
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition('@')
        if have_info:
            username, have_password, password = userinfo.partition(':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        # Split the host part into (hostname, port), honouring bracketed
        # IPv6 literals such as "[::1]:80".
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition('@')
        _, have_open_br, bracketed = hostinfo.partition('[')
        if have_open_br:
            hostname, _, port = bracketed.partition(']')
            _, have_port, port = port.partition(':')
        else:
            hostname, have_port, port = hostinfo.partition(':')
        if not have_port:
            port = None
        return hostname, port


class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    __slots__ = ()

    @property
    def _userinfo(self):
        # bytes twin of _NetlocResultMixinStr._userinfo.
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition(b'@')
        if have_info:
            username, have_password, password = userinfo.partition(b':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        # bytes twin of _NetlocResultMixinStr._hostinfo.
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition(b'@')
        _, have_open_br, bracketed = hostinfo.partition(b'[')
        if have_open_br:
            hostname, _, port = bracketed.partition(b']')
            _, have_port, port = port.partition(b':')
        else:
            hostname, have_port, port = hostinfo.partition(b':')
        if not have_port:
            port = None
        return hostname, port
+
+
from collections import namedtuple

# Plain-tuple bases for the structured result types.  The concrete
# *Result classes below mix these with the str/bytes mixins and add a
# geturl() method.
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')

# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
+
+# Structured result objects for string data
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    # str result type returned by urldefrag().
    __slots__ = ()

    def geturl(self):
        """Reattach the fragment (when present) and return the full URL."""
        return self.url + '#' + self.fragment if self.fragment else self.url
+
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    # str result type returned by urlsplit().
    __slots__ = ()
    def geturl(self):
        # Reassemble the 5-tuple into a URL string.
        return urlunsplit(self)
+
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    # str result type returned by urlparse().
    __slots__ = ()
    def geturl(self):
        # Reassemble the 6-tuple into a URL string.
        return urlunparse(self)
+
+# Structured result objects for bytes data
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    # bytes result type returned by urldefrag().
    __slots__ = ()

    def geturl(self):
        """Reattach the fragment (when present) and return the full URL."""
        return self.url + b'#' + self.fragment if self.fragment else self.url
+
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    # bytes result type returned by urlsplit().
    __slots__ = ()
    def geturl(self):
        # Reassemble the 5-tuple into a URL bytes object.
        return urlunsplit(self)
+
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    # bytes result type returned by urlparse().
    __slots__ = ()
    def geturl(self):
        # Reassemble the 6-tuple into a URL bytes object.
        return urlunparse(self)
+
# Set up the encode/decode result pairs
def _fix_result_transcoding():
    """Cross-link each str result class with its bytes counterpart."""
    pairs = ((DefragResult, DefragResultBytes),
             (SplitResult, SplitResultBytes),
             (ParseResult, ParseResultBytes))
    for decoded, encoded in pairs:
        decoded._encoded_counterpart = encoded
        encoded._decoded_counterpart = decoded

_fix_result_transcoding()
del _fix_result_transcoding
+
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, url, query, fragment = urlsplit(url, scheme, allow_fragments)
    # Path parameters (';params') are split off only for schemes known
    # to use them.
    params = ''
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    return _coerce_result(ParseResult(scheme, netloc, url, params, query, fragment))
+
def _splitparams(url):
    """Split ';params' off the last path segment of *url*.

    Returns (url-without-params, params); params is '' when none exist.
    """
    has_slash = '/' in url
    # Only look for ';' in the final segment when the path has slashes.
    sep = url.find(';', url.rfind('/')) if has_slash else url.find(';')
    if has_slash and sep < 0:
        return url, ''
    return url[:sep], url[sep + 1:]
+
def _splitnetloc(url, start=0):
    """Return (netloc, rest): the netloc runs from *start* up to the
    earliest of '/', '?' or '#' (or to the end of *url*)."""
    delim = min((pos for pos in (url.find(ch, start) for ch in '/?#')
                 if pos >= 0),
                default=len(url))
    return url[start:delim], url[delim:]
+
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # Cache key includes the argument types so str and bytes inputs can
    # never collide after the coercion above.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # Unbalanced brackets cannot be a valid IPv6 netloc.
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        # General case: accept the prefix as a scheme only if every
        # character before ':' is a valid scheme character.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        # Same IPv6 bracket sanity check as the fast path above.
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)
+
def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if params:
        # Path parameters are re-attached to the path with ';'.
        url = url + ';' + params
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
+
def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and not url.startswith('//')):
        # A netloc (even an empty one for netloc-using schemes) is
        # introduced by '//' and must be followed by an absolute path.
        prefix = '/' if url and not url.startswith('/') else ''
        url = '//' + (netloc or '') + prefix + url
    if scheme:
        url = ':'.join((scheme, url))
    if query:
        url = '?'.join((url, query))
    if fragment:
        url = '#'.join((url, fragment))
    return _coerce_result(url)
+
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    # A different scheme (or one that does not allow relative URLs)
    # means *url* already stands on its own.
    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc
    # An absolute path replaces the base path entirely.
    if path[:1] == '/':
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    if not path and not params:
        # Same-document reference: keep the base path/params, and the
        # base query unless the relative URL supplies one.
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Merge the base directory with the relative path, then resolve
    # '.' and '..' segments in place.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            # Collapse a '..' with the non-'..' segment before it,
            # then restart the scan from the beginning.
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
                                      params, query, fragment)))
+
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' not in url:
        return _coerce_result(DefragResult(url, ''))
    scheme, netloc, path, params, query, fragment = urlparse(url)
    defragged = urlunparse((scheme, netloc, path, params, query, ''))
    return _coerce_result(DefragResult(defragged, fragment))
+
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit bytes key (any case mix) to its decoded byte.
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
              for a in _hexdig for b in _hexdig}

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        string.split  # raise AttributeError early for non-string-likes
        return b''
    data = string.encode('utf-8') if isinstance(string, str) else string
    chunks = data.split(b'%')
    if len(chunks) == 1:
        return data
    out = [chunks[0]]
    for chunk in chunks[1:]:
        decoded = _hextobyte.get(chunk[:2])
        if decoded is None:
            # Not a valid %XX escape: keep the '%' literally.
            out.append(b'%')
            out.append(chunk)
        else:
            out.append(decoded)
            out.append(chunk[2:])
    return b''.join(out)
+
# Runs of ASCII characters; the single group makes re.split keep them.
_asciire = re.compile(r'([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        string.split  # raise AttributeError early for non-string-likes
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    parts = _asciire.split(string)
    pieces = [parts[0]]
    # parts alternates: [pre, ascii-run, tail, ascii-run, tail, ...];
    # only the ASCII runs can contain %xx escapes.
    for ascii_run, tail in zip(parts[1::2], parts[2::2]):
        pieces.append(unquote_to_bytes(ascii_run).decode(encoding, errors))
        pieces.append(tail)
    return ''.join(pieces)
+
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a dict mapping each name to the list of its values.
    """
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding, errors=errors):
        # Repeated names accumulate their values in order.
        result.setdefault(name, []).append(value)
    return result
+
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a list of (name, value) tuples.
    """
    qs, _coerce_result = _coerce_args(qs)
    # Both '&' and ';' are accepted as pair separators.
    fields = [field for chunk in qs.split('&') for field in chunk.split(';')]
    result = []
    for field in fields:
        if not field and not strict_parsing:
            continue
        name, eq, value = field.partition('=')
        if not eq:
            # Control-name with no '=' at all.
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            if not keep_blank_values:
                continue
            value = ''
        if value or keep_blank_values:
            name = _coerce_result(unquote(name.replace('+', ' '),
                                          encoding=encoding, errors=errors))
            value = _coerce_result(unquote(value.replace('+', ' '),
                                           encoding=encoding, errors=errors))
            result.append((name, value))
    return result
+
def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    return unquote(string.replace('+', ' '), encoding, errors)
+
# RFC 3986 unreserved characters: never percent-encoded.
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
# Cache of Quoter.__getitem__ bound methods, keyed by the 'safe' bytes.
_safe_quoters = {}

class Quoter(collections.defaultdict):
    """A mapping from bytes (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128, and
    in the "safe" set (either the specified safe set, or default set).

    Lookups of already-seen keys are served straight from the underlying
    defaultdict storage without calling back into Python code.
    """

    def __init__(self, safe):
        """safe: bytes object of extra characters to leave unquoted."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        # Without this, the object would display as a plain defaultdict.
        return "<Quoter %r>" % dict(self)

    def __missing__(self, b):
        # First lookup of byte *b*: compute, memoize and return its form.
        encoded = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = encoded
        return encoded
+
def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters.

    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","

    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.

    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.

    string and safe may be either str or bytes objects. encoding must
    not be specified if string is a str.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    elif encoding is not None:
        raise TypeError("quote() doesn't support 'encoding' for bytes")
    elif errors is not None:
        raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)
+
def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values. Plus signs in the original string are escaped unless
    they are included in safe. It also does not have safe default to '/'.
    """
    # When the input has no spaces, plain quote() already gives the
    # right answer (no '+' substitution needed).
    if ((isinstance(string, str) and ' ' not in string) or
            (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    # Temporarily mark space as safe so quote() leaves it for us to swap.
    space = ' ' if isinstance(safe, str) else b' '
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')
+
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    # Normalize 'safe' to ASCII-only bytes.
    if isinstance(safe, str):
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes(c for c in safe if c < 128)
    # Fast path: every byte is already safe, so nothing needs quoting.
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    quoter = _safe_quoters.get(safe)
    if quoter is None:
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join(map(quoter, bs))
+
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.
    When a component is a string, the safe, encoding and error parameters are
    sent to the quote_plus function for encoding.
    """

    if hasattr(query, "items"):
        # Mapping: iterate its (key, value) pairs.
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
#            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object")#.with_traceback(tb)

    l = []
    if not doseq:
        # Simple case: every value is quoted as one scalar string.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_plus(v, safe)
            else:
                v = quote_plus(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        # doseq: sequence values expand into one parameter per element.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_plus(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_plus(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_plus(elt, safe)
                        else:
                            elt = quote_plus(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
+
+# Utilities to parse URLs (most of these return None for missing parts):
+# unwrap('<URL:type://host/path>') --> 'type://host/path'
+# splittype('type:opaquestring') --> 'type', 'opaquestring'
+# splithost('//host[:port]/path') --> 'host[:port]', '/path'
+# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
+# splitpasswd('user:passwd') -> 'user', 'passwd'
+# splitport('host:port') --> 'host', 'port'
+# splitquery('/path?query') --> '/path', 'query'
+# splittag('/path#tag') --> '/path', 'tag'
+# splitattr('/path;attr1=value1;attr2=value2;...') ->
+#   '/path', ['attr1=value1', 'attr2=value2', ...]
+# splitvalue('attr=value') --> 'attr', 'value'
+# urllib.parse.unquote('abc%20def') -> 'abc def'
+# quote('abc def') -> 'abc%20def'
+
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'.

    Verify that a str URL is pure ASCII, raising UnicodeError otherwise;
    non-str inputs are returned unchanged.  Despite the name, the result
    for str input is still a str.
    """
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        return url
    try:
        return url.encode("ASCII").decode()
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
+
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    text = str(url).strip()
    # Peel an enclosing <...> wrapper, then an optional 'URL:' prefix.
    if text.startswith('<') and text.endswith('>') and len(text) >= 2:
        text = text[1:-1].strip()
    if text.startswith('URL:'):
        text = text[4:].strip()
    return text
+
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        # Compiled lazily and cached at module level.
        import re
        _typeprog = re.compile('^([^/:]+):')

    m = _typeprog.match(url)
    if m is None:
        return None, url
    scheme = m.group(1)
    return scheme.lower(), url[len(scheme) + 1:]
+
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        # Compiled lazily and cached at module level.
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')

    m = _hostprog.match(url)
    if m is None:
        return None, url
    host_port, path = m.groups()
    # Normalize a non-empty path (e.g. '?query') to start with '/'.
    if path and not path.startswith('/'):
        path = '/' + path
    return host_port, path
+
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        # Compiled lazily; greedy '(.*)@' splits on the LAST '@'.
        import re
        _userprog = re.compile('^(.*)@(.*)$')

    m = _userprog.match(host)
    return m.groups() if m else (None, host)
+
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        # re.S lets the password part span newlines.
        import re
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)

    m = _passwdprog.match(user)
    return m.groups() if m else (user, None)
+
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        # Compiled lazily; the port must be all digits to match.
        import re
        _portprog = re.compile('^(.*):([0-9]+)$')

    m = _portprog.match(host)
    return m.groups() if m else (host, None)
+
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.
    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        # Compiled lazily; greedy '(.*):' splits on the LAST ':'.
        import re
        _nportprog = re.compile('^(.*):(.*)$')

    m = _nportprog.match(host)
    if m is None:
        return host, defport
    host, port = m.groups()
    try:
        # int('') also raises ValueError, covering the empty-port case.
        nport = int(port)
    except ValueError:
        nport = None
    return host, nport
+
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'.

    Splits on the LAST '?' (the path group is greedy); returns
    (url, None) when no '?' is present.
    """
    global _queryprog
    if _queryprog is None:
        import re
        # Raw string literal: '\?' in a plain string is an invalid escape
        # sequence (DeprecationWarning since Python 3.6, SyntaxError in 3.12).
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')

    match = _queryprog.match(url)
    if match: return match.group(1, 2)
    return url, None
+
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        # Compiled lazily; greedy '(.*)#' splits on the LAST '#'.
        import re
        _tagprog = re.compile('^(.*)#([^#]*)$')

    m = _tagprog.match(url)
    return m.groups() if m else (url, None)
+
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    path, sep, rest = url.partition(';')
    return path, (rest.split(';') if sep else [])
+
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        # Compiled lazily; splits on the FIRST '=' ([^=]* is colon-free).
        import re
        _valueprog = re.compile('^([^=]*)=(.*)$')

    m = _valueprog.match(attr)
    return m.groups() if m else (attr, None)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/urllib/request.py b/src/main/resources/assets/openpython/opos/v1.1/lib/urllib/request.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/assets/openpython/opos/v1.1/lib/value.py b/src/main/resources/assets/openpython/opos/v1.1/lib/value.py
new file mode 100644
index 00000000..47767eb5
--- /dev/null
+++ b/src/main/resources/assets/openpython/opos/v1.1/lib/value.py
@@ -0,0 +1,72 @@
+import uvalue
+from uvalue import *
+
+__all__ = ["Value", "setup"]
+
+
# Callable proxy for one named method of a wrapped uvalue object.
class ValueMethod:
    def __init__(self, value, name):
        self.value = value
        self.name = name

    def __call__(self, *args):
        # Forward the invocation to the underlying component value.
        return invoke(self.value._uvalue, self.name, *args)

    @property
    def __doc__(self):
        # Documentation is fetched lazily from the component side.
        return doc(self.value._uvalue, self.name)

    def __repr__(self):
        docstring = self.__doc__
        # " -- " separates signature variants; show one per line.
        suffix = "\n" + docstring.replace(" -- ", "\n") if docstring else ""
        return "ValueMethod<{0!r}, {1!r}>{2}".format(self.value._uvalue, self.name, suffix)
+
+
# Variant of ValueMethod for close(): calling it also disposes the Value.
class ValueCloseMethod(ValueMethod):
    def __call__(self, *args):
        outcome = ValueMethod.__call__(self, *args)
        self.value.dispose()
        return outcome
+
+
class Value:
    """High-level proxy around a raw ``uvalue`` handle.

    Unknown attribute access is forwarded as a ``ValueMethod`` bound to
    the underlying value; the proxy is truthy while that value is live.
    """

    def __init__(self, uvalue):
        # The wrapped low-level value handle.
        self._uvalue = uvalue

    def __dir__(self):
        # Advertise the proxy's own names plus the method names reported
        # by the underlying value via methods().
        return dir(object()) + ["__dir__", "__getattr__", "__doc__", "__str__", "__repr__", "__bool__", "__del__",
                                "dispose", "_uvalue"] + list(methods(self._uvalue))

    def __getattr__(self, name):
        # Any attribute not found normally becomes a remote method proxy.
        return ValueMethod(self, name)

    def __str__(self):
        # doc() describes the live value; fall back to repr() once disposed.
        return doc(self._uvalue) if self else repr(self)

    def __repr__(self):
        return "Value<{0}>".format(doc(self._uvalue) if self else None)

    def __bool__(self):
        # Truthy while the underlying handle still references a value.
        return self._uvalue.value is not None

    def __del__(self):
        # Best-effort cleanup when the proxy is garbage collected.
        self._uvalue.dispose()

    @property
    def close(self):
        # close() must also dispose this proxy, hence the special method.
        return ValueCloseMethod(self, "close")

    def dispose(self):
        """Release the underlying value handle."""
        self._uvalue.dispose()
+
+
def value_hook(raw_object):
    # Wrap a raw uvalue object in the high-level Value proxy.
    return Value(raw_object)
+
+
def setup():
    # Install value_hook so uvalue wraps raw objects in Value automatically.
    uvalue.hook_value(value_hook)
diff --git a/src/main/resources/assets/openpython/opos/v1.1/usr/bin/env.py b/src/main/resources/assets/openpython/opos/v1.1/usr/bin/env.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/main/resources/mcmod.info b/src/main/resources/mcmod.info
index d0c4c6d0..0b2ef638 100644
--- a/src/main/resources/mcmod.info
+++ b/src/main/resources/mcmod.info
@@ -1,6 +1,6 @@
 [
     {
-        "modid": "OpenPython",
+        "modid": "openpython",
         "name": "OpenPython",
         "description": "Micropython for OpenComputer",
         "version": "${version}",
@@ -12,4 +12,4 @@
         "logoFile": "",
         "screenshots": []
     }
-]
+]
\ No newline at end of file
diff --git a/src/main/resources/pack.mcmeta b/src/main/resources/pack.mcmeta
deleted file mode 100644
index e40f4cf6..00000000
--- a/src/main/resources/pack.mcmeta
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "pack": {
-        "description": "OpenPython pack",
-        "pack_format": 3
-    }
-}