diff --git a/docs/conf.py b/docs/conf.py index 8327d7e288eee..3e694c04eff49 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -95,7 +95,7 @@ # General information about the project. project = 'MicroPython' -copyright = '2014-2017, Damien P. George, Paul Sokolovsky, OpenMV LLC, and contributors' +copyright = '2014-2018, Damien P. George, Paul Sokolovsky, OpenMV LLC, and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/docs/genrst/builtin_types.rst b/docs/genrst/builtin_types.rst index 47bc416e239be..ca05fa1e21fe9 100644 --- a/docs/genrst/builtin_types.rst +++ b/docs/genrst/builtin_types.rst @@ -2,7 +2,7 @@ Builtin Types ============= -Generated Tue 21 Nov 2017 21:31:50 UTC +Generated Sat 28 Apr 2018 19:34:04 UTC Exception --------- @@ -70,7 +70,7 @@ Exception in while loop condition may have unexpected line number Sample code:: l = ["-foo", "-bar"] - + i = 0 while l[i][0] == "-": print("iter") @@ -90,27 +90,35 @@ Sample code:: .. _cpydiff_types_exception_subclassinit: -Exception.__init__ raises TypeError if overridden and called by subclass -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Exception.__init__ method does not exist. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Cause:** Subclassing native classes is not fully supported in MicroPython. + +**Workaround:** Call using ``super()`` instead:: + +class A(Exception): + def __init__(self): + super().__init__() Sample code:: class A(Exception): def __init__(self): Exception.__init__(self) - + a = A() -+-------------+-----------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+-----------------------------------------------------------+ -| | :: | -| | | -| | Traceback (most recent call last): | -| | File "", line 11, in | -| | File "", line 9, in __init__ | -| | TypeError: argument should be a 'Exception' not a 'A' | -+-------------+-----------------------------------------------------------+ ++-------------+-------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------------------------------------+ +| | :: | +| | | +| | Traceback (most recent call last): | +| | File "", line 15, in | +| | File "", line 13, in __init__ | +| | AttributeError: type object 'Exception' has no attribute '__init__' | ++-------------+-------------------------------------------------------------------------+ bytearray --------- @@ -221,7 +229,7 @@ Sample code:: class A(int): __add__ = lambda self, other: A(int(self) + other) - + a = A(42) print(a+a) @@ -346,14 +354,15 @@ Sample code:: except UnicodeDecodeError: print('UnicodeDecodeError') -+------------------------+-------------------------+ -| CPy output: | uPy output: | -+------------------------+-------------------------+ -| :: | :: | -| | | -| UnicodeDecodeError | '\u0840' | -| | Should not get here | -+------------------------+-------------------------+ ++------------------------+---------------------------------------------------------+ +| CPy output: | uPy output: | ++------------------------+---------------------------------------------------------+ +| :: | :: | +| | | +| UnicodeDecodeError | Traceback (most recent call last): | +| | File "", line 9, in | +| | NameError: name 'UnicodeDecodeError' is not defined | ++------------------------+---------------------------------------------------------+ .. 
_cpydiff_types_str_endswith: @@ -465,7 +474,7 @@ Sample code:: class S(str): pass - + s = S('hello') print(s == 'hello') diff --git a/docs/genrst/core_language.rst b/docs/genrst/core_language.rst index 7903bfe38c2fc..6695dfd8da375 100644 --- a/docs/genrst/core_language.rst +++ b/docs/genrst/core_language.rst @@ -2,7 +2,7 @@ Core Language ============= -Generated Tue 21 Nov 2017 21:31:50 UTC +Generated Sat 28 Apr 2018 19:34:04 UTC Classes ------- @@ -250,6 +250,59 @@ Sample code:: | Exit | | +-------------+-------------+ +Runtime +------- + +.. _cpydiff_core_locals: + +Local variables aren't included in locals() result +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Cause:** MicroPython doesn't maintain symbolic local environment, it is optimized to an array of slots. Thus, local variables can't be accessed by a name. + +Sample code:: + + def test(): + val = 2 + print(locals()) + + test() + ++----------------+------------------------------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++----------------+------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| {'val': 2} | {'test': , '__name__': '__main__', '__file__': ''} | ++----------------+------------------------------------------------------------------------------------------------+ + +.. _cpydiff_core_locals_eval: + +Code running in eval() function doesn't have access to local variables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Cause:** MicroPython doesn't maintain symbolic local environment, it is optimized to an array of slots. Thus, local variables can't be accessed by a name. Effectively, ``eval(expr)`` in MicroPython is equivalent to ``eval(expr, globals(), globals())``. + +Sample code:: + + val = 1 + + def test(): + val = 2 + print(val) + eval("print(val)") + + test() + ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | :: | +| | | +| 2 | 2 | +| 2 | 1 | ++-------------+-------------+ + import ------ @@ -268,13 +321,13 @@ Sample code:: print(modules.__path__) -+-----------------------------------------------------------------------------+-------------------------------+ -| CPy output: | uPy output: | -+-----------------------------------------------------------------------------+-------------------------------+ -| :: | :: | -| | | -| ['/home/kwagyeman/GitHub/openmv-doc/micropython/tests/cpydiff/modules'] | ../tests/cpydiff//modules | -+-----------------------------------------------------------------------------+-------------------------------+ ++---------------------------------------------------------------------------------------+-------------------------------+ +| CPy output: | uPy output: | ++---------------------------------------------------------------------------------------+-------------------------------+ +| :: | :: | +| | | +| ['/home/kwagyeman/Documents/GitHub/openmv/src/micropython/tests/cpydiff/modules'] | ../tests/cpydiff//modules | ++---------------------------------------------------------------------------------------+-------------------------------+ .. 
_cpydiff_core_import_prereg: diff --git a/docs/genrst/modules.rst b/docs/genrst/modules.rst index feb2f7432c1b3..c2f18b26ab142 100644 --- a/docs/genrst/modules.rst +++ b/docs/genrst/modules.rst @@ -2,7 +2,7 @@ Modules ======= -Generated Tue 21 Nov 2017 21:31:50 UTC +Generated Sat 28 Apr 2018 19:34:04 UTC array ----- @@ -70,6 +70,37 @@ Sample code:: | | NotImplementedError: only slices with step=1 (aka None) are supported | +----------------+---------------------------------------------------------------------------+ +builtins +-------- + +.. _cpydiff_builtin_next_arg2: + +Second argument to next() is not implemented +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Cause:** MicroPython is optimised for code space. + +**Workaround:** Instead of ``val = next(it, deflt)`` use:: + + try: + val = next(it) + except StopIteration: + val = deflt + +Sample code:: + + print(next(iter(range(0)), 42)) + ++-------------+-----------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-----------------------------------------------------------------------+ +| :: | :: | +| | | +| 42 | Traceback (most recent call last): | +| | File "", line 12, in | +| | TypeError: function takes 1 positional arguments but 2 were given | ++-------------+-----------------------------------------------------------------------+ + deque ----- @@ -86,15 +117,15 @@ Sample code:: D = collections.deque() print(D) -+---------------+--------------------------------------------------------------+ -| CPy output: | uPy output: | -+---------------+--------------------------------------------------------------+ -| :: | :: | -| | | -| deque([]) | Traceback (most recent call last): | -| | File "", line 8, in | -| | AttributeError: 'module' object has no attribute 'deque' | -+---------------+--------------------------------------------------------------+ ++---------------+-----------------------------------------------------------------+ +| CPy output: | uPy output: | ++---------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| deque([]) | Traceback (most recent call last): | +| | File "", line 8, in | +| | TypeError: function missing 2 required positional arguments | ++---------------+-----------------------------------------------------------------+ json ---- diff --git a/docs/genrst/syntax.rst b/docs/genrst/syntax.rst index c5d2d0b576916..cfb4440780e12 100644 --- a/docs/genrst/syntax.rst +++ b/docs/genrst/syntax.rst @@ -2,7 +2,7 @@ Syntax ====== -Generated Tue 21 Nov 2017 21:31:50 UTC +Generated Sat 28 Apr 2018 19:34:04 UTC Spaces ------ diff --git a/docs/library/index.rst b/docs/library/index.rst index 086dd224cb2c4..9e4cad6d30195 100644 --- a/docs/library/index.rst +++ b/docs/library/index.rst @@ -174,7 +174,6 @@ it will fallback to loading the built-in ``ujson`` module. math.rst sys.rst ubinascii.rst - ucollections.rst uerrno.rst uhashlib.rst uheapq.rst @@ -187,6 +186,7 @@ it will fallback to loading the built-in ``ujson`` module. ustruct.rst utime.rst uzlib.rst + _thread.rst MicroPython-specific libraries @@ -274,4 +274,4 @@ the following libraries. omv.mjpeg.rst omv.lcd.rst omv.fir.rst - omv.cpufreq.rst + omv.omv.rst diff --git a/docs/library/network.rst b/docs/library/network.rst index 7831a2310e09b..44c1f7da66d93 100644 --- a/docs/library/network.rst +++ b/docs/library/network.rst @@ -143,161 +143,159 @@ parameter should be `id`. class CC3K ========== - + This class provides a driver for CC3000 WiFi modules. 
Example usage:: - + import network nic = network.CC3K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4, pyb.Pin.board.Y3) nic.connect('your-ssid', 'your-password') while not nic.isconnected(): pyb.delay(50) print(nic.ifconfig()) - + # now use socket as usual ... - + For this example to work the CC3000 module must have the following connections: - + - MOSI connected to Y8 - MISO connected to Y7 - CLK connected to Y6 - CS connected to Y5 - VBEN connected to Y4 - IRQ connected to Y3 - + It is possible to use other SPI busses and other pins for CS, VBEN and IRQ. - + Constructors ------------ - + .. class:: CC3K(spi, pin_cs, pin_en, pin_irq) - + Create a CC3K driver object, initialise the CC3000 module using the given SPI bus and pins, and return the CC3K object. - + Arguments are: - + - *spi* is an :ref:`SPI object ` which is the SPI bus that the CC3000 is connected to (the MOSI, MISO and CLK pins). - *pin_cs* is a :ref:`Pin object ` which is connected to the CC3000 CS pin. - *pin_en* is a :ref:`Pin object ` which is connected to the CC3000 VBEN pin. - *pin_irq* is a :ref:`Pin object ` which is connected to the CC3000 IRQ pin. - + All of these objects will be initialised by the driver, so there is no need to initialise them yourself. For example, you can use:: - + nic = network.CC3K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4, pyb.Pin.board.Y3) - + Methods ------- - + .. method:: cc3k.connect(ssid, key=None, \*, security=WPA2, bssid=None) - + Connect to a WiFi access point using the given SSID, and other security parameters. - + .. method:: cc3k.disconnect() - + Disconnect from the WiFi access point. - + .. method:: cc3k.isconnected() - + Returns True if connected to a WiFi access point and has a valid IP address, False otherwise. - + .. method:: cc3k.ifconfig() - + Returns a 7-tuple with (ip, subnet mask, gateway, DNS server, DHCP server, MAC address, SSID). - + .. method:: cc3k.patch_version() - + Return the version of the patch program (firmware) on the CC3000. - + .. method:: cc3k.patch_program('pgm') - + Upload the current firmware to the CC3000. You must pass 'pgm' as the first argument in order for the upload to proceed. - + Constants --------- - + .. data:: CC3K.WEP .. data:: CC3K.WPA .. data:: CC3K.WPA2 - + security type to use - + class WIZNET5K ============== - + This class allows you to control WIZnet5x00 Ethernet adaptors based on the W5200 and W5500 chipsets. The particular chipset that is supported by the firmware is selected at compile-time via the MICROPY_PY_WIZNET5K option. - + Example usage:: - + import network nic = network.WIZNET5K(pyb.SPI(1), pyb.Pin.board.X5, pyb.Pin.board.X4) print(nic.ifconfig()) - + # now use socket as usual ... - + For this example to work the WIZnet5x00 module must have the following connections: - + - MOSI connected to X8 - MISO connected to X7 - SCLK connected to X6 - nSS connected to X5 - nRESET connected to X4 - + It is possible to use other SPI busses and other pins for nSS and nRESET. - + Constructors ------------ - + .. class:: WIZNET5K(spi, pin_cs, pin_rst) - + Create a WIZNET5K driver object, initialise the WIZnet5x00 module using the given SPI bus and pins, and return the WIZNET5K object. - + Arguments are: - + - *spi* is an :ref:`SPI object ` which is the SPI bus that the WIZnet5x00 is connected to (the MOSI, MISO and SCLK pins). - *pin_cs* is a :ref:`Pin object ` which is connected to the WIZnet5x00 nSS pin. - *pin_rst* is a :ref:`Pin object ` which is connected to the WIZnet5x00 nRESET pin. 
- + All of these objects will be initialised by the driver, so there is no need to initialise them yourself. For example, you can use:: - + nic = network.WIZNET5K(pyb.SPI(1), pyb.Pin.board.X5, pyb.Pin.board.X4) - + Methods ------- - + .. method:: wiznet5k.isconnected() Returns ``True`` if the physical Ethernet link is connected and up. Returns ``False`` otherwise. .. method:: wiznet5k.ifconfig([(ip, subnet, gateway, dns)]) - + Get/set IP address, subnet mask, gateway and DNS. - + When called with no arguments, this method returns a 4-tuple with the above information. - + To set the above values, pass a 4-tuple with the required information. For example:: - + nic.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8')) - + .. method:: wiznet5k.regs() - - Dump the WIZnet5x00 registers. Useful for debugging. -.. _network.WLAN: + Dump the WIZnet5x00 registers. Useful for debugging. .. only:: port_esp8266 @@ -446,8 +444,6 @@ parameter should be `id`. dhcp_hostname The DHCP hostname to use ============= =========== - - .. only:: port_wipy class WLAN @@ -469,7 +465,7 @@ parameter should be `id`. Constructors ------------ - + .. class:: WLAN(id=0, ...) Create a WLAN object, and optionally configure it. See `init()` for params of configuration. @@ -486,11 +482,11 @@ parameter should be `id`. ------- .. method:: wlan.init(mode, \*, ssid, auth, channel, antenna) - + Set or get the WiFi network processor configuration. - + Arguments are: - + - *mode* can be either ``WLAN.STA`` or ``WLAN.AP``. - *ssid* is a string with the ssid name. Only needed when mode is ``WLAN.AP``. - *auth* is a tuple with (sec, key). Security can be ``None``, ``WLAN.WEP``, @@ -500,7 +496,7 @@ parameter should be `id`. - *channel* a number in the range 1-11. Only needed when mode is ``WLAN.AP``. - *antenna* selects between the internal and the external antenna. Can be either ``WLAN.INT_ANT`` or ``WLAN.EXT_ANT``. - + For example, you can do:: # create and configure as an access point @@ -602,12 +598,10 @@ parameter should be `id`. selects the antenna type - - .. only:: port_openmvcam - class WINC -- wifi shield driver - ================================ + class WINC -- wifi shield driver + ================================ The ``WINC`` class is used for controlling the wifi shield. @@ -620,10 +614,10 @@ parameter should be `id`. wlan.ifconfig() - Constructors - ------------ + Constructors + ------------ - .. class:: WINC(mode=MODE_STATION) + .. class:: WINC([mode=MODE_STATION]) Creates a winc driver object and connects to the wifi shield which uses I/O pins P0, P1, P2, P3, P6, P7, and P8. @@ -636,50 +630,49 @@ parameter should be `id`. * network.WINC.MODE_AP - The module will create an AP (Access Point) and - accept connections from a client. + The module will create an AP (Access Point) and accept connections from a client. - Note1: The start_ap() function must be called after setting AP mode to configure the AP. + .. note:: - Note2: The WINC1500 has some limitations in its AP implementation: + The start_ap() method must be called after setting AP mode to configure the AP. - * Only one client can connect at a time. - * Only OPEN or WEP security are supported. - * There's a bug in the FW, when the client disconnects any bound sockets are lost (they just stop working). As a workaround, set a timeout for the server socket to force it to raise an exception and then reopen it (See the example script). 
+ Also, the WINC1500 has some limitations in its AP implementation: - * network.WINC.MODE_P2P - - Enable Peer-to-Peer mode, also known as WiFiDirect. This mode is similar to AP, it allows two devices to connect and exchange data directly. - Note: This mode is Not implemented Yet. + * Only one client can connect at a time. + * Only OPEN or WEP security are supported. + * There's a bug in the WiFi Module FW, when the client disconnects any bound sockets are lost (they just stop working). As a workaround, set a timeout for the server socket to force it to raise an exception and then reopen it (See the example script). * network.WINC.MODE_FIRMWARE: This mode enables WiFi module firmware update. - Note: Do NOT use unless you know what you're doing, modules are shipped with the latest FW update there's No need to update the FW. - Methods - ------- + .. note:: - .. method:: winc.connect(ssid, key=None, security=WPA_PSK) + Do NOT use unless you know what you're doing, modules are shipped with the latest FW update there is no need to update the FW. + + Methods + ------- + + .. method:: winc.connect(ssid, [key=None, [security=WPA_PSK]]) Connect to a wifi network with ssid ``ssid`` using key ``key`` with security ``security``. - After connecting to the network use the ``usocket`` module to open TCP/UDP + After connecting to the network use the `usocket` module to open TCP/UDP ports to send and receive data. .. note:: - This function takes a little while to return. + This method takes a little while to return. - .. method:: winc.start_ap(SSID, key=None, security=OPEN, channel=1) + .. method:: winc.start_ap(ssid, [key=None, [security=OPEN, [channel=1]]]) - When running in AP mode this function must be called after creating + When running in AP mode this method must be called after creating a WINC object to configure and start the AP . - * SSID: The AP SSID (must be set) - * Key: The AP encryption key. A Key is required only if security is WEP. - * security: AP security mode. (Only network.WINC.OPEN or network.WINC.WEP are supported). + * ssid: The AP SSID (must be set). + * key: The AP encryption key. A Key is required only if security is WEP. + * security: AP security mode (only OPEN or WEP are supported). * channel: WiFi channel, change this if you have another AP running at the same channel. .. method:: winc.disconnect() @@ -691,9 +684,19 @@ parameter should be `id`. Returns True if connected to an access point and an IP address has been obtained. + .. method:: winc.connected_sta() + + This method returns a list containing the connected client's IP adress. + + .. method:: winc.wait_for_sta(timeout) + + This method blocks and waits for a client to connect. If timeout is 0 + this will block forever. This method returns a list containing the + connected client's IP adress. + .. method:: winc.ifconfig() - Returns a tuple containing: + Returns a list containing: * [0]: RSSI - received signal strength indicator (int) * [1]: Authorization Type (see constants) @@ -705,7 +708,7 @@ parameter should be `id`. .. method:: winc.scan() - Returns a list of tuples containing: + Returns a list containing: * [0]: Channel Number (int) * [1]: RSSI - received signal strength indicator (int) @@ -732,54 +735,41 @@ parameter should be `id`. * [5]: Driver Patch Version Number (int) * [6]: Hardware Revision Number - Chip ID (int) - .. method:: winc.fw_dump() - - Dumps the wifi shield firmware to a binary at "/firmware/fw_dump.bin" + .. method:: winc.fw_dump(path) - .. 
method:: winc.fw_update() + Dumps the wifi shield firmware to a binary file at ``path``. You must + have put the module into firmware mode to use this. - Programs the wifi shield with binary image found at - "/firmware/m2m_aio_3a0.bin". + .. method:: winc.fw_update(path) - .. method:: winc.connected_sta() - - This method returns the connected client IP. - - .. method:: winc.wait_for_sta(timeout) + Programs the wifi shield with binary image found at ``path``. You must + have put the module into firmware mode to use this. - This function blocks and waits for a client to connect. If timeout is 0 this will block forever. - - Constants - --------- + Constants + --------- .. data:: winc.OPEN For connecting to an open wifi network. - .. note:: - - Insecure. - .. data:: winc.WEP For connecting to a WEP based password protected network. - .. note:: - - Insecure. - .. data:: winc.WPA_PSK For connecting to a WPA/PSK based password protected network. - .. note:: + .. data:: winc.MODE_STA - For networks that need a password for all users. + Start in station mode (i.e. connect to a network). - .. data:: winc.802_1X + .. data:: winc.MODE_AP - For connecting to a 802.1X based password protected network. + Start in access point mode (i.e. become the network). - .. note:: + .. data:: winc.MODE_FIRMWARE - For networks that need a separate password per user. + Setup in firmware update mode. + +.. _network.WLAN: diff --git a/docs/library/omv.cpufreq.rst b/docs/library/omv.cpufreq.rst deleted file mode 100644 index bd232e6a325c0..0000000000000 --- a/docs/library/omv.cpufreq.rst +++ /dev/null @@ -1,63 +0,0 @@ -:mod:`cpufreq` --- easy cpu frequency control -============================================= - -.. module:: cpufreq - :synopsis: easy cpu frequency control - -The ``cpufreq`` module is used for easily controlling the cpu frequency. -In particular, the ``cpufreq`` module allows you to easily underclock or -overclock your OpenMV Cam. - -Example usage:: - - import cpufreq - - cpufreq.set_frequency(cpufreq.CPUFREQ_216MHZ) - -The OpenMV Cam M4's default frequency is 180MHz. -The OpenMV Cam M7's default frequency is 216MHz. - -Functions ---------- - -.. function:: cpufreq.get_frequency() - - Returns a tuple containing: - - * [0] - sysclk: frequency of the CPU (int). - * [1] - hclk: frequency of the AHB bus, core memory and DMA (int). - * [2] - pclk1: frequency of the APB1 bus (int). - * [3] - pclk2: frequency of the APB2 bus (int). - -.. function:: cpufreq.set_frequency(freq) - - Changes the cpu frequency. ``freq`` may be one of: - - * cpufreq.CPUFREQ_120MHZ - * cpufreq.CPUFREQ_144MHZ - * cpufreq.CPUFREQ_168MHZ - * cpufreq.CPUFREQ_192MHZ - * cpufreq.CPUFREQ_216MHZ - -Constants ---------- - -.. data:: cpufreq.CPUFREQ_120MHZ - - Used to set the frequency to 120 MHz. - -.. data:: cpufreq.CPUFREQ_144MHZ - - Used to set the frequency to 144 MHz. - -.. data:: cpufreq.CPUFREQ_168MHZ - - Used to set the frequency to 168 MHz. - -.. data:: cpufreq.CPUFREQ_192MHZ - - Used to set the frequency to 192 MHz. - -.. data:: cpufreq.CPUFREQ_216MHZ - - Used to set the frequency to 216 MHz. diff --git a/docs/library/omv.fir.rst b/docs/library/omv.fir.rst index 54b837593f191..79bb256842fd7 100644 --- a/docs/library/omv.fir.rst +++ b/docs/library/omv.fir.rst @@ -29,7 +29,7 @@ Example usage:: Functions --------- -.. function:: fir.init(type=1, refresh=64, resolution=18) +.. function:: fir.init([type=1, [refresh=64, [resolution=18]]]) Initializes an attached thermopile shield using I/O pins P4 and P5. 
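For reference, a minimal sketch of calling the updated ``fir.init()``, assuming the usual OpenMV ``sensor`` setup and that ``fir.read_ir()`` returns the ``(ta, ir, to_min, to_max)`` tuple used by the bundled examples (that function is not shown in this hunk)::

    import sensor, fir

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QQVGA)

    # Keyword arguments shown with the documented defaults.
    fir.init(type=1, refresh=64, resolution=18)

    ta, ir, to_min, to_max = fir.read_ir()  # assumed read helper, see note above
    img = sensor.snapshot()
    fir.draw_ir(img, ir)                    # overlay the thermal readings
    fir.deinit()                            # frees I/O pins P4 and P5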
@@ -51,12 +51,6 @@ Functions * 17-bits -> Max of ~600C. * 18-bits -> Max of ~450C. - .. note:: - - ``type``, ``refresh``, and ``resolution`` are keyword arguments which must - be explicitly invoked in the function call by writing ``type=``, - ``refresh=``, and ``resolution=``. - .. function:: fir.deinit() Deinitializes the thermopile shield freeing up I/O pins. @@ -108,9 +102,9 @@ Functions ``ir`` is a (width * height) list of floats. -.. function:: fir.draw_ta(image, ta, alpha=128, scale=[-17.7778, 37.7778]) +.. function:: fir.draw_ta(image, ta, [alpha=128, [scale]]) - Draws the ambient temperature (``ta``) on the ``image`` using a rainbow + Draws the ambient temperature ``ta`` on the `image` using a rainbow table color conversion. ``alpha`` controls the transparency. 256 for an opaque overlay. 0 for none. @@ -119,20 +113,15 @@ Functions the minimum temperature cutoff and the second number is the max. Values closer to the min are blue and values closer to the max are red. - The default ``scale`` of [-17.7778C, 37.7778C] corresponds to [0F, 100F]. + The default ``scale`` is (-17.7778C, 37.7778C) corresponds to (0F, 100F). .. note:: For best results look at really cold or hot objects. - .. note:: - - ``alpha`` and ``scale`` are keyword arguments which must be explicitly - invoked in the function call by writing ``alpha=`` and ``scale=``. +.. function:: fir.draw_ir(image, ir, [alpha=128, [scale]]) -.. function:: fir.draw_ta(image, ir, alpha=128, scale=[auto, auto]) - - Draws the temperature list (``ir``) on the ``image`` using a rainbow + Draws the temperature list ``ir`` on the `image` using a rainbow table color conversion. ``alpha`` controls the transparency. 256 for an opaque overlay. 0 for none. @@ -142,13 +131,8 @@ Functions closer to the min are blue and values closer to the max are red. The minimum and maximum values in the temperature list are used to scale - the output ``image`` automatically unless explicitly overridden. + the output `image` automatically unless explicitly overridden using scale. .. note:: For best results look at really cold or hot objects. - - .. note:: - - ``alpha`` and ``scale`` are keyword arguments which must be explicitly - invoked in the function call by writing ``alpha=`` and ``scale=``. diff --git a/docs/library/omv.gif.rst b/docs/library/omv.gif.rst index 701fb2a5a9f9c..4c8da8fe5a083 100644 --- a/docs/library/omv.gif.rst +++ b/docs/library/omv.gif.rst @@ -11,7 +11,7 @@ class Gif -- Gif recorder You can use the gif module to record small video clips. Note that gif files save uncompressed image data. So, they are best for recording short video clips that -you want to share. Use ``mjpeg`` for long clips. +you want to share. Use `mjpeg` for long clips. Example usage:: @@ -36,7 +36,7 @@ Example usage:: Constructors ------------ -.. class:: gif.Gif(filename, width=Auto, height=Auto, color=Auto, loop=True) +.. class:: gif.Gif(filename, [width, [height, [color, [loop=True]]]]) Create a Gif object which you can add frames to. ``filename`` is the path to save the gif recording to. @@ -50,17 +50,10 @@ Constructors ``color`` is automatically set equal to the image sensor color mode unless explicitly overridden: - - False for color results in a grayscale 7-bit per pixel gif. - - True for color results in a rgb232 7-bit per pixel gif. + - False for color results in a `sensor.GRAYSCALE` 7-bit per pixel gif. + - True for color results in a `sensor.RGB565` 7-bit per pixel gif. ``loop`` when True results in the gif automatically looping on playback. - Defaults to True. 
- - .. note:: - - ``width``, ``height``, ``color``, and ``loop`` are keyword arguments - which must be explicitly invoked in the function call by writing - ``width=``, ``height=``, ``color=``, and ``resolution=``. Methods ------- @@ -75,7 +68,7 @@ Methods .. method:: gif.format() - Returns ``sensor.RGB565`` if color or ``sensor.GRAYSCALE`` if not. + Returns `sensor.RGB565` if color is True or `sensor.GRAYSCALE` if not. .. method:: gif.size() @@ -85,7 +78,7 @@ Methods Returns if the gif object had loop set in its constructor. -.. method:: gif.add_frame(image, delay=10) +.. method:: gif.add_frame(image, [delay=10]) Add an image to the gif recording. The image width, height, and color mode, must be equal to the same width, height, and color modes used in the constructor @@ -94,11 +87,6 @@ Methods ``delay`` is the number of centi-seconds to wait before displaying this frame after the previous frame (if not the first frame). - .. note:: - - ``delay`` is keyword arguments which must be explicitly invoked in the - function call by writing ``delay=``. - .. method:: gif.close() Finalizes the gif recording. This method must be called once the recording diff --git a/docs/library/omv.image.rst b/docs/library/omv.image.rst index 5d3c528a0aa48..d30e4802f6f2a 100644 --- a/docs/library/omv.image.rst +++ b/docs/library/omv.image.rst @@ -57,7 +57,7 @@ Functions ``path`` is the path to the descriptor file to save. -.. function:: image.match_descriptor(descritor0, descriptor1, threshold=70, filter_outliers=False) +.. function:: image.match_descriptor(descritor0, descriptor1, [threshold=70, [filter_outliers=False]]) For LBP descriptors this function returns an integer representing the difference between the two descriptors. You may then threshold/compare this @@ -73,29 +73,23 @@ Functions ``filter_outliers`` is used for ORB keypoints to filter out outlier keypoints allow you to raise the ``threshold``. Defaults to False. - .. note:: - - ``threshold`` and ``filter_outliers`` are keyword arguments which must be - explicitly invoked in the function call by writing ``threshold=`` and - ``filter_outliers=``. - class HaarCascade -- Feature Descriptor ======================================= -The Haar Cascade feature descriptor is used for the ``image.find_features()`` +The Haar Cascade feature descriptor is used for the `image.find_features()` method. It doesn't have any methods itself for you to call. Constructors ------------ -.. class:: image.HaarCascade(path, stages=Auto) +.. class:: image.HaarCascade(path, [stages=Auto]) Loads a Haar Cascade into memory from a Haar Cascade binary file formatted for your OpenMV Cam. If you pass "frontalface" instead of a path then this constructor will load the built-in frontal face Haar Cascade into memory. Additionally, you can also pass "eye" to load a Haar Cascade for eyes into memory. Finally, this method returns the loaded Haar Cascade object for use - with ``image.find_features()``. + with `image.find_features()`. ``stages`` defaults to the number of stages in the Haar Cascade. However, you can specify a lower number of stages to speed up processing the feature @@ -129,15 +123,17 @@ Constructors not cat like things labeled differently. The generator algorithm will then produce a Haar Cascade that detects cats. - .. note:: - - ``stages`` is a keyword argument which must be explicitly invoked in the - function call by writing ``stages=``. 
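As context for the ``stages`` argument, a minimal face-detection sketch; the ``threshold`` and ``scale_factor`` values are illustrative and the keyword names follow the OpenMV example scripts, so they may differ between firmware versions::

    import sensor, image

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)

    # Load the built-in frontal face cascade; a lower ``stages`` value
    # trades detection accuracy for speed.
    face_cascade = image.HaarCascade("frontalface", stages=25)

    img = sensor.snapshot()
    # find_features() returns a list of bounding rectangles (x, y, w, h).
    for r in img.find_features(face_cascade, threshold=0.75, scale_factor=1.25):
        img.draw_rectangle(r)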
- class Similarity -- Similarity Object ===================================== -The similarity object is returned by ``image.get_similarity``. +The similarity object is returned by `image.get_similarity()`. + +Constructors +------------ + +.. class:: image.similarity() + + Please call `image.get_similarity()` to create this object. Methods ------- @@ -182,7 +178,7 @@ Methods class Histogram -- Histogram Object =================================== -The histogram object is returned by ``image.get_histogram``. +The histogram object is returned by `image.get_histogram()`. Grayscale histograms have one channel with some number of bins. All bins are normalized so that all bins sum to 1. @@ -190,6 +186,13 @@ normalized so that all bins sum to 1. RGB565 histograms have three channels with some number of bins each. All bins are normalized so that all bins in a channel sum to 1. +Constructors +------------ + +.. class:: image.histogram() + + Please call `image.get_histogram()` to create this object. + Methods ------- @@ -219,7 +222,7 @@ Methods .. method:: histogram.get_percentile(percentile) - Computes the CDF of the histogram channels and returns a ``percentile`` + Computes the CDF of the histogram channels and returns a `image.percentile` object with the values of the histogram at the passed in ``percentile`` (0.0 - 1.0) (float). So, if you pass in 0.1 this method will tell you (going from left-to-right in the histogram) what bin when summed into an accumulator @@ -227,18 +230,18 @@ Methods 0.1) and max (with 0.9) of a color distribution without outlier effects ruining your results for adaptive color tracking. -.. method:: histogram.get_threhsold() +.. method:: histogram.get_threshold() Uses Otsu's Method to compute the optimal threshold values that split the histogram into two halves for each channel of the histogram. This method - returns a ``threshold`` object. This method is particularly useful for - determining optimal ``binary()`` thresholds. + returns a `image.threshold` object. This method is particularly useful for + determining optimal `image.binary()` thresholds. .. method:: histogram.get_statistics() Computes the mean, median, mode, standard deviation, min, max, lower quartile, and upper quartile of each color channel in the histogram and - returns a ``statistics`` object. + returns a `statistics` object. You may also use ``histogram.statistics()`` and ``histogram.get_stats()`` as aliases for this method. @@ -246,7 +249,7 @@ Methods class Percentile -- Percentile Object ===================================== -The percentile object is returned by ``histogram.get_percentile``. +The percentile object is returned by `histogram.get_percentile()`. Grayscale percentiles have one channel. Use the non ``l_*``, ``a_*``, and ``b_*`` method. @@ -254,6 +257,13 @@ Grayscale percentiles have one channel. Use the non ``l_*``, ``a_*``, and RGB565 percentiles have three channels. Use the ``l_*``, ``a_*``, and ``b_*`` methods. +Constructors +------------ + +.. class:: image.percentile() + + Please call `histogram.get_percentile()` to create this object. + Methods ------- @@ -281,10 +291,10 @@ Methods You may also get this value doing ``[2]`` on the object. -class Threhsold -- Threshold Object +class Threshold -- Threshold Object =================================== -The threshold object is returned by ``histogram.get_threshold``. +The threshold object is returned by `histogram.get_threshold()`. Grayscale thresholds have one channel. Use the non ``l_*``, ``a_*``, and ``b_*`` method. 
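A short sketch of the Otsu workflow described above, assuming a grayscale frame and the usual (lo, hi) tuple form accepted by `image.binary()`::

    import sensor

    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)

    img = sensor.snapshot()
    t = img.get_histogram().get_threshold()  # Otsu's method, per the docs above
    # Keep everything at or below the computed split point.
    img.binary([(0, t.value())])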
@@ -292,38 +302,45 @@ Grayscale thresholds have one channel. Use the non ``l_*``, ``a_*``, and RGB565 thresholds have three channels. Use the ``l_*``, ``a_*``, and ``b_*`` methods. +Constructors +------------ + +.. class:: image.threshold() + + Please call `histogram.get_threshold()` to create this object. + Methods ------- -.. method:: threhsold.value() +.. method:: threshold.value() - Return the grayscale threhsold value (between 0 and 255). + Return the grayscale threshold value (between 0 and 255). You may also get this value doing ``[0]`` on the object. -.. method:: threhsold.l_value() +.. method:: threshold.l_value() - Return the RGB565 LAB L channel threhsold value (between 0 and 100). + Return the RGB565 LAB L channel threshold value (between 0 and 100). You may also get this value doing ``[0]`` on the object. -.. method:: threhsold.a_value() +.. method:: threshold.a_value() - Return the RGB565 LAB A channel threhsold value (between -128 and 127). + Return the RGB565 LAB A channel threshold value (between -128 and 127). You may also get this value doing ``[1]`` on the object. -.. method:: threhsold.b_value() +.. method:: threshold.b_value() - Return the RGB565 LAB B channel threhsold value (between -128 and 127). + Return the RGB565 LAB B channel threshold value (between -128 and 127). You may also get this value doing ``[2]`` on the object. class Statistics -- Statistics Object ===================================== -The percentile object is returned by ``histogram.get_statistics`` or -``image.get_statistics``. +The percentile object is returned by `histogram.get_statistics()` or +`image.get_statistics()`. Grayscale statistics have one channel. Use the non ``l_*``, ``a_*``, and ``b_*`` method. @@ -331,6 +348,13 @@ Grayscale statistics have one channel. Use the non ``l_*``, ``a_*``, and RGB565 statistics have three channels. Use the ``l_*``, ``a_*``, and ``b_*`` methods. +Constructors +------------ + +.. class:: image.statistics() + + Please call `histogram.get_statistics()` or `image.get_statistics()` to create this object. + Methods ------- @@ -529,15 +553,22 @@ Methods class Blob -- Blob object ========================= -The blob object is returned by ``image.find_blobs``. +The blob object is returned by `image.find_blobs()`. + +Constructors +------------ + +.. class:: image.blob() + + Please call `image.find_blobs()` to create this object. Methods ------- .. method:: blob.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the blob's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the blob's bounding box. .. method:: blob.x() @@ -593,9 +624,9 @@ Methods .. method:: blob.code() Returns a 16-bit binary number with a bit set in it for each color threshold - that's part of this blob. For example, if you passed ``image.find_blobs`` + that's part of this blob. For example, if you passed `image.find_blobs()` three color thresholds to look for then bits 0/1/2 may be set for this blob. - Note that only one bit will be set for each blob unless ``image.find_blobs`` + Note that only one bit will be set for each blob unless `image.find_blobs()` was called with ``merge=True``. Then its possible for multiple blobs with different color thresholds to be merged together. You can use this method along with multiple thresholds to implement color code tracking. @@ -605,7 +636,7 @@ Methods .. 
method:: blob.count() Returns the number of blobs merged into this blob. THis is 1 unless you - called ``image.find_blobs`` with ``merge=True``. + called `image.find_blobs()` with ``merge=True``. You may also get this value doing ``[9]`` on the object. @@ -622,15 +653,22 @@ Methods class Line -- Line object ========================= -The line object is returned by ``image.find_lines``, ``image.find_line_segments``, or ``image.get_regression``. +The line object is returned by `image.find_lines()`, `image.find_line_segments()`, or `image.get_regression()`. + +Constructors +------------ + +.. class:: image.line() + + Please call `image.find_lines()`, `image.find_line_segments()`, or `image.get_regression()` to create this object. Methods ------- .. method:: line.line() - Returns a line tuple (x1, y1, x2, y2) for use with other ``image`` methods - like ``image.draw_line``. + Returns a line tuple (x1, y1, x2, y2) for use with other `image` methods + like `image.draw_line()`. .. method:: line.x1() @@ -658,7 +696,7 @@ Methods .. method:: line.length() - Returns the line's length - sqrt(((x2-x1)^2) + ((y2-y1)^2). + Returns the line's length: sqrt(((x2-x1)^2) + ((y2-y1)^2). You may also get this value doing ``[4]`` on the object. @@ -683,7 +721,14 @@ Methods class Circle -- Circle object ============================= -The circle object is returned by ``image.find_circles``. +The circle object is returned by `image.find_circles()`. + +Constructors +------------ + +.. class:: image.circle() + + Please call `image.find_circles()` to create this object. Methods ------- @@ -715,7 +760,14 @@ Methods class Rect -- Rectangle Object ============================== -The rect object is returned by ``image.find_rects``. +The rect object is returned by `image.find_rects()`. + +Constructors +------------ + +.. class:: image.rect() + + Please call `image.find_rects()` to create this object. Methods ------- @@ -727,8 +779,8 @@ Methods .. method:: rect.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the rect's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the rect's bounding box. .. method:: rect.x() @@ -763,7 +815,14 @@ Methods class QRCode -- QRCode object ============================= -The qrcode object is returned by ``image.find_qrcodes``. +The qrcode object is returned by `image.find_qrcodes()`. + +Constructors +------------ + +.. class:: image.qrcode() + + Please call `image.find_qrcodes()` to create this object. Methods ------- @@ -775,8 +834,8 @@ Methods .. method:: qrcode.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the qrcode's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the qrcode's bounding box. .. method:: qrcode.x() @@ -865,7 +924,14 @@ Methods class AprilTag -- AprilTag object ================================= -The apriltag object is returned by ``image.find_apriltags``. +The apriltag object is returned by `image.find_apriltags()`. + +Constructors +------------ + +.. class:: image.apriltag() + + Please call `image.find_apriltags()` to create this object. Methods ------- @@ -877,8 +943,8 @@ Methods .. method:: apriltag.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the apriltag's bounding box. 
+ Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the apriltag's bounding box. .. method:: apriltag.x() @@ -1040,14 +1106,21 @@ Methods Returns the rotation in radians of the apriltag in the Z plane. E.g. rotating the camera while looking directly at the tag. - Note that this is just a renamed version of ``apriltag.rotation()``. + Note that this is just a renamed version of `apriltag.rotation()`. You may also get this value doing ``[17]`` on the object. class DataMatrix -- DataMatrix object ===================================== -The datamatrix object is returned by ``image.find_datamatrices``. +The datamatrix object is returned by `image.find_datamatrices()`. + +Constructors +------------ + +.. class:: image.datamatrix() + + Please call `image.find_datamatrices()` to create this object. Methods ------- @@ -1059,8 +1132,8 @@ Methods .. method:: datamatrix.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the datamatrix's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the datamatrix's bounding box. .. method:: datamatrix.x() @@ -1125,7 +1198,14 @@ Methods class BarCode -- BarCode object =============================== -The barcode object is returned by ``image.find_barcodes``. +The barcode object is returned by `image.find_barcodes()`. + +Constructors +------------ + +.. class:: image.barcode() + + Please call `image.find_barcodes()` to create this object. Methods ------- @@ -1137,8 +1217,8 @@ Methods .. method:: barcode.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the barcode's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the barcode's bounding box. .. method:: barcode.x() @@ -1211,7 +1291,14 @@ Methods class Displacement -- Displacement object ========================================= -The displacement object is returned by ``image.find_displacement``. +The displacement object is returned by `image.find_displacement()`. + +Constructors +------------ + +.. class:: image.displacement() + + Please call `image.find_displacement()` to create this object. Methods ------- @@ -1252,15 +1339,22 @@ Methods class kptmatch -- Keypoint Object ================================= -The kptmatch object is returned by ``image.match_descriptor`` for keypoint matches. +The kptmatch object is returned by `image.match_descriptor()` for keypoint matches. + +Constructors +------------ + +.. class:: image.kptmatch() + + Please call `image.match_descriptor()` to create this object. Methods ------- .. method:: kptmatch.rect() - Returns a rectangle tuple (x, y, w, h) for use with other ``image`` methods - like ``image.draw_rectangle`` of the kptmatch's bounding box. + Returns a rectangle tuple (x, y, w, h) for use with other `image` methods + like `image.draw_rectangle()` of the kptmatch's bounding box. .. method:: kptmatch.cx() @@ -1367,7 +1461,7 @@ Methods Returns the size of the file being read. -.. method:: imagereader.next_frame([copy_to_fb=True, loop=True]) +.. method:: imagereader.next_frame([copy_to_fb=True, [loop=True]]) Returns an image object from the file written by ImageWriter. 
If ``copy_to_fb`` is True then the image object will be directly loaded into @@ -1377,9 +1471,9 @@ Methods image from the stream is read playback will start from the beginning again. Otherwise, this method will return None after all frames have been read. - Note that next_frame() tries to limit playback speed by pausing after - reading frames to match the speed frames were recorded at. Otherwise this - method would zoom through all images at 200+ FPS. + Note that `imagereader.next_frame()` tries to limit playback speed by pausing + after reading frames to match the speed frames were recorded at. Otherwise + this method would zoom through all images at 200+ FPS. .. method:: imagereader.close() @@ -1395,7 +1489,7 @@ The image object is the basic object for machine vision operations. Constructors ------------ -.. class:: image.Image(path, copy_to_fb=False) +.. class:: image.Image(path, [copy_to_fb=False]) Creates a new image object from a file at ``path``. @@ -1429,70 +1523,166 @@ Constructors particular, if you'd like to transmit an image you can just pass it to the UART/SPI/I2C write functions to be transmitted automatically. - .. note:: - - ``copy_to_fb`` is a keyword argument which must be explicitly invoked in - the function call by writing ``copy_to_fb=``. - Methods ------- -.. method:: image.copy(roi=Auto) +.. method:: image.width() - Creates a copy of the image object. + Returns the image width in pixels. - ``roi`` is the region-of-interest rectangle (x, y, w, h) to copy from. - If not specified, it is equal to the image rectangle which copies the entire - image. This argument is not applicable for JPEG images. +.. method:: image.height() - Keep in mind that image copies are stored in the MicroPython heap and not - the frame buffer. As such, you need to keep image copies under 8KB for the - OpenMV Cam M4 and 16KB for the OpenMV Cam M7. If you attempt a copy - operation that uses up all the heap space this function will throw an - exception. Since images are large this is rather easy to trigger. + Returns the image height in pixels. - .. note:: +.. method:: image.format() - ``roi`` is a keyword argument which must be explicitly invoked in - the function call by writing ``roi=``. + Returns `sensor.GRAYSCALE` for grayscale images, `sensor.RGB565` for RGB565 + images, `sensor.BAYER` for bayer pattern images, and `sensor.JPEG` for JPEG + images. -.. method:: image.save(path, roi=Auto, quality=50) +.. method:: image.size() - Saves a copy of the image to the filesystem at ``path``. + Returns the image size in bytes. - Supports bmp/pgm/ppm/jpg/jpeg image files. Note that you cannot save jpeg - compressed images to an uncompressed format. +.. method:: image.get_pixel(x, y, [rgbtuple]) - ``roi`` is the region-of-interest rectangle (x, y, w, h) to copy from. - If not specified, it is equal to the image rectangle which copies the entire - image. This argument is not applicable for JPEG images. + For grayscale images: Returns the grayscale pixel value at location (x, y). + For RGB565 images: Returns the RGB888 pixel tuple (r, g, b) at location (x, y). + For bayer pattern images: Returns the the pixel value at the location (x, y). - ``quality`` is the jpeg compression quality to use to save the image to jpeg - format if the image is not already compressed. + Returns None if ``x`` or ``y`` is outside of the image. + + ``x`` and ``y`` may either be passed independently or as a tuple. + + ``rgbtuple`` if True causes this method to return an RGB888 tuple. 
Otherwise, + this method returns the integer value of the underlying pixel. I.e. for RGB565 + images this method returns a byte-reversed RGB565 value. Defaults to True + for RGB565 images and False otherwise. + + Not supported on compressed images. .. note:: - ``roi`` and ``quality`` are keyword arguments which must be explicitly - invoked in the function call by writing ``roi=`` or ``quality=``. + `image.get_pixel()` and `image.set_pixel()` are the only methods that allow + you to manipulate bayer pattern images. Bayer pattern images are literal images + where pixels in the image are R/G/R/G/etc. for even rows and G/B/G/B/etc. for + odd rows. Each pixel is 8-bits. -.. method:: image.compress(quality=50) +.. method:: image.set_pixel(x, y, pixel) - JPEG compresses the image in place. Use this method versus ``compressed`` - to save heap space and to use a higher ``quality`` for compression at the - cost of destroying the original image. + For grayscale images: Sets the pixel at location (x, y) to the grayscale value ``pixel``. + For RGB565 images: Sets the pixel at location (x, y) to the RGB888 tuple (r, g, b) ``pixel``. + For bayer pattern images: Sets the pixel value at the location (x, y) to the value ``pixel``. - ``quality`` is the compression quality (0-100) (int). + Returns the image object so you can call another method using ``.`` notation. + + ``x`` and ``y`` may either be passed independently or as a tuple. + + ``pixel`` may either be an RGB888 tuple (r, g, b) or the underlying pixel + value (i.e. a byte-reversed RGB565 value for RGB565 images or an 8-bit value + for grayscale images. + + Not supported on compressed images. .. note:: - ``quality`` is a keyword argument which must be explicitly - invoked in the function call by writing ``quality=``. + `image.get_pixel()` and `image.set_pixel()` are the only methods that allow + you to manipulate bayer pattern images. Bayer pattern images are literal images + where pixels in the image are R/G/R/G/etc. for even rows and G/B/G/B/etc. for + odd rows. Each pixel is 8-bits. + +.. method:: image.mean_pool(x_div, y_div) + + Finds the mean of ``x_div`` * ``y_div`` squares in the image and returns + the modified image composed of the mean of each square. + + This method allows you to shrink an image down very quickly in-place. + + Not supported on compressed images or bayer images. + +.. method:: image.mean_pooled(x_div, y_div) + + Finds the mean of ``x_div`` * ``y_div`` squares in the image and returns + a new image composed of the mean of each square. + + This method allows you to create a shrunken down image copy. + + Not supported on compressed images or bayer images. + +.. method:: image.midpoint_pool(x_div, y_div, [bias=0.5]) + + Finds the midpoint of ``x_div`` * ``y_div`` squares in the image and returns + the modified image composed of the midpoint of each square. + + A ``bias`` of 0.0 returns the min of each area while a ``bias`` of 1.0 returns + the max of each area. + + This method allows you to shrink an image down very quickly in-place. + + Not supported on compressed images or bayer images. + +.. method:: image.midpoint_pooled(x_div, y_div, [bias=0.5]) + + Finds the midpoint of ``x_div`` * ``y_div`` squares in the image and returns + a new image composed of the midpoint of each square. + + A ``bias`` of 0.0 returns the min of each area while a ``bias`` of 1.0 returns + the max of each area. + + This method allows you to create a shrunken down image copy. + + Not supported on compressed images or bayer images. + +.. 
method:: image.to_grayscale([copy=False]) + + Converts an image to a grayscale image. This method modifies the underlying + image pixels changing the image size in bytes too so it can only be done + in place on a Grayscale or an RGB565 image. Otherwise ``copy`` must be True + to create a new modified image on the heap. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.to_rgb565([copy=False]) + + Converts an image to an RGB565 image. This method modifies the underlying + image pixels changing the image size in bytes too so it can only be done + in place on an RGB565 image. Otherwise ``copy`` must be True to + create a new modified image on the heap. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.to_rainbow([copy=False]) + + Converts an image to a rainbow image. This method modifies the underlying + image pixels changing the image size in bytes too so it can only be done + in place on a RGB565 image. Otherwise ``copy`` must be True to + create a new modified image on the heap. + + A rainbow image is a color image with a unique color value for each 8-bitmask + grayscale lighting value in an image. For example, it provides heat-map color + to a thermal-image. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.compress([quality=50]) - Only call this on Grayscale and RGB565 images. + JPEG compresses the image in place. Use this method versus `image.compressed()` + to save heap space and to use a higher ``quality`` for compression at the + cost of destroying the original image. + + Returns the image object so you can call another method using ``.`` notation. + + ``quality`` is the compression quality (0-100) (int). -.. method:: image.compress_for_ide(quality=50) +.. method:: image.compress_for_ide([quality=50]) - JPEG compresses the image in place. Use this method versus ``compressed`` + JPEG compresses the image in place. Use this method versus `image.compressed()` to save heap space and to use a higher ``quality`` for compression at the cost of destroying the original image. @@ -1504,35 +1694,25 @@ Methods You need to use this method to format image data for display to terminal windows created via "Open Terminal" in OpenMV IDE. - ``quality`` is the compression quality (0-100) (int). - - .. note:: - - ``quality`` is a keyword argument which must be explicitly - invoked in the function call by writing ``quality=``. + Returns the image object so you can call another method using ``.`` notation. - Only call this on Grayscale and RGB565 images. + ``quality`` is the compression quality (0-100) (int). -.. method:: image.compressed(quality=50) +.. method:: image.compressed([quality=50]) Returns a JPEG compressed image - the original image is untouched. However, this method requires a somewhat large allocation of heap space so the image - compression quality must be low and the image resolution must be low. + compression quality must be lower and the image resolution must be lower + than what you could do with `image.compress()`. ``quality`` is the compression quality (0-100) (int). - .. note:: - - ``quality`` is a keyword argument which must be explicitly - invoked in the function call by writing ``quality=``. - - Only call this on Grayscale and RGB565 images. - -.. 
method:: image.compressed_for_ide(quality=50) +.. method:: image.compressed_for_ide([quality=50]) Returns a JPEG compressed image - the original image is untouched. However, this method requires a somewhat large allocation of heap space so the image - compression quality must be low and the image resolution must be low. + compression quality must be lower and the image resolution must be lower + than what you could do with `image.compress()`. This method JPEG compresses the image and then formats the JPEG data for transmission to OpenMV IDE to display by encoding every 6-bits as a byte @@ -1544,551 +1724,1024 @@ Methods ``quality`` is the compression quality (0-100) (int). - .. note:: +.. method:: image.copy([roi, [copy_to_fb=False]]) - ``quality`` is a keyword argument which must be explicitly - invoked in the function call by writing ``quality=``. + Creates a deep copy of the image object. - Only call this on Grayscale and RGB565 images. + ``roi`` is the region-of-interest rectangle (x, y, w, h) to copy from. + If not specified, it is equal to the image rectangle which copies the entire + image. This argument is not applicable for JPEG images. -.. method:: image.width() + Keep in mind that image copies are stored in the MicroPython heap and not + the frame buffer. As such, you need to keep image copies under 8KB for the + OpenMV Cam M4 and 16KB for the OpenMV Cam M7. If you attempt a copy + operation that uses up all the heap space this function will throw an + exception. Since images are large this is rather easy to trigger. - Returns the image width in pixels. + If ``copy_to_fb`` is True then this method instead replaces the frame + buffer with the image. The frame buffer has a lot more space than the heap + and can hold large images. -.. method:: image.height() +.. method:: image.save(path, [roi, [quality=50]]) - Returns the image height in pixels. + Saves a copy of the image to the filesystem at ``path``. -.. method:: image.format() + Supports bmp/pgm/ppm/jpg/jpeg image files. Note that you cannot save jpeg + compressed images to an uncompressed format. - Returns ``sensor.GRAYSCALE`` for grayscale images, ``sensor.RGB565`` for RGB - images and ``sensor.JPEG`` for JPEG images. + ``roi`` is the region-of-interest rectangle (x, y, w, h) to save from. + If not specified, it is equal to the image rectangle which copies the entire + image. This argument is not applicable for JPEG images. -.. method:: image.size() + ``quality`` is the jpeg compression quality to use to save the image to jpeg + format if the image is not already compressed (0-100) (int). - Returns the image size in bytes. + Returns the image object so you can call another method using ``.`` notation. .. method:: image.clear() - Zeros all bytes in GRAYSCALE or RGB565 images. Do not call this method on - JPEG images. + Sets all pixels in the image to zero (very fast). -.. method:: image.get_pixel(x, y) - - For grayscale images: Returns the grayscale pixel value at location (x, y). - For RGB images: Returns the rgb888 pixel tuple (r, g, b) at location (x, y). - For Bayer images: Returns the the pixel value at the location (x, y). + Returns the image object so you can call another method using ``.`` notation. Not supported on compressed images. -.. method:: image.set_pixel(x, y, pixel) +.. method:: image.draw_line(x0, y0, x1, y1, [color, [thickness=1]]) - For grayscale images: Sets the pixel at location (x, y) to the grayscale - value ``pixel``. 
- For RGB images: Sets the pixel at location (x, y) to the rgb888 tuple - (r, g, b) ``pixel``. + Draws a line from (x0, y0) to (x1, y1) on the image. You may either + pass x0, y0, x1, y1 separately or as a tuple (x0, y0, x1, y1). - Not supported on compressed images. + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. -.. method:: image.draw_line(line_tuple, color=White) + ``thickness`` controls how thick the line is in pixels. - Draws a line using the ``line_tuple`` (x0, y0, x1, y1) from (x0, y0) to - (x1, y1) on the image. + Returns the image object so you can call another method using ``.`` notation. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + Not supported on compressed images or bayer images. - Not supported on compressed images. +.. method:: image.draw_rectangle(x, y, w, h, [color, [thickness=1, [fill=False]]]) - .. note:: + Draws a rectangle on the image. You may either pass x, y, w, h separately + or as a tuple (x, y, w, h). - ``color`` is a keyword argument which must be explicitly - invoked in the function call by writing ``color=``. + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. -.. method:: image.draw_rectangle(rect_tuple, color=White) + ``thickness`` controls how thick the lines are in pixels. - Draws an unfilled rectangle using the ``rect_tuple`` (x, y, w, h) on the - image. + Pass ``fill`` set to True to fill the rectangle. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed images or bayer images. - .. note:: +.. method:: image.draw_circle(x, y, radius, [color, [thickness=1, [fill=False]]]) - ``color`` is a keyword argument which must be explicitly - invoked in the function call by writing ``color=``. + Draws a circle on the image. You may either pass x, y, radius separately or + as a tuple (x, y, radius). -.. method:: image.draw_circle(x, y, radius, color=White) + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. - Draws an unfilled circle at (``x``, ``y``) with integer ``radius`` on the - image. + ``thickness`` controls how thick the edges are in pixels. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + Pass ``fill`` set to True to fill the circle. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. - .. note:: + Not supported on compressed images or bayer images. - ``color`` is a keyword argument which must be explicitly - invoked in the function call by writing ``color=``. +.. method:: image.draw_string(x, y, text, [color, [scale=1, [x_spacing=0, [y_spacing=0, [mono_space=True]]]]]) -.. method:: image.draw_string(x, y, text, color=White) + Draws 8x10 text starting at location (x, y) in the image. 
You may either pass + x, y separately or as a tuple (x, y). - Draws 8x10 text starting at (``x``, ``y``) using ``text`` on the image. - ``\n``, ``\r``, and ``\r\n`` line endings move the cursor to the next line. + ``text`` is a string to write to the image. ``\n``, ``\r``, and ``\r\n`` + line endings move the cursor to the next line. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. - Not supported on compressed images. + ``scale`` may be increased to increase the size of the text on the image. + Integer values only (e.g. 1/2/3/etc.). - .. note:: + ``x_spacing`` allows you to add (if positive) or subtract (if negative) x + pixels between characters. - ``color`` is a keyword argument which must be explicitly - invoked in the function call by writing ``color=``. + ``y_spacing`` allows you to add (if positive) or subtract (if negative) y + pixels between characters (for multi-line text). -.. method:: image.draw_cross(x, y, size=5, color=White) + ``mono_space`` defaults to True which forces text to be fixed spaced. For + large text scales this looks terrible. Set this to False to get non-fixed width + character spacing which looks A LOT better. - Draws a cross at (``x``, ``y``) whose sides are ``size`` (int) long on the - image. + Returns the image object so you can call another method using ``.`` notation. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + Not supported on compressed images or bayer images. - Not supported on compressed images. +.. method:: image.draw_cross(x, y, [color, [size=5, [thickness=1]]]) - .. note:: + Draws a cross at location x, y. You may either pass x, y separately or as a + tuple (x, y). - ``size`` and ``color`` are keyword arguments which must be explicitly - invoked in the function call by writing ``size=`` or ``color=``. + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. -.. method:: image.draw_keypoints(keypoints, size=Auto, color=White) + ``size`` controls how long the lines of the cross extend. - Draws the keypoints of a keypoints object on the image. ``size`` controls - the size of the keypoints and is scaled to look good on the image unless - overridden. + ``thickness`` controls how thick the edges are in pixels. - ``color`` is an int value (0-255) for grayscale images and a RGB888 tuple - (r, g, b) for RGB images. Defaults to white. + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed images or bayer images. - .. note:: +.. method:: image.draw_arrow(x0, y0, x1, y1, [color, [thickness=1]]) - ``size`` and ``color`` are keyword arguments which must be explicitly - invoked in the function call by writing ``size=`` or ``color=``. + Draws an arrow from (x0, y0) to (x1, y1) on the image. You may + either pass x0, y0, x1, y1 separately or as a tuple (x0, y0, x1, y1). -.. method:: image.binary(thresholds, invert=False) + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white.
However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. - For grayscale images ``thresholds`` is a list of (lower, upper) grayscale - pixel thresholds to segment the image by. Segmentation converts all pixels - within the thresholds to 1 (white) and all pixels outside to 0 (black). + ``thickness`` controls how thick the line is in pixels. - For RGB images ``thresholds`` is a list of (l_lo, l_hi, a_lo, a_hi, b_lo, - b_hi) LAB pixel thresholds to segment the image by. Segmentation converts - all pixels within the thresholds to 1 (white) and all pixels outside to 0 - (black). + Returns the image object so you can call another method using ``.`` notation. - Lo/Hi thresholds being swapped is automatically handled. + Not supported on compressed images or bayer images. - ``invert`` inverts the outcome of the segmentation operation. +.. method:: image.draw_image(image, x, y, [x_scale=1.0, [y_scale=1.0, [mask=None]]]) - Not supported on compressed images. + Draws an ``image`` whose top-left corner starts at location x, y. You may either + pass x, y separately or as a tuple (x, y). - .. note:: + ``x_scale`` controls how much the image is scaled by in the x direction (float). - ``invert`` is a keyword argument which must be explicitly - invoked in the function call by writing ``invert=``. + ``y_scale`` controls how much the image is scaled by in the y direction (float). -.. method:: image.invert() + ``mask`` is another image to use as a pixel level mask for the drawing operation. + The mask should be an image with just black or white pixels and should be the + same size as the ``image`` you are drawing if passed. You may use the mask + to do sprite style drawing operations. - Inverts the binary image 0 (black) pixels go to 1 (white) and 1 (white) - pixels go to 0 (black). + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed images or bayer images. -.. method:: image.b_and(image) +.. method:: image.draw_keypoints(keypoints, [color, [size=10, [thickness=1, [fill=False]]]]) - Logically ANDs this image with another image. + Draws the keypoints of a keypoints object on the image. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. - Both images must be the same size and the same type (grayscale/rgb). + ``size`` controls how large the keypoints are. - Not supported on compressed images. + ``thickness`` controls how thick the line is in pixels. -.. method:: image.b_or(image) + Pass ``fill`` set to True to fill the keypoints. - Logically ORs this image with another image. + Returns the image object so you can call another method using ``.`` notation. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Not supported on compressed images or bayer images. - Both images must be the same size and the same type (grayscale/rgb). +.. method:: image.flood_fill(x, y, [seed_threshold=0.05, [floating_threshold=0.05, [color, [invert=False, [clear_background=False, [mask=None]]]]]]) - Not supported on compressed images. + Flood fills a region of the image starting from location x, y. 
You may either + pass x, y separately or as a tuple (x, y). -.. method:: image.b_nand(image) + ``seed_threshold`` controls how different any pixel in the fill area may be + from the original starting pixel. - Logically NANDs this image with another image. You can also invoke this - method by using ``image.nand``. + ``floating_threshold`` controls how different any pixel in the fill area may + be from any neighbor pixels. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + ``color`` is an RGB888 tuple for Grayscale or RGB565 images. Defaults to + white. However, you may also pass the underlying pixel value (0-255) for + grayscale images or a byte-reversed RGB565 value for RGB565 images. - Both images must be the same size and the same type (grayscale/rgb). + Pass ``invert`` as True to re-color everything outside of the flood-fill + connected area. - Not supported on compressed images. + Pass ``clear_background`` as True to zero the rest of the pixels that + flood-fill did not re-color. -.. method:: image.b_nor(image) + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + evaluated when flood filling. - Logically NORs this image with another image. You can also invoke this - method by using ``image.nor``. + Returns the image object so you can call another method using ``.`` notation. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Not supported on compressed images or bayer images. - Both images must be the same size and the same type (grayscale/rgb). +.. method:: image.binary(thresholds, [invert=False, [zero=False, [mask=None]]]) - Not supported on compressed images. + Sets all pixels in the image to black or white depending on if the pixel + is inside of a threshold in the threshold list ``thresholds`` or not. -.. method:: image.b_xor(image) + ``thresholds`` must be a list of tuples + ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you + want to track. For + grayscale images each tuple needs to contain two values - a min grayscale + value and a max grayscale value. Only pixel regions that fall between these + thresholds will be considered. For RGB565 images each tuple needs to have + six values (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi) - which are minimums and + maximums for the LAB L, A, and B channels respectively. For easy usage this + function will automatically fix swapped min and max values. Additionally, + if a tuple is larger than six values the rest are ignored. Conversely, if the + tuple is too short the rest of the thresholds are assumed to be at maximum + range. - Logically XORs this image with another image. You can also invoke this - method by using ``image.xor``. + .. note:: - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + To get the thresholds for the object you want to track just select (click + and drag) on the object you want to track in the IDE frame buffer. The + histogram will then update to just be in that area. Then just write down + where the color distribution starts and falls off in each histogram channel. + These will be your low and high values for ``thresholds``. It's best to + manually determine the thresholds versus using the upper and lower + quartile statistics because they are too tight. 
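As a rough sketch of what such a threshold list looks like in code (the bounds below are invented and will need tuning for your own scene; ``img`` is assumed to be an image object, e.g. from ``sensor.snapshot()``)::

    # Grayscale: keep pixels whose brightness falls between 200 and 255.
    img.binary([(200, 255)])

    # RGB565: keep pixels whose LAB values fall inside one range.
    # Tuple order is (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi).
    img.binary([(30, 100, 15, 127, 15, 127)])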
- Both images must be the same size and the same type (grayscale/rgb). + You may also determine color thresholds by going into + ``Tools->Machine Vision->Threshold Editor`` in OpenMV IDE and selecting + thresholds from the GUI slider window. - Not supported on compressed images. + ``invert`` inverts the thresholding operation such that instead of matching + pixels inside of some known color bounds pixels are matched that are outside + of the known color bounds. -.. method:: image.b_xnor(image) + Set ``zero`` to True to instead zero thresholded pixels and leave pixels + not in the threshold list untouched. - Logically XNORs this image with another image. You can also invoke this - method by using ``image.xnor``. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Returns the image object so you can call another method using ``.`` notation. - Both images must be the same size and the same type (grayscale/rgb). + Not supported on compressed images or bayer images. - Not supported on compressed images. +.. method:: image.invert() -.. method:: image.erode(size, threshold=Auto) + Flips (binary inverts) all pixels values in a binary image very quickly. - Removes pixels from the edges of segmented areas. + Returns the image object so you can call another method using ``.`` notation. - This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels - across the image and zeroing the center pixel of the kernel if the sum of - the neighbour pixels set is not greater than ``threshold``. + Not supported on compressed images or bayer images. - This method works like the standard erode method if threshold is not set. If - ``threshold`` is set then you can specify erode to only erode pixels that - have, for example, less than 2 pixels set around them with a threshold of 2. +.. method:: image.b_and(image, [mask=None]) - Not supported on compressed images. This method is designed to work on - binary images. + Logically ANDs this image with another image. - .. note:: + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - ``threshold`` is a keyword argument which must be explicitly - invoked in the function call by writing ``threshold=``. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. -.. method:: image.dilate(size, threshold=Auto) + Returns the image object so you can call another method using ``.`` notation. - Adds pixels to the edges of segmented areas. + Not supported on compressed images or bayer images. - This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels - across the image and setting the center pixel of the kernel if the sum of - the neighbour pixels set is greater than ``threshold``. +.. method:: image.b_nand(image, [mask=None]) - This method works like the standard dilate method if threshold is not set. 
- If ``threshold`` is set then you can specify dilate to only dilate pixels - that have, for example, more than 2 pixels set around them with a threshold - of 2. + Logically NANDs this image with another image. - Not supported on compressed images. This method is designed to work on - binary images. + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - .. note:: + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - ``threshold`` is a keyword argument which must be explicitly - invoked in the function call by writing ``threshold=``. + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.b_or(image, [mask=None]) + + Logically ORs this image with another image. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.b_nor(image, [mask=None]) + + Logically NORs this image with another image. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.b_xor(image, [mask=None]) + + Logically XORs this image with another image. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. 
method:: image.b_xnor(image, [mask=None]) + + Logically XNORs this image with another image. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.erode(size, [threshold, [mask=None]]) + + Removes pixels from the edges of segmented areas. + + This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels + across the image and zeroing the center pixel of the kernel if the sum of + the neighbour pixels set is not greater than ``threshold``. + + This method works like the standard erode method if threshold is not set. If + ``threshold`` is set then you can specify erode to only erode pixels that + have, for example, less than 2 pixels set around them with a threshold of 2. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.dilate(size, [threshold, [mask=None]]) + + Adds pixels to the edges of segmented areas. + + This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels + across the image and setting the center pixel of the kernel if the sum of + the neighbour pixels set is greater than ``threshold``. + + This method works like the standard dilate method if threshold is not set. + If ``threshold`` is set then you can specify dilate to only dilate pixels + that have, for example, more than 2 pixels set around them with a threshold + of 2. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.open(size, [threshold, [mask=None]]) + + Performs erosion and dilation on an image in order. Please see `image.erode()` + and `image.dilate()` for more information. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.close(size, [threshold, [mask=None]]) + + Performs dilation and erosion on an image in order. Please see `image.dilate()` + and `image.erode()` for more information. + + ``mask`` is another image to use as a pixel level mask for the operation. 
+ The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.top_hat(size, [threshold, [mask=None]]) + + Returns the image difference of the image and `image.open()`'ed image. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Not supported on compressed images or bayer images. + +.. method:: image.black_hat(size, [threshold, [mask=None]]) + + Returns the image difference of the image and `image.close()`'ed image. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Not supported on compressed images or bayer images. .. method:: image.negate() - Numerically inverts pixel values for each color channel. E.g. (255-pixel). + Flips (numerically inverts) all pixels values in an image very quickly. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. -.. method:: image.difference(image) + Not supported on compressed images or bayer images. - Subtracts another image from this image. E.g. for each color channel each - pixel is replaced with ABS(this.pixel-image.pixel). +.. method:: image.replace(image, [hmirror=False, [vflip=False, [mask=None]]]) - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Replaces all pixels in the image with a new image. - Both images must be the same size and the same type (grayscale/rgb). + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - Not supported on compressed images. + Set ``hmirror`` to True to horizontally mirror the replacing image. - .. note:: This function is used for frame differencing which you can then - use to do motion detection. You can then mask the resulting image - using NAND/NOR before running statistics functions on the image. + Set ``vflip`` to True to vertically flip the replacing image. -.. method:: image.replace(image) + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Replace this image with ``image`` (this is much faster than blend for this). + Returns the image object so you can call another method using ``.`` notation. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Not supported on compressed images or bayer images. - Both images must be the same size and the same type (grayscale/rgb). +.. method:: image.add(image, [mask=None]) - Not supported on compressed images. + Adds an image pixel-wise to this one. -.. 
method:: image.blend(image, alpha=128) + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - Blends another image ``image`` into this image. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Returns the image object so you can call another method using ``.`` notation. - ``alpha`` controls the transparency. 256 for an opaque overlay. 0 for none. + Not supported on compressed images or bayer images. - Both images must be the same size and the same type (grayscale/rgb). +.. method:: image.sub(image, [reverse=False, [mask=None]]) - Not supported on compressed images. + Subtracts an image pixel-wise to this one. - .. note:: + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - ``alpha`` is a keyword argument which must be explicitly - invoked in the function call by writing ``alpha=``. + Set ``reverse`` to True to reverse the subtraction operation from + ``this_image-image`` to ``image-this_image``. -.. method:: image.max(image) + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - On a pixel-by-pixel level replace a pixel in this iamge with the maximum pixel - value between this image and another image. + Returns the image object so you can call another method using ``.`` notation. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + Not supported on compressed images or bayer images. - Both images must be the same size and the same type (grayscale/rgb). +.. method:: image.mul(image, [invert=False, [mask=None]]) - Not supported on compressed images. + Multiplies two images pixel-wise with each other. -.. method:: image.min(image) + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - On a pixel-by-pixel level replace a pixel in this iamge with the minimum pixel - value between this image and another image. + Set ``invert`` to True to change the multiplication operation from ``a*b`` + to ``1/((1/a)*(1/b))``. In particular, this lightens the image instead of + darkening it (e.g. multiply versus burn operations). - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. 
Only pixels set in the mask are + modified. - Both images must be the same size and the same type (grayscale/rgb). + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed images or bayer images. -.. method:: image.remove_shadows([image]) +.. method:: image.div(image, [invert=False, [mask=None]]) - Removes shadows from this image. + Divides this image by another one. - If no "shadow-free" version of the current image is passed this method will - attempt to remove shadows from the image without a source of truth. The - curent algorithm for this is suitable for removing shadows from flat uniform - backgrounds. Note that this method takes multiple seconds to run and is only - good for producing a shadow-free version of the image dynamically for real-time - shadow removal. Future versions of this algorithm will be suitable for more - environments but equally slow. + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - If a "shadow-free" version of the current image is paassed this method will - remove all shadow in the image using the "source-of-truth" background - shadow-free image to filter out shadows. Non-shadow pixels will not be filtered - out so you may add new objects to the scene that were not previously there and - any non-shadow pixels in those objects will show up. + Set ``invert`` to True to change the division direction from ``a/b`` to + ``b/a``. - This method is incredibly useful for frame differencing motion detection. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Only works on RGB565 images. + Returns the image object so you can call another method using ``.`` notation. -.. method:: image.morph(size, kernel, mul=Auto, add=0) + Not supported on compressed images or bayer images. - Convolves the image by a filter kernel. +.. method:: image.min(image, [mask=None]) - ``size`` controls the size of the kernel which must be - ((size*2)+1)x((size*2)+1) pixels big. + Returns the minimum image of two images pixel-wise. - ``kernel`` is the kernel to convolve the image by. It can either be a tuple - or a list of [-128:127] values. + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - ``mul`` is number to multiply the convolution pixel result by. When not set - it defaults to a value that will prevent scaling in the convolution output. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - ``add`` is a value to add to each convolution pixel result. + Returns the image object so you can call another method using ``.`` notation. 
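As a brief sketch of how these pixel-wise arithmetic methods are used (``img`` and ``bg`` are assumed to be two same-size images, for example the current frame and a previously saved background frame; the file path is hypothetical)::

    img.sub(bg)                 # pixel-wise subtraction: img - bg, modifies img in place
    img.sub(bg, reverse=True)   # subtract the other way around: bg - img
    img.min("/background.pgm")  # pixel-wise minimum against an image loaded from a path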
- ``mul`` basically allows you to do a global contrast adjustment and ``add`` - allows you to do a global brightness adjustment. + Not supported on compressed images or bayer images. - .. note:: +.. method:: image.max(image, [mask=None]) + + Returns the maximum image of two images pixel-wise. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.difference(image, [mask=None]) + + Returns the absolute difference image between two images (e.g. ||a-b||). + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.blend(image, [alpha=128, [mask=None]]) + + Alpha blends two images with each other. + + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). + + ``alpha`` controls how much of the other image to blend into this image. + ``alpha`` should be an integer value between 0 and 256. A value closer to + zero blends more of the other image into this image and a value closer to + 256 does the opposite. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. - ``mul`` and ``add`` are keyword arguments which must be explicitly - invoked in the function call by writing ``mul=`` or ``add=``. + Not supported on compressed images or bayer images. -.. method:: image.midpoint(size, [bias=0.5, threshold=False, offset=0, invert=False, mask]) +.. method:: image.histeq([adaptive=False, [clip_limit=-1, [mask=None]]]) - Runs the midpoint filter on the image. + Runs the histogram equalization algorithm on the image. Histogram + equalization normalizes the contrast and brightness in the image. + + If you pass ``adaptive`` as True then an adaptive histogram equalization + method will be run on the image instead which has generally better results + than non-adaptive histogram equalization but a longer run time.
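For example (a minimal sketch; ``img`` is assumed to be a Grayscale or RGB565 image, e.g. from ``sensor.snapshot()``)::

    img.histeq()               # global histogram equalization
    img.histeq(adaptive=True)  # adaptive equalization - generally better results, slower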
+ + ``clip_limit`` provides a way to limit the contrast of the adaptive histogram + equalization. Use a small value for this, like 10, to produce good histogram + equalized contrast limited images. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.mean(size, [threshold=False, [offset=0, [invert=False, [mask=None]]]]) + + Standard mean blurring filter using a box filter. + + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. + + If you'd like to adaptive threshold the image on the output of the filter + you can pass ``threshold=True`` which will enable adaptive thresholding of the + image which sets pixels to one or zero based on a pixel's brightness in relation + to the brightness of the kernel of pixels around them. A negative ``offset`` + value sets more pixels to 1 as you make it more negative while a positive + value only sets the sharpest contrast changes to 1. Set ``invert`` to invert + the binary image resulting output. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.median(size, [percentile=0.5, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + + Runs the median filter on the image. The median filter is the best filter + for smoothing surfaces while preserving edges but it is very slow. + + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. + + ``percentile`` controls the percentile of the value used in the kernel. By + default each pixel is replaced with the 50th percentile (center) of its + neighbors. You can set this to 0 for a min filter, 0.25 for a lower quartile + filter, 0.75 for an upper quartile filter, and 1.0 for a max filter. + + If you'd like to adaptive threshold the image on the output of the filter + you can pass ``threshold=True`` which will enable adaptive thresholding of the + image which sets pixels to one or zero based on a pixel's brightness in relation + to the brightness of the kernel of pixels around them. A negative ``offset`` + value sets more pixels to 1 as you make it more negative while a positive + value only sets the sharpest contrast changes to 1. Set ``invert`` to invert + the binary image resulting output. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.mode(size, [threshold=False, [offset=0, [invert=False, [mask=None]]]]) + + Runs the mode filter on the image by replacing each pixel with the mode of + their neighbors. This method works great on grayscale images.
However, on + RGB images it creates a lot of artifacts on edges because of the non-linear + nature of the operation. + + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. + + If you'd like to adaptive threshold the image on the output of the filter + you can pass ``threshold=True`` which will enable adaptive thresholding of the + image which sets pixels to one or zero based on a pixel's brightness in relation + to the brightness of the kernel of pixels around them. A negative ``offset`` + value sets more pixels to 1 as you make it more negative while a positive + value only sets the sharpest contrast changes to 1. Set ``invert`` to invert + the binary image resulting output. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.midpoint(size, [bias=0.5, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + + Runs the midpoint filter on the image. This filter finds the midpoint + ((max-min)/2) of each pixel neighborhood in the image. - ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), or higher. + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. ``bias`` controls the min/max mixing. 0 for min filtering only, 1.0 for max filtering only. By using the ``bias`` you can min/max filter the image. - If you'd like to adaptive threshold the image on the output of the mean filter + If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the - image which sets pixels to 1 or zero based on a pixel's brightness in relation + image which sets pixels to one or zero based on a pixel's brightness in relation to the brightness of the kernel of pixels around them. A negative ``offset`` value sets more pixels to 1 as you make it more negative while a positive value only sets the sharpest contrast changes to 1. Set ``invert`` to invert the binary image resulting output. - ``mask`` may be an optional binary image of the same size used to turn the - mode filter on/off on a pixel-by-pixel level based if a pixel is 1/0 respectively. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. - .. note:: + Not supported on compressed images or bayer images. - ``bias`` is a keyword argument which must be explicitly - invoked in the function call by writing ``bias=``. +.. method:: image.morph(size, kernel, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]) -.. method:: image.mean(size, [threshold=False, offset=0, invert=False, mask]) + Convolves the image by a filter kernel. This allows you to do general purpose + convolutions on an image. - Standard mean blurring filter (faster than using morph for this). + ``size`` controls the size of the kernel which must be + ((size*2)+1)x((size*2)+1) elements big. - ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), or higher. 
+ ``kernel`` is the kernel to convolve the image by. It can either be a tuple + or a list of integer values. - If you'd like to adaptive threshold the image on the output of the mean filter + ``mul`` is the number to multiply the convolution pixel results by. When not set + it defaults to a value that will prevent scaling in the convolution output. + + ``add`` is a value to add to each convolution pixel result. + + ``mul`` basically allows you to do a global contrast adjustment and ``add`` + allows you to do a global brightness adjustment. Pixels that go outside of + the image mins and maxes for color channels will be clipped. + + If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the - image which sets pixels to 1 or zero based on a pixel's brightness in relation + image which sets pixels to one or zero based on a pixel's brightness in relation to the brightness of the kernel of pixels around them. A negative ``offset`` value sets more pixels to 1 as you make it more negative while a positive value only sets the sharpest contrast changes to 1. Set ``invert`` to invert the binary image resulting output. - ``mask`` may be an optional binary image of the same size used to turn the - mode filter on/off on a pixel-by-pixel level based if a pixel is 1/0 respectively. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. -.. method:: median(size, [percentile=0.5, threshold=False, offset=0, invert=False, mask]) + Not supported on compressed images or bayer images. - Runs the median filter on the image. The median filter is the best filter - for smoothing surfaces while preserving edges but it is very slow. +.. method:: image.gaussian(size, [unsharp=False, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]]) - ``size`` is the kernel size. Use 1 (3x3 kernel) or 2 (5x5 kernel). + Convolves the image by a smoothing gaussian kernel. - ``percentile`` controls the percentile of the value used in the kernel. By - default each pixel is replace with the 50th percentile (center) of it's - neighbours. You can set this to 0 for a min filter, 0.25 for a lower quartile - filter, 0.75 for an upper quartile filter, and 1.0 for a max filter. + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. + + If ``unsharp`` is set to True then instead of doing just a gaussian + filtering operation this method will perform an unsharp mask operation which + improves image sharpness on edges. + + ``mul`` is the number to multiply the convolution pixel results by. When not set + it defaults to a value that will prevent scaling in the convolution output. + + ``add`` is a value to add to each convolution pixel result. + + ``mul`` basically allows you to do a global contrast adjustment and ``add`` + allows you to do a global brightness adjustment. Pixels that go outside of + the image mins and maxes for color channels will be clipped.
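As an illustrative sketch of the two common ways this filter is called (``img`` is assumed to come from ``sensor.snapshot()``)::

    img.gaussian(1)                # 3x3 gaussian blur for light smoothing
    img.gaussian(2, unsharp=True)  # 5x5 kernel applied as an unsharp mask to sharpen edges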
- If you'd like to adaptive threshold the image on the output of the mean filter + If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the - image which sets pixels to 1 or zero based on a pixel's brightness in relation + image which sets pixels to one or zero based on a pixel's brightness in relation to the brightness of the kernel of pixels around them. A negative ``offset`` value sets more pixels to 1 as you make it more negative while a positive value only sets the sharpest contrast changes to 1. Set ``invert`` to invert the binary image resulting output. - ``mask`` may be an optional binary image of the same size used to turn the - mode filter on/off on a pixel-by-pixel level based if a pixel is 1/0 respectively. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. - .. note:: + Not supported on compressed images or bayer images. - ``percentile`` is a keyword argument which must be explicitly - invoked in the function call by writing ``percentile=``. +.. method:: image.laplacian(size, [sharpen=False, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]]) -.. method:: image.mode(size, [threshold=False, offset=0, invert=False, mask]) + Convolves the image by an edge detecting laplacian kernel. - Runs the mode filter on the image by replacing each pixel with the mode of - their neighbours. This method works great on grayscale images. However, on - RGB images it creates a lot of artifacts on edges because of the non-linear - nature of the operation. + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. + + If ``sharpen`` is set to True then instead of just outputting an + unthresholded edge detection image this method will instead sharpen the + image. Increase the kernel size then to increase the image sharpness. + + ``mul`` is the number to multiply the convolution pixel results by. When not set + it defaults to a value that will prevent scaling in the convolution output. - If you'd like to adaptive threshold the image on the output of the mean filter + ``add`` is a value to add to each convolution pixel result. + + ``mul`` basically allows you to do a global contrast adjustment and ``add`` + allows you to do a global brightness adjustment. Pixels that go outside of + the image mins and maxes for color channels will be clipped. + + If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the - image which sets pixels to 1 or zero based on a pixel's brightness in relation + image which sets pixels to one or zero based on a pixel's brightness in relation to the brightness of the kernel of pixels around them. A negative ``offset`` value sets more pixels to 1 as you make it more negative while a positive value only sets the sharpest contrast changes to 1. Set ``invert`` to invert the binary image resulting output. - ``mask`` may be an optional binary image of the same size used to turn the - mode filter on/off on a pixel-by-pixel level based if a pixel is 1/0 respectively. + ``mask`` is another image to use as a pixel level mask for the operation.
+ The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - ``size`` is the kernel size. Use 1 (3x3 kernel) or 2 (5x5 kernel). + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed images or bayer images. -.. method:: image.gaussian(size) +.. method:: image.bilateral(size, [color_sigma=0.1, [space_sigma=1, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]) - Smooths the image with the gaussian kernel. ``size`` may be either 3 or 5 - for a 3x3 or 5x5 kernel. + Convolves the image by a bilateral filter. The bilateral filter smooths the + image while keeping edges in the image. - Not supported on compressed images. + ``size`` is the kernel size. Use 1 (3x3 kernel), 2 (5x5 kernel), etc. -.. method:: image.linpolar([reverse=False]) + ``color_sigma`` controls how closely colors are matched using the bilateral + filter. Increase this to increase color blurring. - Re-project's and image from cartessian coordinates to linear polar coordinates. + ``space_sigma`` controls how closely pixels space-wise are blurred with + each other. Increase this to increase pixel blurring. - Set ``reverse=True`` to re-project in the opposite direction. + If you'd like to adaptive threshold the image on the output of the filter + you can pass ``threshold=True`` which will enable adaptive thresholding of the + image which sets pixels to one or zero based on a pixel's brightness in relation + to the brightness of the kernel of pixels around them. A negative ``offset`` + value sets more pixels to 1 as you make it more negative while a positive + value only sets the sharpest contrast changes to 1. Set ``invert`` to invert + the binary image resulting output. - Linear polar re-projection turns rotation of an image into x-translation. + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. - Not supported on compressed images. + Returns the image object so you can call another method using ``.`` notation. -.. method:: image.logpolar([reverse=False]) + Not supported on compressed images or bayer images. - Re-project's and image from cartessian coordinates to log polar coordinates. +.. method:: image.cartoon(size, [seed_threshold=0.05, [floating_threshold=0.05, [mask=None]]]) - Set ``reverse=True`` to re-project in the opposite direction. + Walks across an image and flood-fills all pixels regions in the image. This + effectively removes texture from the image by flattening the color in all + regions of the image. For the best results, the image should have lots of + contrast such that regions do not bleed into each other too easily. - Log polar re-projection turns rotation of an image into x-translation - and scaling/zooming into y-translation. + ``seed_threshold`` controls how different any pixel in the fill area may be + from the original starting pixel. - Not supported on compressed images. + ``floating_threshold`` controls how different any pixel in the fill area may + be from any neighbor pixels. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. 
Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: image.remove_shadows([image]) + + Removes shadows from this image. + + If no "shadow-free" version of the current image is passed this method will + attempt to remove shadows from the image without a source of truth. The + current algorithm for this is suitable for removing shadows from flat uniform + backgrounds. Note that this method takes multiple seconds to run and is only + good for producing a shadow-free version of the image dynamically for real-time + shadow removal. Future versions of this algorithm will be suitable for more + environments but equally slow. + + If a "shadow-free" version of the current image is passed this method will + remove all shadow in the image using the "source-of-truth" background + shadow-free image to filter out shadows. Non-shadow pixels will not be filtered + out so you may add new objects to the scene that were not previously there and + any non-shadow pixels in those objects will show up. + + This method is incredibly useful for frame differencing motion detection. + + Returns the image object so you can call another method using ``.`` notation. + + Only works on RGB565 images. .. method:: image.chrominvar() - Removes the effect of lighting from the image. + Removes illumination from the input image leaving only color gradients + behind. Faster than `image.illuminvar()` but affected by shadows. - RGB565 images only. + Returns the image object so you can call another method using ``.`` notation. + + Only works on RGB565 images. .. method:: image.illuminvar() - Removes the effect of lighting from the image and shadows. + Removes illumination from the input image leaving only color gradients + behind. Slower than `image.chrominvar()` but unaffected by shadows. + + Returns the image object so you can call another method using ``.`` notation. - RGB565 images only. + Only works on RGB565 images. -.. method:: image.histeq() +.. method:: image.linpolar([reverse=False]) - Runs the histogram equalization algorithm on the image. Histogram - equalization normalizes the contrast and brightness in the image. + Re-projects an image from cartesian coordinates to linear polar coordinates. - Not supported on compressed images. + Set ``reverse=True`` to re-project in the opposite direction. + + Linear polar re-projection turns rotation of an image into x-translation. + + Not supported on compressed images or bayer images. + +.. method:: image.logpolar([reverse=False]) + + Re-projects an image from cartesian coordinates to log polar coordinates. + + Set ``reverse=True`` to re-project in the opposite direction. + + Log polar re-projection turns rotation of an image into x-translation + and scaling/zooming into y-translation. -.. method:: image.lens_corr(strength=1.8, zoom=1.0) + Not supported on compressed images or bayer images. - Performs lens correction to un-fisheye the image due to the lens. +.. method:: image.lens_corr([strength=1.8, [zoom=1.0]]) + Performs lens correction to un-fisheye the image due to the lens distortion. ``strength`` is a float defining how much to un-fisheye the image. Try 1.8 out by default and then increase or decrease from there until the image @@ -2096,7 +2749,11 @@ Methods ``zoom`` is the amount to zoom in on the image by. 1.0 by default. -..
method:: img.rotation_corr(x_rotation=0.0, y_rotation=0.0, z_rotation=0.0, x_translation=0.0, y_translation=0.0, zoom=1.0) + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + +.. method:: img.rotation_corr([x_rotation=0.0, [y_rotation=0.0, [z_rotation=0.0, [x_translation=0.0, [y_translation=0.0, [zoom=1.0]]]]]]) Corrects perspective issues in the image by doing a 3D rotation of the frame buffer. @@ -2117,53 +2774,118 @@ Methods ``zoom`` is the amount to zoom in on the image by. 1.0 by default. + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer images. + .. method:: image.get_similarity(image) - Returns a ``similarity`` object describing how similar two images are using + Returns a `image.similarity` object describing how similar two images are using the SSIM algorithm to compare 8x8 pixel patches between the two images. - ``image`` can either be an image object or a path to an uncompressed image - file (bmp/pgm/ppm). - - Both images must be the same size and the same type (grayscale/rgb). + ``image`` can either be an image object, a path to an uncompressed image + file (bmp/pgm/ppm), or a scalar value. If a scalar value the value can + either be an RGB888 tuple or the underlying pixel value (e.g. an 8-bit grayscale + for grayscale images or a byte-reversed RGB565 value for RGB images). - Not supported on compressed images. + Not supported on compressed images or bayer images. -.. method:: image.get_histogram(roi=Auto, bins=Auto, l_bins=Auto, a_bins=Auto, b_bins=Auto) +.. method:: image.get_histogram([thresholds, [invert=False, [roi, [bins, [l_bins, [a_bins, [b_bins]]]]]]]) Computes the normalized histogram on all color channels for an ``roi`` and - returns a ``histogram`` object. Please see the ``histogram`` object for more - information. You can also invoke this method by using ``image.get_hist`` or - ``image.histogram``. + returns a `image.histogram` object. Please see the `image.histogram` object for more + information. You can also invoke this method by using ``image.get_hist()`` or + ``image.histogram()``. If you pass a list of ``thresholds`` then the histogram + information will only be computed from pixels within the threshold list. + + ``thresholds`` must be a list of tuples + ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you + want to track. For + grayscale images each tuple needs to contain two values - a min grayscale + value and a max grayscale value. Only pixel regions that fall between these + thresholds will be considered. For RGB565 images each tuple needs to have + six values (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi) - which are minimums and + maximums for the LAB L, A, and B channels respectively. For easy usage this + function will automatically fix swapped min and max values. Additionally, + if a tuple is larger than six values the rest are ignored. Conversely, if the + tuple is too short the rest of the thresholds are assumed to be at maximum + range. + + .. note:: + + To get the thresholds for the object you want to track just select (click + and drag) on the object you want to track in the IDE frame buffer. The + histogram will then update to just be in that area. Then just write down + where the color distribution starts and falls off in each histogram channel. + These will be your low and high values for ``thresholds``. 
It's best to + manually determine the thresholds versus using the upper and lower + quartile statistics because they are too tight. + + You may also determine color thresholds by going into + ``Tools->Machine Vision->Threshold Editor`` in OpenMV IDE and selecting + thresholds from the GUI slider window. + + ``invert`` inverts the thresholding operation such that instead of matching + pixels inside of some known color bounds pixels are matched that are outside + of the known color bounds. Unless you need to do something advanced with color statistics just use the - ``image.get_statistics`` method instead of this method for looking at pixel + `image.get_statistics()` method instead of this method for looking at pixel areas in an image. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - ``bin_count`` and others are the number of bins to use for the histogram - channels. For grayscale images use ``bin_count`` and for RGB565 images use + ``bins`` and others are the number of bins to use for the histogram + channels. For grayscale images use ``bins`` and for RGB565 images use the others for each channel. The bin counts must be greater than 2 for each channel. Additionally, it makes no sense to set the bin count larger than - the number of unique pixel values for each channel. + the number of unique pixel values for each channel. By default, the historgram + will have the maximum number of bins per channel. - Not supported on compressed images. + Not supported on compressed images or bayer images. + +.. method:: image.get_statistics([thresholds, [invert=False, [roi, [bins, [l_bins, [a_bins, [b_bins]]]]]]]) + + Computes the mean, median, mode, standard deviation, min, max, lower + quartile, and upper quartile for all color channels for an ``roi`` and + returns a `image.statistics` object. Please see the `image.statistics` + object for more information. You can also invoke this method by using + ``image.get_stats`` or ``image.statistics``. If you pass a list of + ``thresholds`` then the histogram information will only be computed from + pixels within the threshold list. + + ``thresholds`` must be a list of tuples + ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you + want to track. For + grayscale images each tuple needs to contain two values - a min grayscale + value and a max grayscale value. Only pixel regions that fall between these + thresholds will be considered. For RGB565 images each tuple needs to have + six values (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi) - which are minimums and + maximums for the LAB L, A, and B channels respectively. For easy usage this + function will automatically fix swapped min and max values. Additionally, + if a tuple is larger than six values the rest are ignored. Conversely, if the + tuple is too short the rest of the thresholds are assumed to be at maximum + range. .. note:: - ``roi``, ``bin_count``, and etc. are keyword arguments which must be - explicitly invoked in the function call by writing ``roi=``, etc. + To get the thresholds for the object you want to track just select (click + and drag) on the object you want to track in the IDE frame buffer. The + histogram will then update to just be in that area. Then just write down + where the color distribution starts and falls off in each histogram channel. + These will be your low and high values for ``thresholds``. 
It's best to + manually determine the thresholds versus using the upper and lower + quartile statistics because they are too tight. -.. method:: image.get_statistics(roi=Auto, bins=Auto, l_bins=Auto, a_bins=Auto, b_bins=Auto) + You may also determine color thresholds by going into + ``Tools->Machine Vision->Threshold Editor`` in OpenMV IDE and selecting + thresholds from the GUI slider window. - Computes the mean, median, mode, standard deviation, min, max, lower - quartile, and upper quartile for all color channels for an ``roi`` and - returns a ``statistics`` object. Please see the ``statistics`` object for - more information. You can also invoke this method by using - ``image.get_stats`` or ``image.statistics``. + ``invert`` inverts the thresholding operation such that instead of matching + pixels inside of some known color bounds pixels are matched that are outside + of the known color bounds. You'll want to use this method any time you need to get information about the values of an area of pixels in an image. For example, after if you're @@ -2175,20 +2897,16 @@ Methods specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - ``bin_count`` and others are the number of bins to use for the histogram - channels. For grayscale images use ``bin_count`` and for RGB565 images use + ``bins`` and others are the number of bins to use for the histogram + channels. For grayscale images use ``bins`` and for RGB565 images use the others for each channel. The bin counts must be greater than 2 for each channel. Additionally, it makes no sense to set the bin count larger than - the number of unique pixel values for each channel. - - Not supported on compressed images. - - .. note:: + the number of unique pixel values for each channel. By default, the historgram + will have the maximum number of bins per channel. - ``roi``, ``bin_count``, and etc. are keyword arguments which must be - explicitly invoked in the function call by writing ``roi=``, etc. + Not supported on compressed images or bayer images. -.. method:: image.get_regression(thresholds, [roi, x_stride=2, y_stride=1, invert=False, area_threshold=10, pixels_threshold=10, robust=False]) +.. method:: image.get_regression(thresholds, [invert=False, [roi, [x_stride=2, [y_stride=1, [area_threshold=10, [pixels_threshold=10, [robust=False]]]]]]]) Computes a linear regression on all the thresholded pixels in the image. The linear regression is computed using least-squares normally which is fast but @@ -2200,21 +2918,21 @@ Methods thresholding remains low the linear regression will be valid even in the case of up to 30% of the thresholded pixels being outliers (e.g. it's robust). - This method returns a ``line`` object. See this blog post on how to use the + This method returns a `image.line` object. See this blog post on how to use the line object easily: https://openmv.io/blogs/news/linear-regression-line-following ``thresholds`` must be a list of tuples ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you - want to track. You may pass up to 16 threshold tuples in one - ``image.find_blobs`` call. For grayscale images each tuple needs to contain - two values - a min grayscale value and a max grayscale value. Only pixel - regions that fall between these thresholds will be considered. For RGB565 - images each tuple needs to have six values (l_lo, l_hi, a_lo, a_hi, b_lo, - b_hi) - which are minimums and maximums for the LAB L, A, and B channels - respectively. 
For easy usage this function will automatically fix swapped - min and max values. Additionally, if a tuple is larger than six values the - rest are ignored. Conversely, if the tuple is too short the rest of the - thresholds are assumed to be zero. + want to track. For + grayscale images each tuple needs to contain two values - a min grayscale + value and a max grayscale value. Only pixel regions that fall between these + thresholds will be considered. For RGB565 images each tuple needs to have + six values (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi) - which are minimums and + maximums for the LAB L, A, and B channels respectively. For easy usage this + function will automatically fix swapped min and max values. Additionally, + if a tuple is larger than six values the rest are ignored. Conversely, if the + tuple is too short the rest of the thresholds are assumed to be at maximum + range. .. note:: @@ -2226,55 +2944,46 @@ Methods manually determine the thresholds versus using the upper and lower quartile statistics because they are too tight. - The latest version of OpenMV IDE features a threshold editor to help make - picking thresholds easer. It lets you control the threshold with sliders - so you can see what the thresholds are segmenting. + You may also determine color thresholds by going into + ``Tools->Machine Vision->Threshold Editor`` in OpenMV IDE and selecting + thresholds from the GUI slider window. + + ``invert`` inverts the thresholding operation such that instead of matching + pixels inside of some known color bounds pixels are matched that are outside + of the known color bounds. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - ``x_stride`` is the number of x pixels to skip when searching for a blob. - Once a blob is found the line fill algorithm will be pixel accurate. - Increase ``x_stride`` to speed up finding blobs if blobs are know to be large. + ``x_stride`` is the number of x pixels to skip over when evaluating the image. - ``y_stride`` is the number of y pixels to skip when searching for a blob. - Once a blob is found the line fill algorithm will be pixel accurate. - Increase ``y_stride`` to speed up finding blobs if blobs are know to be large. - - ``invert`` inverts the thresholding operation such that instead of matching - pixels inside of some known color bounds pixels are matched that are outside - of the known color bounds. + ``y_stride`` is the number of y pixels to skip over when evaluating the image. If the regression's bounding box area is less than ``area_threshold`` then None is returned. If the regression's pixel count is less than ``pixel_threshold`` then None is returned. - Not supported on compressed images. - - .. note:: + Not supported on compressed images or bayer images. - All the arguments except ``thresholds`` are keyword arguments and must - be explicitly invoked with their name and an equal sign. - -.. method:: image.find_blobs(thresholds, [roi=Auto, x_stride=2, y_stride=1, invert=False, area_threshold=10, pixels_threshold=10, merge=False, margin=0, threshold_cb=None, merge_cb=None]) +.. method:: image.find_blobs(thresholds, [invert=False, [roi, [x_stride=2, [y_stride=1, [area_threshold=10, [pixels_threshold=10, [merge=False, [margin=0, [threshold_cb=None, [merge_cb=None]]]]]]]]]]) Finds all blobs (connected pixel regions that pass a threshold test) in the - image and returns a list of ``blob`` objects which describe each blob. 
- Please see the ``blob`` object more more information. + image and returns a list of `image.blob` objects which describe each blob. + Please see the `image.blob` object more more information. ``thresholds`` must be a list of tuples ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you - want to track. You may pass up to 16 threshold tuples in one - ``image.find_blobs`` call. For grayscale images each tuple needs to contain - two values - a min grayscale value and a max grayscale value. Only pixel - regions that fall between these thresholds will be considered. For RGB565 - images each tuple needs to have six values (l_lo, l_hi, a_lo, a_hi, b_lo, - b_hi) - which are minimums and maximums for the LAB L, A, and B channels - respectively. For easy usage this function will automatically fix swapped - min and max values. Additionally, if a tuple is larger than six values the - rest are ignored. Conversely, if the tuple is too short the rest of the - thresholds are assumed to be zero. + want to track. You may pass up to 16 threshold tuples in one call. For + grayscale images each tuple needs to contain two values - a min grayscale + value and a max grayscale value. Only pixel regions that fall between these + thresholds will be considered. For RGB565 images each tuple needs to have + six values (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi) - which are minimums and + maximums for the LAB L, A, and B channels respectively. For easy usage this + function will automatically fix swapped min and max values. Additionally, + if a tuple is larger than six values the rest are ignored. Conversely, if the + tuple is too short the rest of the thresholds are assumed to be at maximum + range. .. note:: @@ -2286,9 +2995,13 @@ Methods manually determine the thresholds versus using the upper and lower quartile statistics because they are too tight. - The latest version of OpenMV IDE features a threshold editor to help make - picking thresholds easer. It lets you control the threshold with sliders - so you can see what the thresholds are segmenting. + You may also determine color thresholds by going into + ``Tools->Machine Vision->Threshold Editor`` in OpenMV IDE and selecting + thresholds from the GUI slider window. + + ``invert`` inverts the thresholding operation such that instead of matching + pixels inside of some known color bounds pixels are matched that are outside + of the known color bounds. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the @@ -2296,30 +3009,26 @@ Methods ``x_stride`` is the number of x pixels to skip when searching for a blob. Once a blob is found the line fill algorithm will be pixel accurate. - Increase ``x_stride`` to speed up finding blobs if blobs are know to be large. + Increase ``x_stride`` to speed up finding blobs if blobs are known to be large. ``y_stride`` is the number of y pixels to skip when searching for a blob. Once a blob is found the line fill algorithm will be pixel accurate. - Increase ``y_stride`` to speed up finding blobs if blobs are know to be large. - - ``invert`` inverts the thresholding operation such that instead of matching - pixels inside of some known color bounds pixels are matched that are outside - of the known color bounds. + Increase ``y_stride`` to speed up finding blobs if blobs are known to be large. If a blob's bounding box area is less than ``area_threshold`` it is filtered out. 
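+
+   For reference, a minimal blob-tracking sketch (the LAB threshold below is an
+   arbitrary placeholder; tune it for your own target)::
+
+       import sensor
+
+       sensor.reset()
+       sensor.set_pixformat(sensor.RGB565)
+       sensor.set_framesize(sensor.QVGA)
+       sensor.skip_frames()
+
+       generic_red = (30, 100, 15, 127, 15, 127)  # (l_lo, l_hi, a_lo, a_hi, b_lo, b_hi)
+       while True:
+           img = sensor.snapshot()
+           for blob in img.find_blobs([generic_red], pixels_threshold=100, area_threshold=100):
+               img.draw_rectangle(blob.rect())  # mark each matching region
+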
If a blob's pixel count is less than ``pixel_threshold`` it is filtered out. - ``merge`` if True merges all not filtered out blobs who's bounding + ``merge`` if True merges all not filtered out blobs whos bounding rectangles intersect each other. ``margin`` can be used to increase or decrease the size of the bounding rectangles for blobs during the - intersection test. For example, with a margin of 1 blobs who's bounding + intersection test. For example, with a margin of 1 blobs whos bounding rectangles are 1 pixel away from each other will be merged. Merging blobs allows you to implement color code tracking. Each blob object has a ``code`` value which is a bit vector made up of 1s for each color - threshold. For example, if you pass ``image.find_blobs`` two color + threshold. For example, if you pass `image.find_blobs` two color thresholds then the first threshold has a code of 1 and the second 2 (a third threshold would be 4 and a fourth would be 8 and so on). Merged blobs logically OR all their codes together so that you know what colors produced @@ -2330,7 +3039,7 @@ Methods do not fully track all the pixels of an object you are trying to follow. Finally, if you want to merge blobs, but, don't want two color thresholds to - be merged then just call ``image.find_blobs`` twice with separate thresholds + be merged then just call `image.find_blobs` twice with separate thresholds so that blobs aren't merged. ``threshold_cb`` may be set to the function to call on every blob after its @@ -2343,79 +3052,66 @@ Methods two arguments - the two blob objects to be merged. The call back then must return True to merge the blobs or False to prevent merging the blobs. - Not supported on compressed images. - - .. note:: - - All the arguments except ``thresholds`` are keyword arguments and must - be explicitly invoked with their name and an equal sign. - -.. method:: image.find_lines(roi=Auto, x_stride=2, y_stride=1, threshold=1000, theta_margin=25, rho_margin=25) + Not supported on compressed images or bayer images. - Finds all infinite lines in the image using the hough transform. Returns a list - of ``line`` objects (see above). +.. method:: image.find_lines([roi, [x_stride=2, [y_stride=1, [threshold=1000, [theta_margin=25, [rho_margin=25]]]]]]) - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. + Finds all infinite lines in the image using the hough transform. Returns a list + of `image.line` objects. - ``x_stride`` is the number of x pixels to skip when doing the hough transform. - Only increase this if lines you are searching for are large and bulky. - - ``y_stride`` is the number of y pixels to skip when doing the hough transform. - Only increase this if lines you are searching for are large and bulky. - - ``threshold`` controls what lines are detected from the hough transform. Only - lines with a magnitude greater than or equal to ``threshold`` are returned. The - right value of ``threshold`` for your application is image dependent. Note that - the magnitude of a line is the sum of all sobel filter magnitudes of pixels - that make up that line. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. - ``theta_margin`` controls the merging of detected lines. Lines which are - ``theta_margin`` degrees apart and ``rho_margin`` rho apart are merged. 
+   ``x_stride`` is the number of x pixels to skip when doing the hough transform.
+   Only increase this if lines you are searching for are large and bulky.

-   ``rho_margin`` controls the merging of detected lines. Lines which are
-   ``theta_margin`` degrees apart and ``rho_margin`` rho apart are merged.
+   ``y_stride`` is the number of y pixels to skip when doing the hough transform.
+   Only increase this if lines you are searching for are large and bulky.

-   This method working by running the sobel filter over the image and taking
-   the magnitude and gradient responses from the sobel filter to feed a hough
-   transform. It does not require any preprocessing on the image first. However,
-   my cleaning up the image filter you may get more stable results.
+   ``threshold`` controls what lines are detected from the hough transform. Only
+   lines with a magnitude greater than or equal to ``threshold`` are returned. The
+   right value of ``threshold`` for your application is image dependent. Note that
+   the magnitude of a line is the sum of all sobel filter magnitudes of pixels
+   that make up that line.

-   .. note::
+   ``theta_margin`` controls the merging of detected lines. Lines which are
+   ``theta_margin`` degrees apart and ``rho_margin`` rho apart are merged.

-      All the arguments are keyword arguments and must be explicitly invoked
-      with their name and an equal sign.
+   ``rho_margin`` controls the merging of detected lines. Lines which are
+   ``theta_margin`` degrees apart and ``rho_margin`` rho apart are merged.

-.. method:: image.find_line_segments(roi=Auto, merge_distance=0, max_theta_difference=15)
+   This method works by running the sobel filter over the image and taking
+   the magnitude and gradient responses from the sobel filter to feed a hough
+   transform. It does not require any preprocessing on the image first. However,
+   by cleaning up the image using filtering you may get more stable results.

-   Finds line segments in the image using the hough transform. Returns a list
-   of ``line`` objects (see above).
+   Not supported on compressed images or bayer images.

-   ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not
-   specified, it is equal to the image rectangle. Only pixels within the
-   ``roi`` are operated on.
+.. method:: image.find_line_segments([roi, [merge_distance=0, [max_theta_difference=15]]])

-   ``merge_distance`` specifies the maximum number of pixels two line segements
-   can be seperated by each other (at any point on one line) to be merged.
+   Finds line segments in the image using the hough transform. Returns a list
+   of `image.line` objects.

-   ``max_theta_difference`` is the maximum theta difference in degrees two line
-   segements that are ``merge_distance`` above to be merged.
+   ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not
+   specified, it is equal to the image rectangle. Only pixels within the
+   ``roi`` are operated on.

-   This method uses the LSD library (also used by OpenCV) to find line segements
-   in the image. It's somewhat slow but very accurate and lines don't jump around.
+   ``merge_distance`` specifies the maximum number of pixels two line segments
+   can be separated by each other (at any point on one line) to be merged.

-   Only supported on the OpenMV Cam M7 or better (not enough RAM on the M4).
+   ``max_theta_difference`` is the maximum theta difference in degrees that two
+   line segments ``merge_distance`` apart may have and still be merged.

-   .. 
note:: + This method uses the LSD library (also used by OpenCV) to find line segements + in the image. It's somewhat slow but very accurate and lines don't jump around. - All the arguments are keyword arguments and must be explicitly invoked - with their name and an equal sign. + Not supported on compressed images or bayer images. -.. method:: image.find_circles([roi=Auto, x_stride=2, y_stride=1, threshold=1600, x_margin=10, y_margin=10, r_margin=10]) +.. method:: image.find_circles([roi, [x_stride=2, [y_stride=1, [threshold=2000, [x_margin=10, [y_margin=10, [r_margin=10]]]]]]]) Finds circles in the image using the hough transform. Returns a list of - ``circle`` objects (see above). + `image.circle` objects. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the @@ -2442,22 +3138,14 @@ Methods ``r_margin`` controls the merging of detected circles. Circles which are ``x_margin``, ``y_margin``, and ``r_margin`` pixels apart are merged. - .. note:: - - All the arguments are keyword arguments and must be explicitly invoked - with their name and an equal sign. - - .. note:: + Not supported on compressed images or bayer images. - This method is only for the OpenMV Cam M7. +.. method:: image.find_rects([roi=Auto, [threshold=10000]]) -.. method:: image.find_rects([roi=Auto, threshold=10000]) - - Find rectangles in the image using the quad detection algorithm used to find - apriltags. Works best of rectangles that have good contrast against their - background. The apriltag quad detection algorithm can handle - any scale/rotation/shear on rectangles. Returns a list of ``rect`` objects - (see above). + Find rectangles in the image using the same quad detection algorithm used to + find apriltags. Works best of rectangles that have good contrast against the + background. The apriltag quad detection algorithm can handle any + scale/rotation/shear on rectangles. Returns a list of `image.rect` objects. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the @@ -2465,56 +3153,42 @@ Methods Rectangles which have an edge magnitude (which is computed by sliding the sobel operator across all pixels on the edges of the rectangle and summing - the value) less than ``threshold`` are filtered out of the returned list. + their values) less than ``threshold`` are filtered out of the returned list. The correct value of ``threshold`` is depended on your application/scene. - .. note:: - - All the arguments are keyword arguments and must be explicitly invoked - with their name and an equal sign. + Not supported on compressed images or bayer images. - .. note:: +.. method:: image.find_qrcodes([roi]) - This method is only for the OpenMV Cam M7. - -.. method:: image.find_qrcodes(roi=Auto) - - Finds all qrcodes within the ``roi`` and returns a list of ``qrcode`` - objects. Please see the ``qrcode`` object for more information. + Finds all qrcodes within the ``roi`` and returns a list of `image.qrcode` + objects. Please see the `image.qrcode` object for more information. QR Codes need to be relatively flat in the image for this method to work. 
You can achieve a flatter image that is not effected by lens distortion by - either using the ``sensor.set_windowing`` function to zoom in the on the - center of the lens, ``image.lens_corr`` to undo lens barrel distortion, or + either using the `sensor.set_windowing()` function to zoom in the on the + center of the lens, `image.lens_corr()` to undo lens barrel distortion, or by just changing out the lens for something with a narrower fields of view. There are machine vision lenses available which do not cause barrel distortion but they are much more expensive to than the standard lenses - supplied by OpenMV so we don't stock them (since they wouldn't sell). + supplied by OpenMV. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - Only supported on the OpenMV Cam M7 or better (not enough RAM on the M4). - - Not supported on compressed images. - - .. note:: - - ``roi`` is a keyword argument which must be explicitly invoked in the - function call by writing ``roi=``. + Not supported on compressed images or bayer images. -.. method:: image.find_apriltags(roi=Auto, families=image.TAG36H11, fx=Auto, fy=Auto, cx=Auto, cy=Auto) +.. method:: image.find_apriltags([roi, [families=image.TAG36H11, [fx, [fy, [cx, [cy]]]]]]) - Finds all apriltags within the ``roi`` and returns a list of ``apriltag`` - objects. Please see the ``apriltag`` object for more information. + Finds all apriltags within the ``roi`` and returns a list of `image.apriltag` + objects. Please see the `image.apriltag` object for more information. Unlike QR Codes, AprilTags can be detected at much farther distances, worse lighting, in warped images, etc. AprilTags are robust too all kinds of image distortion issues that QR Codes are not to. That said, AprilTags can only encode a numeric ID as their payload. - AprilTags can also be used for localization purposes. Each ``apriltag`` + AprilTags can also be used for localization purposes. Each `image.apriltag` object returns its translation and rotation from the camera. The units of the translation are determined by ``fx``, ``fy``, ``cx``, and ``cy`` which are the focal lengths and center points of the image in the X and @@ -2531,15 +3205,15 @@ Methods ``families`` is bitmask of tag families to decode. It is the logical OR of: - * image.TAG16H5 - * image.TAG25H7 - * image.TAG25H9 - * image.TAG36H10 - * image.TAG36H11 - * image.ARTOOLKIT + * `image.TAG16H5` + * `image.TAG25H7` + * `image.TAG25H9` + * `image.TAG36H10` + * `image.TAG36H11` + * `image.ARTOOLKIT` - By default it is just ``image.TAG36H11`` which is the best tag family to - use. Note that ``find_apriltags`` slows down a bit per enabled tag family. + By default it is just `image.TAG36H11` which is the best tag family to + use. Note that `image.find_apriltags()` slows down per enabled tag family. ``fx`` is the camera X focal length in pixels. For the standard OpenMV Cam this is (2.8 / 3.984) * 656. Which is the lens focal length in mm, divided @@ -2557,29 +3231,21 @@ Methods ``cy`` is the image center which is just ``image.height()/2``. This is not ``roi.h()/2``. - Not supported on compressed images. - - Only supported on the OpenMV Cam M7 or better (not enough RAM on the M4). - - .. note:: - - ``roi``, ``families``, ``fx``, ``fy``, ``cx``, and ``cy`` are keyword - arguments which must be explicitly invoked in the function call by - writing ``roi=``, ``families=``, ``fx=``, ``fy=``, ``cx=``, and ``cy=``. 
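+
+   As a sketch of how these parameters fit together (the focal length math below
+   uses the standard OpenMV Cam numbers quoted above scaled to QQVGA, which is an
+   assumption on my part; the accessor names follow the `image.apriltag` object
+   documentation)::
+
+       import sensor
+
+       sensor.reset()
+       sensor.set_pixformat(sensor.GRAYSCALE)
+       sensor.set_framesize(sensor.QQVGA)
+       sensor.skip_frames()
+
+       img = sensor.snapshot()
+       fx = (2.8 / 3.984) * 160  # lens focal length (mm) / sensor width (mm) * width in pixels
+       fy = (2.8 / 2.952) * 120  # lens focal length (mm) / sensor height (mm) * height in pixels
+       cx, cy = img.width() // 2, img.height() // 2
+       for tag in img.find_apriltags(fx=fx, fy=fy, cx=cx, cy=cy):
+           print(tag.id(), tag.x_translation(), tag.y_translation(), tag.z_translation())
+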
+ Not supported on compressed images or bayer images. -.. method:: image.find_datamatrices(roi=Auto, effort=200) +.. method:: image.find_datamatrices([roi, [effort=200]]) - Finds all datamatrices within the ``roi`` and returns a list of ``datamatrix`` - objects. Please see the ``datamatrix`` object for more information. + Finds all datamatrices within the ``roi`` and returns a list of `image.datamatrix` + objects. Please see the `image.datamatrix` object for more information. Data Matrices need to be relatively flat in the image for this method to work. You can achieve a flatter image that is not effected by lens distortion by - either using the ``sensor.set_windowing`` function to zoom in the on the - center of the lens, ``image.lens_corr`` to undo lens barrel distortion, or + either using the `sensor.set_windowing()` function to zoom in the on the + center of the lens, `image.lens_corr()` to undo lens barrel distortion, or by just changing out the lens for something with a narrower fields of view. There are machine vision lenses available which do not cause barrel distortion but they are much more expensive to than the standard lenses - supplied by OpenMV so we don't stock them (since they wouldn't sell). + supplied by OpenMV. ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the @@ -2594,17 +3260,12 @@ Methods as you like. But, any values above 240 or so do not result in much increase in the detection rate. - Not supported on compressed images. - - .. note:: - - ``roi`` and ``effort`` are keyword arguments which must be explicitly - invoked in the function call by writing ``roi=`` and/or ``effort=``. + Not supported on compressed images or bayer images. -.. method:: image.find_barcodes(roi=Auto) +.. method:: image.find_barcodes([roi]) - Finds all 1D barcodes within the ``roi`` and returns a list of ``barcode`` - objects. Please see the ``barcode`` object for more information. + Finds all 1D barcodes within the ``roi`` and returns a list of `image.barcode` + objects. Please see the `image.barcode` object for more information. For best results use a 640 by 40/80/160 window. The lower the vertical res the faster everything will run. Since bar codes are linear 1D images you @@ -2616,38 +3277,33 @@ Methods This function supports all these 1D barcodes (basically all barcodes): - * image.EAN2 - * image.EAN5 - * image.EAN8 - * image.UPCE - * image.ISBN10 - * image.UPCA - * image.EAN13 - * image.ISBN13 - * image.I25 - * image.DATABAR (RSS-14) - * image.DATABAR_EXP (RSS-Expanded) - * image.CODABAR - * image.CODE39 - * image.PDF417 - * image.CODE93 - * image.CODE128 + * `image.EAN2` + * `image.EAN5` + * `image.EAN8` + * `image.UPCE` + * `image.ISBN10` + * `image.UPCA` + * `image.EAN13` + * `image.ISBN13` + * `image.I25` + * `image.DATABAR` (RSS-14) + * `image.DATABAR_EXP` (RSS-Expanded) + * `image.CODABAR` + * `image.CODE39` + * `image.PDF417` + * `image.CODE93` + * `image.CODE128` ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - Not supported on compressed images. + Not supported on compressed images or bayer images. - .. note:: - - ``roi`` is a keyword argument which must be explicitly invoked in the - function call by writing ``roi=``. - -.. method:: image.find_displacement(template, [roi, template_roi, logpolar=False]) +.. 
method:: image.find_displacement(template, [roi, [template_roi, [logpolar=False]]])

    Find the translation offset of the this image from the template. This
-   method can be used to do optical flow. This method returns a ``displacement``
+   method can be used to do optical flow. This method returns an `image.displacement`
    object with the results of the displacement calculation using phase correlation.

    ``roi`` is the region-of-interest rectangle (x, y, w, h) to work in.
@@ -2660,42 +3316,55 @@ Methods
    location in the image. You may slide smaller rois arround a larger image
    to get an optical flow gradient image...

-   ``image.find_displacement`` normally computes the x/y translation between two
+   `image.find_displacement()` normally computes the x/y translation between two
    images. However, if you pass ``logpolar=True`` it will instead find rotation
-   and scale changes between the two images. The same ``displacement`` object
+   and scale changes between the two images. The same `image.displacement` object
    result encodes both possible repsonses.

+   Not supported on compressed images or bayer images.
+
    .. note::

-      Please use this method on power-of-2 image sizes (e.g. ``sensor.B64X64``).
+      Please use this method on power-of-2 image sizes (e.g. `sensor.B64X64`).

-.. method:: image.midpoint_pooled(x_div, y_div, bias=0.5)
+   Not supported on compressed images or bayer images.

-   Finds the midpoint of ``x_div`` * ``y_div`` squares in the image and returns
-   a new image composed of the midpoint of each square.
+.. method:: image.find_number(roi)

-   A ``bias`` of 0 returns the min of each area while a ``bias`` of 1.0 returns
-   the max of each area.
+   Runs a LENET-6 CNN trained on the MNIST data set to detect numbers in
+   a 28x28 ROI located anywhere on the image. Returns a tuple containing an
+   integer and a float representing the number detected (0-9) and the
+   confidence of the detection (0-1).

-   This methods is useful for preparing images for phase_correlation.
+   ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not
+   specified, it is equal to the image rectangle. Only pixels within the
+   ``roi`` are operated on.

-   Not supported on compressed images.
+   Only works on grayscale images.

    .. note::

-      ``bias`` is a keyword argument which must be explicitly
-      invoked in the function call by writing ``bias=``.
+      This method is experimental and likely to be removed in the future once
+      running any CNN trained on the PC using Caffe is available.

-.. method:: image.mean_pooled(x_div, y_div, bias=0.5)
+.. method:: image.classify_object(roi)

-   Finds the mean of ``x_div`` * ``y_div`` squares in the image and returns
-   a new image composed of the mean of each square.
+   Runs a CIFAR-10 CNN on an ROI in the image to detect airplanes, automobiles,
+   birds, cats, deer, dogs, frogs, horses, ships, and trucks. This method
+   automatically scales the image to 32x32 internally to feed to the CNN.

-   This methods is useful for preparing images for phase_correlation.
+   ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not
+   specified, it is equal to the image rectangle. Only pixels within the
+   ``roi`` are operated on.

-   Not supported on compressed images.
+   Only works on RGB565 images.

-.. method:: image.find_template(template, threshold, roi=Auto, step=2, search=image.SEARCH_EX)
+   .. note::
+
+      This method is experimental and likely to be removed in the future once
+      running any CNN trained on the PC using Caffe is available.
+
+.. 
method:: image.find_template(template, threshold, [roi, [step=2, [search=image.SEARCH_EX]]]) Tries to find the first location in the image where template matches using Normalized Cross Correlation. Returns a bounding box tuple (x, y, w, h) for @@ -2708,7 +3377,9 @@ Methods prevents false positives while lowering the detection rate while a lower threshold does the opposite. - ``roi`` is the region-of-interest rectangle (x, y, w, h) to search in. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. ``step`` is the number of pixels to skip past while looking for the template. Skipping pixels considerably speeds the algorithm up. This only @@ -2720,24 +3391,17 @@ Methods edges of the image. ``image.SEARCH_EX`` does an exhaustive search for the image but can be much slower than ``image.SEARCH_DS``. - .. note:: - - ``roi``, ``step``, and ``search`` are keyword arguments which must be - explicitly invoked in the function call by writing ``roi=``, ``step=``, - or ``search=``. + Only works on grayscale images. -.. method:: image.find_features(cascade, roi=Auto, threshold=0.5, scale=1.5) +.. method:: image.find_features(cascade, [threshold=0.5, [scale=1.5, [roi]]]) This method searches the image for all areas that match the passed in Haar Cascade and returns a list of bounding box rectangles tuples (x, y, w, h) around those features. Returns an empty list if no features are found. - ``cascade`` is a Haar Cascade object. See ``image.HaarCascade()`` for more + ``cascade`` is a Haar Cascade object. See `image.HaarCascade()` for more details. - ``roi`` is the region-of-interest rectangle (x, y, w, h) to work in. - If not specified, it is equal to the image rectangle. - ``threshold`` is a threshold (0.0-1.0) where a smaller value increase the detection rate while raising the false positive rate. Conversely, a higher value decreases the detection rate while lowering the false positive rate. @@ -2746,13 +3410,11 @@ Methods factor will run faster but will have much poorer image matches. A good value is between 1.35 and 1.5. - Not supported on compressed images. - - .. note:: + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. - ``roi``, ``threshold`` and ``scale`` are keyword arguments which must be - explicitly invoked in the function call by writing ``roi``, - ``threshold=`` or ``scale=``. + Only works on grayscale images. .. method:: image.find_eye(roi) @@ -2760,29 +3422,41 @@ Methods eye. Returns a tuple with the (x, y) location of the pupil in the image. Returns (0,0) if no pupils are found. - To use this function first use ``image.find_features`` with the + To use this function first use `image.find_features()` with the ``frontalface`` HaarCascade to find someone's face. Then use - ``image.find_features`` with the ``eye`` HaarCascade to find the eyes on the - face. Finally, call this method on each eye roi returned by - ``image.find_features`` to get the pupil coordinates. + `image.find_features()` with the ``eye`` HaarCascade to find the eyes on the + face. Finally, call this method on the eye ROI returned by + `image.find_features()` to get the pupil coordinates. - Only for grayscale images. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. 
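+
+   A sketch of the workflow described above (the built-in ``frontalface`` and
+   ``eye`` cascades are the ones referenced in this entry; the stage counts and
+   threshold are illustrative assumptions)::
+
+       import sensor, image
+
+       sensor.reset()
+       sensor.set_pixformat(sensor.GRAYSCALE)
+       sensor.set_framesize(sensor.QVGA)
+       sensor.skip_frames()
+
+       face_cascade = image.HaarCascade("frontalface", stages=25)
+       eye_cascade = image.HaarCascade("eye", stages=24)
+
+       img = sensor.snapshot()
+       for face in img.find_features(face_cascade, threshold=0.5):
+           for eye in img.find_features(eye_cascade, threshold=0.5, roi=face):
+               pupil_x, pupil_y = img.find_eye(eye)  # eye is an (x, y, w, h) roi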
+ + Only works on grayscale images. .. method:: image.find_lbp(roi) Extracts LBP (local-binary-patterns) keypoints from the region-of-interest - (x, y, w, h) tuple. You can then use then use the ``image.match_descriptor`` + (x, y, w, h) tuple. You can then use then use the `image.match_descriptor()` function to compare two sets of keypoints to get the matching distance. - Only for grayscale images. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. + + Only works on grayscale images. -.. method:: image.find_keypoints(roi=Auto, threshold=20, normalized=False, scale_factor=1.5, max_keypoints=100, corner_detector=CORNER_AGAST) +.. method:: image.find_keypoints([roi, [threshold=20, [normalized=False, [scale_factor=1.5, [max_keypoints=100, [corner_detector=image.CORNER_AGAST]]]]]]) Extracts ORB keypoints from the region-of-interest (x, y, w, h) tuple. You - can then use then use the ``image.match_descriptor`` function to compare + can then use then use the `image.match_descriptor()` function to compare two sets of keypoints to get the matching areas. Returns None if no keypoints were found. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. + ``threshold`` is a number (between 0 - 255) which controls the number of extracted corners. For the default AGAST corner detector this should be around 20. FOr the FAST corner detector this should be around 60-80. The @@ -2801,63 +3475,37 @@ Methods decrease this value. ``corner_detector`` is the corner detector algorithm to use which extracts - keypoints from the image. It can be either ``image.FAST`` or - ``image.AGAST``. The FAST corner detector is faster but much less accurate. + keypoints from the image. It can be either `image.CORNER_FAST` or + `image.CORNER_AGAST`. The FAST corner detector is faster but much less accurate. - Only for grayscale images. - - .. note:: + Only works on grayscale images. - ``roi``, ``threshold``, ``normalized``m ``scale_factor``, ``max_keypoints``, - and ``corner_detector`` are keyword argument which must be explicitly - invoked in the function call by writing ``roi=``, ``threshold=``, etc. +.. method:: image.find_edges(edge_type, [threshold]) -.. method:: image.find_lines(roi=Auto, threshold=50) - - For grayscale images only. Finds the lines in a edge detected image using - the Hough Transform. Returns a list of line tuples (x0, y0, x1, y1). - - ``roi`` is the region-of-interest rectangle (x, y, w, h) to work in. - If not specified, it is equal to the image rectangle. - - ``threshold`` may be between 0-255. The lower the threshold the more lines - are pulled out of the image. - - Only for grayscale images. - - .. note:: - - ``roi`` and ``threshold`` are keyword argument which must be explicitly - invoked in the function call by writing ``roi=`` and ``threshold=``. - -.. method:: image.find_edges(edge_type, threshold=[100,200]) - - For grayscale images only. Does edge detection on the image and replaces the - image with an image that only has edges. ``edge_type`` can either be: + Turns the image to black and white leaving only the edges as white pixels. * image.EDGE_SIMPLE - Simple thresholded high pass filter algorithm. * image.EDGE_CANNY - Canny edge detection algorithm. ``threshold`` is a two valued tuple containing a low threshold and high threshold. 
You can control the quality of edges by adjusting these values. + It defaults to (100, 200). - Only for grayscale images. - - .. note:: - - ``threshold`` is keyword argument which must be explicitly invoked in the - function call by writing ``threshold=``. + Only works on grayscale images. -Constants ---------- +.. method:: find_hog([roi, [size=8]]) -.. data:: image.LBP + Replaces the pixels in the ROI with HOG (histogram of orientated graidients) + lines. - Switch for descriptor functions for LBP. + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not + specified, it is equal to the image rectangle. Only pixels within the + ``roi`` are operated on. -.. data:: image.ORB + Only works on grayscale images. - Switch for descriptor functions for ORB. +Constants +--------- .. data:: image.SEARCH_EX diff --git a/docs/library/omv.lcd.rst b/docs/library/omv.lcd.rst index f42567e0cfcd8..90496bee90c09 100644 --- a/docs/library/omv.lcd.rst +++ b/docs/library/omv.lcd.rst @@ -13,7 +13,7 @@ Example usage:: # Setup camera. sensor.reset() sensor.set_pixformat(sensor.RGB565) - sensor.set_framesize(sensor.QQVGA2) # special 128x160 lcd shield resolution. + sensor.set_framesize(sensor.LCD) sensor.skip_frames() lcd.init() @@ -24,7 +24,7 @@ Example usage:: Functions --------- -.. function:: lcd.init(type=1) +.. function:: lcd.init([type=1]) Initializes an attached lcd shield using I/O pins P0, P2, P3, P6, P7, and P8. @@ -33,11 +33,6 @@ Functions * 0: None * 1: lcd shield - .. note:: - - ``type`` is keyword arguments which must be explicitly invoked in the - function call by writing ``type=``. - .. function:: lcd.deinit() Deinitializes the lcd shield freeing up I/O pins. @@ -70,7 +65,7 @@ Functions The lcd shield backlight pin is pulled up (on) by default. P6 which controls the backlight is left floating by default. Calling this function puts P6 - into opendrain mode to control the backlight (False=low - True=float). + into open drain mode to control the backlight (False=low - True=float). If you want to reuse P6 while the lcd shield is attached then cut the backlight jumper on the lcd shield (leaving the backlight permanently on). @@ -79,11 +74,11 @@ Functions .. function:: lcd.get_backlight() Returns the backlight state (False for off - True for on) if - ``set_backlight`` had been called previously. + `lcd.set_backlight()` had been called previously. -.. function:: lcd.display(image, roi=Auto) +.. function:: lcd.display(image, [roi]) - Displays an ``image`` (GRAYSCALE or RGB565) on the lcd screen. + Displays an `image` on the lcd screen. ``roi`` is a region-of-interest rectangle tuple (x, y, w, h). If not specified, it is equal to the image rectangle (0, 0, image.w, image.h). @@ -104,11 +99,6 @@ Functions centered on screen and pixels that do not fit will not be shown (i.e. the lcd screen shows the center of the ``roi`` like a window). - .. note:: - - ``roi`` is keyword arguments which must be explicitly invoked in the - function call by writing ``roi=``. - .. function:: lcd.clear() Clears the lcd screen to black. diff --git a/docs/library/omv.mjpeg.rst b/docs/library/omv.mjpeg.rst index 0e36ce56f2585..77e632e61b95b 100644 --- a/docs/library/omv.mjpeg.rst +++ b/docs/library/omv.mjpeg.rst @@ -11,7 +11,7 @@ class Mjpeg -- Mjpeg recorder You can use the mjpeg module to record large video clips. Note that mjpeg files save compressed image data. So, they are best for recording long video clips that -you want to share. Use ``gif`` for short clips. +you want to share. Use `gif` for short clips. 
Example usage:: @@ -38,7 +38,7 @@ Example usage:: Constructors ------------ -.. class:: mjpeg.Mjpeg(filename, width=Auto, height=Auto) +.. class:: mjpeg.Mjpeg(filename, [width, [height]]) Create a Mjpeg object which you can add frames to. ``filename`` is the path to save the mjpeg recording to. @@ -49,11 +49,6 @@ Constructors ``height`` is automatically set equal to the image sensor vertical resolution unless explicitly overridden. - .. note:: - - ``width`` and ``height`` are keyword arguments which must be explicitly - invoked in the function call by writing ``width=`` and ``height=``. - Methods ------- @@ -69,19 +64,14 @@ Methods Returns the file size of the mjpeg so far. This value is updated after adding frames. -.. method:: mjpeg.add_frame(image, quality=50) +.. method:: mjpeg.add_frame(image, [quality=50]) Add an image to the mjpeg recording. The image width, height, and color mode, must be equal to the same width, height, and color modes used in the constructor for the mjpeg. ``quality`` is the jpeg compression quality to use to compress the image if - it's not in JPEG format (either RGB565 or GRAYSCALE format). - - .. note:: - - ``quality`` is keyword arguments which must be explicitly invoked in the - function call by writing ``quality=``. + it's not in JPEG format (either `sensor.RGB565` or `sensor.GRAYSCALE` format). .. method:: mjpeg.close(fps) diff --git a/docs/library/omv.omv.rst b/docs/library/omv.omv.rst new file mode 100644 index 0000000000000..fa28b10f70c6d --- /dev/null +++ b/docs/library/omv.omv.rst @@ -0,0 +1,41 @@ +:mod:`omv` --- OpenMV Cam Information +===================================== + +.. module:: omv + :synopsis: OpenMV Cam Information + +The ``omv`` module is used to get OpenMV Cam information. + +Functions +--------- + +.. function:: omv.version_major() + + Returns the major version number (int). + +.. function:: omv.version_minor() + + Returns the minor version number (int). + +.. function:: omv.version_patch() + + Returns the patch version number (int). + +.. function:: omv.version_string() + + Returns the version string (e.g. "2.8.0"). + +.. function:: omv.arch() + + Returns the board architecture string. This string is really just meant for + OpenMV IDE but you can get it with this function. + +.. function:: omv.board_type() + + Returns the board type string. This string is really just meant for + OpenMV IDE but you can get it with this function. + +.. function:: omv.board_id() + + Returns the board id string. This string is really just meant for + OpenMV IDE but you can get it with this function. diff --git a/docs/library/omv.sensor.rst b/docs/library/omv.sensor.rst index 596ce0cbe2fa5..3568d5a929e3b 100644 --- a/docs/library/omv.sensor.rst +++ b/docs/library/omv.sensor.rst @@ -27,42 +27,34 @@ Functions Initializes the camera sensor. +.. function:: sensor_sleep(enable) + + Puts the camera to sleep if enable is True. Otherwise, wakes it back up. + .. function:: sensor.flush() Copies whatever was in the frame buffer to the IDE. You should call this method to display the last image your OpenMV Cam takes if it's not running - a script with an infinite loop. + a script with an infinite loop. Note that you'll need to add a delay time + of about a second after your script finishes for the IDE to grab the image + from your camera. Otherwise, this method will have no effect. -.. function:: sensor.snapshot([line_filter=None]) +.. function:: sensor.snapshot() Takes a picture using the camera and returns an ``image`` object. 
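+
+   A minimal sketch of the typical capture loop::
+
+       import sensor
+
+       sensor.reset()                       # initialize the camera sensor
+       sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
+       sensor.set_framesize(sensor.QVGA)
+       sensor.skip_frames()                 # let the camera settle after changing settings
+
+       while True:
+           img = sensor.snapshot()          # img lives in the frame buffer described below
+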
- ``line_filter`` may be a python function callback used to process each line - of pixels as they come in from the camera. For example:: - - # This callback just copies the src to dst. - # Note source is YUYV destination is 1BPP Grayscale - def line_filter_call_back(src, dst): - for i in range(len(src)): - dst[i] = src[i>>1] - sensor.snapshot(line_filter=line_filter_call_back) - - # This callback copies and thresholds src to dst. - # Note source is YUYV destination is 1BPP Grayscale - def line_filter_call_back_2(src, dst): - for i in range(len(src)): - dst[i] = if src[i>>1] > 128 then 0xFF or 0x00 - sensor.snapshot(line_filter=line_filter_call_back_2) - - .. note:: - - The OpenMV Cam M4 is not fast enough to execute the line filter function - on large images per line. Do not use. - - .. note:: + The OpenMV Cam has two memory areas for images. The classical stack/heap + area used for normal MicroPython processing can store small images within + it's heap. However, the MicroPython heap is only about ~100 KB which is not + enough to store larger images. So, your OpenMV Cam has a secondary frame + buffer memory area that stores images taken by `sensor.snapshot()`. Images + are stored on the bottom of this memory area. Any memory that's left + over is then available for use by the frame buffer stack which your OpenMV + Cam's firmware uses to hold large temporary data structures for image + processing algorithms. - ``line_filter`` is keyword arguments which must be explicitly invoked in - the function call by writing ``line_filter=``. + If you need room to hold multiple frames you may "steal" frame buffer space + by calling `sensor.alloc_extra_fb()`. .. function:: sensor.skip_frames([n, time]) @@ -92,15 +84,17 @@ Functions .. function:: sensor.get_fb() (Get Frame Buffer) Returns the image object returned by a previous call of - ``sensor.snapshot()``. If ``sensor.snapshot()`` had not been called before + `sensor.snapshot()`. If `sensor.snapshot()` had not been called before then ``None`` is returned. .. function:: sensor.get_id() Returns the camera module ID. - * sensor.OV7725: Rolling shutter sensor module. - * sensor.MT9V034: Global shutter sensor module. + * `sensor.OV9650`: First gen OpenMV Cam sensor - never released. + * `sensor.OV2640`: Second gen OpenMV Cam sensor - never released. + * `sensor.OV7725`: Rolling shutter sensor module. + * `sensor.MT9V034`: Global shutter sensor module. .. function:: sensor.alloc_extra_fb(width, height, pixformat) @@ -119,7 +113,7 @@ Functions by taking space away from our frame buffer stack memory which we use for computer vision algorithms. That said, this also means you'll run out of memory more easily if you try to execute more memory intensive machine - vision algorithms like ``find_apriltags``. + vision algorithms like `image.find_apriltags`. .. function:: sensor.dealloc_extra_db() @@ -133,7 +127,7 @@ Functions fixed by firmware. The stack then grows down until it hits the heap. Next, frame buffers are stored in a secondary memory region. Memory is liad out with the main frame buffer on the bottom and the frame buffer - stack on the top. When ``snapshot()`` is called it fills the frame bufer + stack on the top. When `sensor.snapshot()` is called it fills the frame bufer from the bottom. The frame buffer stack is then able to use whatever is left over. This memory allocation method is extremely efficent for computer vision on microcontrollers. @@ -142,93 +136,79 @@ Functions Sets the pixel format for the camera module. 
- * sensor.GRAYSCALE: 8-bits per pixel. - * sensor.RGB565: 16-bits per pixel. - -.. function:: sensor.sleep(enable) - - Puts the camera into sleep mode. This saves about 40 mA. Automatically - cleared on reset. - -.. function:: sensor.set_framerate(rate) - - Sets the frame rate for the camera module. - - .. note:: Deprecated... do not use. + * `sensor.GRAYSCALE`: 8-bits per pixel. + * `sensor.RGB565`: 16-bits per pixel. + * `sensor.BAYER`: 8-bits per pixel bayer pattern. .. function:: sensor.set_framesize(framesize) Sets the frame size for the camera module. - * sensor.QQCIF: 88x72 - * sensor.QCIF: 176x144 - * sensor.CIF: 352x288 - * sensor.QQSIF: 88x60 - * sensor.QSIF: 176x120 - * sensor.SIF: 352x240 - * sensor.QQQQVGA: 40x30 - * sensor.QQQVGA: 80x60 - * sensor.QQVGA: 160x120 - * sensor.QVGA: 320x240 - * sensor.VGA: 640x480 - * sensor.HQQQVGA: 80x40 - * sensor.HQQVGA: 160x80 - * sensor.HQVGA: 240x160 - * sensor.LCD: 128x160 (for use with the lcd shield) - * sensor.QQVGA2: 128x160 (for use with the lcd shield) - * sensor.B40x30: 160x120 (for use with ``image.find_displacement``) - * sensor.B64x32: 160x120 (for use with ``image.find_displacement``) - * sensor.B64x64: 160x120 (for use with ``image.find_displacement``) - * sensor.SVGA: 800x600 (only in JPEG mode for the OV2640 sensor) - * sensor.SXGA: 1280x1024 (only in JPEG mode for the OV2640 sensor) - * sensor.UXGA: 1600x1200 (only in JPEG mode for the OV2640 sensor) + * `sensor.QQCIF`: 88x72 + * `sensor.QCIF`: 176x144 + * `sensor.CIF`: 352x288 + * `sensor.QQSIF`: 88x60 + * `sensor.QSIF`: 176x120 + * `sensor.SIF`: 352x240 + * `sensor.QQQQVGA`: 40x30 + * `sensor.QQQVGA`: 80x60 + * `sensor.QQVGA`: 160x120 + * `sensor.QVGA`: 320x240 + * `sensor.VGA`: 640x480 + * `sensor.HQQQVGA`: 80x40 + * `sensor.HQQVGA`: 160x80 + * `sensor.HQVGA`: 240x160 + * `sensor.B64X32`: 64x32 (for use with `image.find_displacement()`) + * `sensor.B64X64`: 64x64 (for use with `image.find_displacement()`) + * `sensor.B128X64`: 128x64 (for use with `image.find_displacement()`) + * `sensor.B128X128`: 128x128 (for use with `image.find_displacement()`) + * `sensor.LCD`: 128x160 (for use with the lcd shield) + * `sensor.QQVGA2`: 128x160 (for use with the lcd shield) + * `sensor.SVGA`: 800x600 (only in JPEG mode for the OV2640 sensor) + * `sensor.SXGA`: 1280x1024 (only in JPEG mode for the OV2640 sensor) + * `sensor.UXGA`: 1600x1200 (only in JPEG mode for the OV2640 sensor) .. function:: sensor.set_windowing(roi) Sets the resolution of the camera to a sub resolution inside of the current - resolution. For example, setting the resolution to sensor.VGA and then - the windowing to (120, 140, 200, 200) sets sensor.snapshot() to capture + resolution. For example, setting the resolution to `sensor.VGA` and then + the windowing to (120, 140, 200, 200) sets `sensor.snapshot()` to capture the 200x200 center pixels of the VGA resolution outputted by the camera sensor. You can use windowing to get custom resolutions. Also, when using windowing on a larger resolution you effectively are digital zooming. - ``roi`` is a rect tuple (x, y, w, h). + ``roi`` is a rect tuple (x, y, w, h). However, you may just pass (w, h) and + the ``roi`` will be centered on the frame. .. function:: sensor.set_gainceiling(gainceiling) Set the camera image gainceiling. 2, 4, 8, 16, 32, 64, or 128. - .. note:: You should never need to call this function. Don't use. - .. function:: sensor.set_contrast(constrast) Set the camera image contrast. -3 to +3. - .. note:: You should never need to call this function. 
-
 .. function:: sensor.set_brightness(brightness)

     Set the camera image brightness. -3 to +3.

-    .. note:: You should never need to call this function. Don't use.
-
 .. function:: sensor.set_saturation(saturation)

     Set the camera image saturation. -3 to +3.

-    .. note:: You should never need to call this function. Don't use.
-
 .. function:: sensor.set_quality(quality)

     Set the camera image JPEG compression quality. 0 - 100.

-    .. note:: Only for the OV2640 camera.
+    .. note::
+
+        Only for the OV2640 camera.

 .. function:: sensor.set_colorbar(enable)

     Turns color bar mode on (True) or off (False). Defaults to off.

-.. function:: sensor.set_auto_gain(enable, [gain_db=-1, gain_db_ceiling=-1])
+.. function:: sensor.set_auto_gain(enable, [gain_db=-1, [gain_db_ceiling]])

     ``enable`` turns auto gain control on (True) or off (False). The camera
     will startup with auto gain control on.

@@ -246,7 +226,7 @@ Functions

     Returns the current camera gain value in decibels (float).

-.. function:: sensor.set_auto_exposure(enable, [exposure_us=-1])
+.. function:: sensor.set_auto_exposure(enable, [exposure_us])

     ``enable`` turns auto exposure control on (True) or off (False). The camera
     will startup with auto exposure control on.

@@ -265,7 +245,7 @@ Functions

     Returns the current camera exposure value in microseconds (int).

-.. function:: sensor.set_auto_whitebal(enable, [rgb_gain_db=(-1,-1,-1)])
+.. function:: sensor.set_auto_whitebal(enable, [rgb_gain_db])

     ``enable`` turns auto white balance on (True) or off (False). The camera
     will startup with auto white balance on.

@@ -290,15 +270,6 @@ Functions

     Turns vertical flip mode on (True) or off (False). Defaults to off.

-.. function:: sensor.set_special_effect(effect)
-
-    Sets a camera image special effect:
-
-    * sensor.NORMAL: Normal Image
-    * sensor.NEGATIVE: Negative Image
-
-    .. note:: Deprecated... do not use.
-
 .. function:: sensor.set_lens_correction(enable, radi, coef)

     ``enable`` True to enable and False to disable (bool).

@@ -307,7 +278,7 @@ Functions
 .. function:: sensor.set_vsync_output(pin_object)

-    ``pin_object`` created with ``pyb.Pin``. The VSYNC signal from the camera
+    ``pin_object`` created with `pyb.Pin()`. The VSYNC signal from the camera
     will be generated on this pin to power FSIN on another OpenMV Cam to sync
     both camera image streams for stereo vision applications...

@@ -326,6 +297,13 @@ Functions

 Constants
 ---------

+.. data:: sensor.BAYER
+
+    RAW BAYER image pixel format. If you try to make the frame size too big
+    to fit in the frame buffer, your OpenMV Cam will set the pixel format
+    to BAYER so that you can capture images, but no image processing methods
+    will be operational.
+
 .. data:: sensor.GRAYSCALE

     GRAYSCALE pixel format (Y from YUV422). Each pixel is 8-bits, 1-byte.

@@ -345,25 +323,21 @@ Constants
     JPEG mode. Only works for the OV2640 camera.

-.. data:: sensor.YUV422
-
-    Deprecated... do not use.
-
 .. data:: sensor.OV9650

-    ``sensor.get_id()`` returns this for the OV9650 camera.
+    `sensor.get_id()` returns this for the OV9650 camera.

 .. data:: sensor.OV2640

-    ``sensor.get_id()`` returns this for the OV2640 camera.
+    `sensor.get_id()` returns this for the OV2640 camera.

 .. data:: sensor.OV7725

-    ``sensor.get_id()`` returns this for the OV7725 camera.
+    `sensor.get_id()` returns this for the OV7725 camera.

 .. data:: sensor.MT9V034

-    ``sensor.get_id()`` returns this for the MT9V034 camera.
+    `sensor.get_id()` returns this for the MT9V034 camera.

 .. data:: sensor.QQCIF

@@ -426,25 +400,25 @@ Constants
     64x32 resolution for the camera sensor.

-    For use with ``image.find_displacement()`` and any other FFT based algorithm.
+    For use with `image.find_displacement()` and any other FFT based algorithm.

 .. data:: sensor.B64X64

     64x64 resolution for the camera sensor.

-    For use with ``image.find_displacement()`` and any other FFT based algorithm.
+    For use with `image.find_displacement()` and any other FFT based algorithm.

 .. data:: sensor.B128X64

     128x64 resolution for the camera sensor.

-    For use with ``image.find_displacement()`` and any other FFT based algorithm.
+    For use with `image.find_displacement()` and any other FFT based algorithm.

 .. data:: sensor.B128X128

     128x128 resolution for the camera sensor.

-    For use with ``image.find_displacement()`` and any other FFT based algorithm.
+    For use with `image.find_displacement()` and any other FFT based algorithm.

 .. data:: sensor.LCD

@@ -465,11 +439,3 @@ Constants
 .. data:: sensor.UXGA

     1600x1200 resolution for the camera sensor. Only works for the OV2640 camera.
-
-.. data:: sensor.NORMAL
-
-    Set the special effect filter to normal.
-
-.. data:: sensor.NEGATIVE
-
-    Set the special effect filter to negative.
diff --git a/docs/library/pyb.Timer.rst b/docs/library/pyb.Timer.rst
index e0ea28b0b334a..0c355b4d3bea0 100644
--- a/docs/library/pyb.Timer.rst
+++ b/docs/library/pyb.Timer.rst
@@ -250,7 +250,7 @@ Methods
         ch2 = timer.channel(2, pyb.Timer.PWM, pin=pyb.Pin.board.X2, pulse_width=8000)
         ch3 = timer.channel(3, pyb.Timer.PWM, pin=pyb.Pin.board.X3, pulse_width=16000)

-    .. only:: port_pyboard
+    .. only:: port_openmvcam

         PWM Example::
diff --git a/docs/library/pyb.rst b/docs/library/pyb.rst
index 141c270b31ee2..1b61f2fb382c4 100644
--- a/docs/library/pyb.rst
+++ b/docs/library/pyb.rst
@@ -20,7 +20,7 @@ Time related functions
 .. function:: millis()

     Returns the number of milliseconds since the board was last reset.
-
+
     The result is always a MicroPython smallint (31-bit signed number), so
     after 2^30 milliseconds (about 12.4 days) this will start to return
     negative numbers.
@@ -32,7 +32,7 @@ Time related functions
 .. function:: micros()

     Returns the number of microseconds since the board was last reset.
-
+
     The result is always a MicroPython smallint (31-bit signed number), so
     after 2^30 microseconds (about 17.8 minutes) this will start to return
     negative numbers.
@@ -44,10 +44,10 @@ Time related functions
 .. function:: elapsed_millis(start)

     Returns the number of milliseconds which have elapsed since ``start``.
-
+
     This function takes care of counter wrap, and always returns a positive
     number. This means it can be used to measure periods up to about 12.4 days.
-
+
     Example::

         start = pyb.millis()
@@ -57,10 +57,10 @@ Time related functions
 .. function:: elapsed_micros(start)

     Returns the number of microseconds which have elapsed since ``start``.
-
+
     This function takes care of counter wrap, and always returns a positive
     number. This means it can be used to measure periods up to about 17.8 minutes.
-
+
     Example::

         start = pyb.micros()
@@ -117,72 +117,104 @@ Power related functions
 .. only:: port_pyboard

     .. function:: freq([sysclk[, hclk[, pclk1[, pclk2]]]])
-
+
         If given no arguments, returns a tuple of clock frequencies:
         (sysclk, hclk, pclk1, pclk2).
         These correspond to:
-
+
         - sysclk: frequency of the CPU
         - hclk: frequency of the AHB bus, core memory and DMA
         - pclk1: frequency of the APB1 bus
         - pclk2: frequency of the APB2 bus
-
+
         If given any arguments then the function sets the frequency of the CPU,
         and the busses if additional arguments are given. Frequencies are given in
         Hz. Eg freq(120000000) sets sysclk (the CPU frequency) to 120MHz. Note that
         not all values are supported and the largest supported frequency not
         greater than the given value will be selected.
-
+
         Supported sysclk frequencies are (in MHz): 8, 16, 24, 30, 32, 36, 40, 42,
         48, 54, 56, 60, 64, 72, 84, 96, 108, 120, 144, 168.
-
+
         The maximum frequency of hclk is 168MHz, of pclk1 is 42MHz, and of pclk2
         is 84MHz. Be sure not to set frequencies above these values.
-
+
         The hclk, pclk1 and pclk2 frequencies are derived from the sysclk
         frequency using a prescaler (divider). Supported prescalers for hclk are:
         1, 2, 4, 8, 16, 64, 128, 256, 512. Supported prescalers for pclk1 and
         pclk2 are: 1, 2, 4, 8. A prescaler will be chosen to best match the
         requested frequency.
-
+
         A sysclk frequency of 8MHz uses the HSE (external crystal) directly and
         16MHz uses the HSI (internal oscillator) directly. The higher frequencies
         use the HSE to drive the PLL (phase locked loop), and then use the output
         of the PLL.
-
+
         Note that if you change the frequency while the USB is enabled then the
         USB may become unreliable. It is best to change the frequency in boot.py,
         before the USB peripheral is started. Also note that sysclk frequencies
         below 36MHz do not allow the USB to function correctly.
-
+
     .. function:: wfi()
-
+
         Wait for an internal or external interrupt.
-
+
         This executes a ``wfi`` instruction which reduces power consumption
         of the MCU until any interrupt occurs (be it internal or external),
         at which point execution continues. Note that the system-tick interrupt
         occurs once every millisecond (1000Hz) so this function will block for
         at most 1ms.
-
+
     .. function:: stop()
-
+
         Put the pyboard in a "sleeping" state.
-
+
         This reduces power consumption to less than 500 uA. To wake from this
         sleep state requires an external interrupt or a real-time-clock event.
         Upon waking execution continues where it left off.
-
+
         See :meth:`rtc.wakeup` to configure a real-time-clock wakeup event.
-
+
     .. function:: standby()
-
+
         Put the pyboard into a "deep sleep" state.
-
+
         This reduces power consumption to less than 50 uA. To wake from this
         sleep state requires a real-time-clock event, or an external interrupt
         on X1 (PA0=WKUP) or X18 (PC13=TAMP1). Upon waking the system undergoes
         a hard reset.
-
+
+        See :meth:`rtc.wakeup` to configure a real-time-clock wakeup event.
+
+.. only:: port_openmvcam
+
+    .. function:: wfi()
+
+        Wait for an internal or external interrupt.
+
+        This executes a ``wfi`` instruction which reduces power consumption
+        of the MCU until any interrupt occurs (be it internal or external),
+        at which point execution continues. Note that the system-tick interrupt
+        occurs once every millisecond (1000Hz) so this function will block for
+        at most 1ms.
+
+    .. function:: stop()
+
+        Put the openmvcam in a "sleeping" state.
+
+        This reduces power consumption to less than 500 uA. To wake from this
+        sleep state requires an external interrupt or a real-time-clock event.
+        Upon waking execution continues where it left off.
+
+        See :meth:`rtc.wakeup` to configure a real-time-clock wakeup event.
+
+    .. function:: standby()
+
+        Put the openmvcam into a "deep sleep" state.
+
+        This reduces power consumption to less than 50 uA. To wake from this
+        sleep state requires a real-time-clock event.
+        Upon waking the system undergoes a hard reset.
+
     See :meth:`rtc.wakeup` to configure a real-time-clock wakeup event.

 Miscellaneous functions
@@ -191,59 +223,63 @@ Miscellaneous functions
 .. only:: port_pyboard

     .. function:: have_cdc()
-
+
         Return True if USB is connected as a serial device, False otherwise.
-
+
         .. note:: This function is deprecated. Use pyb.USB_VCP().isconnected() instead.
-
+
     .. function:: hid((buttons, x, y, z))
-
+
         Takes a 4-tuple (or list) and sends it to the USB host (the PC) to
         signal a HID mouse-motion event.
-
+
         .. note:: This function is deprecated. Use :meth:`pyb.USB_HID.send()` instead.
-
+
+.. only:: port_pyboard or port_openmvcam
+
 .. function:: info([dump_alloc_table])
-
+
     Print out lots of information about the board.

-.. function:: main(filename)
+.. only:: port_pyboard

-    Set the filename of the main script to run after boot.py is finished. If
-    this function is not called then the default file main.py will be executed.
+    .. function:: main(filename)

-    It only makes sense to call this function from within boot.py.
+        Set the filename of the main script to run after boot.py is finished. If
+        this function is not called then the default file main.py will be executed.

-.. only:: port_pyboard
+        It only makes sense to call this function from within boot.py.
+
+.. only:: port_pyboard or port_openmvcam

     .. function:: mount(device, mountpoint, \*, readonly=False, mkfs=False)
-
+
         Mount a block device and make it available as part of the filesystem.
         ``device`` must be an object that provides the block protocol:
-
+
         - ``readblocks(self, blocknum, buf)``
         - ``writeblocks(self, blocknum, buf)`` (optional)
         - ``count(self)``
         - ``sync(self)`` (optional)
-
+
         ``readblocks`` and ``writeblocks`` should copy data between ``buf`` and
         the block device, starting from block number ``blocknum`` on the device.
         ``buf`` will be a bytearray with length a multiple of 512. If
         ``writeblocks`` is not defined then the device is mounted read-only.
         The return value of these two functions is ignored.
-
+
         ``count`` should return the number of blocks available on the device.
         ``sync``, if implemented, should sync the data on the device.
-
+
         The parameter ``mountpoint`` is the location in the root of the filesystem
         to mount the device. It must begin with a forward-slash.
-
+
         If ``readonly`` is ``True``, then the device is mounted read-only,
         otherwise it is mounted read-write.
-
+
         If ``mkfs`` is ``True``, then a new filesystem is created if one does not
         already exist.
-
+
         To unmount a device, pass ``None`` as the device and the mount location
         as ``mountpoint``.
@@ -251,49 +287,51 @@ Miscellaneous functions

     Get or set the UART object where the REPL is repeated on.

-.. only:: port_pyboard
+.. only:: port_pyboard or port_openmvcam

     .. function:: rng()
-
+
         Return a 30-bit hardware generated random number.

 .. function:: sync()

     Sync all file systems.

-.. only:: port_pyboard
+.. only:: port_pyboard or port_openmvcam

     .. function:: unique_id()
-
+
         Returns a string of 12 bytes (96 bits), which is the unique ID of the MCU.

-.. function:: usb_mode([modestr], vid=0xf055, pid=0x9801, hid=pyb.hid_mouse)
+.. only:: port_pyboard
+
+    .. function:: usb_mode([modestr], vid=0xf055, pid=0x9801, hid=pyb.hid_mouse)

-    If called with no arguments, return the current USB mode as a string.
+        If called with no arguments, return the current USB mode as a string.

-    If called with ``modestr`` provided, attempts to set USB mode.
-    This can only be done when called from ``boot.py`` before
-    :meth:`pyb.main()` has been called. The following values of
-    ``modestr`` are understood:
+        If called with ``modestr`` provided, attempts to set USB mode.
+        This can only be done when called from ``boot.py`` before
+        :meth:`pyb.main()` has been called. The following values of
+        ``modestr`` are understood:

-    - ``None``: disables USB
-    - ``'VCP'``: enable with VCP (Virtual COM Port) interface
-    - ``'MSC'``: enable with MSC (mass storage device class) interface
-    - ``'VCP+MSC'``: enable with VCP and MSC
-    - ``'VCP+HID'``: enable with VCP and HID (human interface device)
+        - ``None``: disables USB
+        - ``'VCP'``: enable with VCP (Virtual COM Port) interface
+        - ``'MSC'``: enable with MSC (mass storage device class) interface
+        - ``'VCP+MSC'``: enable with VCP and MSC
+        - ``'VCP+HID'``: enable with VCP and HID (human interface device)

-    For backwards compatibility, ``'CDC'`` is understood to mean
-    ``'VCP'`` (and similarly for ``'CDC+MSC'`` and ``'CDC+HID'``).
+        For backwards compatibility, ``'CDC'`` is understood to mean
+        ``'VCP'`` (and similarly for ``'CDC+MSC'`` and ``'CDC+HID'``).

-    The ``vid`` and ``pid`` parameters allow you to specify the VID
-    (vendor id) and PID (product id).
+        The ``vid`` and ``pid`` parameters allow you to specify the VID
+        (vendor id) and PID (product id).

-    If enabling HID mode, you may also specify the HID details by
-    passing the ``hid`` keyword parameter. It takes a tuple of
-    (subclass, protocol, max packet length, polling interval, report
-    descriptor). By default it will set appropriate values for a USB
-    mouse. There is also a ``pyb.hid_keyboard`` constant, which is an
-    appropriate tuple for a USB keyboard.
+        If enabling HID mode, you may also specify the HID details by
+        passing the ``hid`` keyword parameter. It takes a tuple of
+        (subclass, protocol, max packet length, polling interval, report
+        descriptor). By default it will set appropriate values for a USB
+        mouse. There is also a ``pyb.hid_keyboard`` constant, which is an
+        appropriate tuple for a USB keyboard.

 Classes
 -------

@@ -302,7 +340,7 @@ Classes
     .. toctree::
         :maxdepth: 1
-
+
         pyb.Accel.rst
         pyb.ADC.rst
         pyb.CAN.rst
@@ -320,3 +358,22 @@ Classes
         pyb.UART.rst
         pyb.USB_HID.rst
         pyb.USB_VCP.rst
+
+.. only:: port_openmvcam
+
+    .. toctree::
+        :maxdepth: 1
+
+        pyb.ADC.rst
+        pyb.CAN.rst
+        pyb.DAC.rst
+        pyb.ExtInt.rst
+        pyb.I2C.rst
+        pyb.LED.rst
+        pyb.Pin.rst
+        pyb.RTC.rst
+        pyb.Servo.rst
+        pyb.SPI.rst
+        pyb.Timer.rst
+        pyb.UART.rst
+        pyb.USB_VCP.rst
diff --git a/docs/license.rst b/docs/license.rst
index 5389a66c19e69..ac3e7eb1cf381 100644
--- a/docs/license.rst
+++ b/docs/license.rst
@@ -3,7 +3,7 @@ MicroPython license information

 The MIT License (MIT)

-Copyright (c) 2013-2017 Damien P. George, Paul Sokolovsky, OpenMV LLC, and others
+Copyright (c) 2013-2018 Damien P. George, Paul Sokolovsky, OpenMV LLC, and others

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal