From 7bad79321f0a687f9a1eae10d4e7ed8bd3a33885 Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:14:52 -0400 Subject: [PATCH 1/6] Create bitcoin_data --- bitcoin_data | 1 + 1 file changed, 1 insertion(+) create mode 100644 bitcoin_data diff --git a/bitcoin_data b/bitcoin_data new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/bitcoin_data @@ -0,0 +1 @@ + From df6014ee2e2bf190b4b1188afff8890a89bbc902 Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:17:28 -0400 Subject: [PATCH 2/6] Delete bitcoin_data --- bitcoin_data | 1 - 1 file changed, 1 deletion(-) delete mode 100644 bitcoin_data diff --git a/bitcoin_data b/bitcoin_data deleted file mode 100644 index 8b13789..0000000 --- a/bitcoin_data +++ /dev/null @@ -1 +0,0 @@ - From 360540385635d3fe8b9063ab5eba4d54214b81fb Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:18:20 -0400 Subject: [PATCH 3/6] Create test --- bitcoin_data/test | 1 + 1 file changed, 1 insertion(+) create mode 100644 bitcoin_data/test diff --git a/bitcoin_data/test b/bitcoin_data/test new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/bitcoin_data/test @@ -0,0 +1 @@ + From 1c0ad946ff8ba4ec8bef2c43870f379c4b6e420a Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:20:28 -0400 Subject: [PATCH 4/6] Delete test --- bitcoin_data/test | 1 - 1 file changed, 1 deletion(-) delete mode 100644 bitcoin_data/test diff --git a/bitcoin_data/test b/bitcoin_data/test deleted file mode 100644 index 8b13789..0000000 --- a/bitcoin_data/test +++ /dev/null @@ -1 +0,0 @@ - From aab60ef7e9fa3dac02604562b8faf87f22f2ea0b Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:20:56 -0400 Subject: [PATCH 5/6] Create test --- ESN/test | 1 + 1 file changed, 1 insertion(+) create mode 100644 ESN/test diff --git a/ESN/test b/ESN/test new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/ESN/test @@ -0,0 +1 @@ + From 2f8498a83105481d0d189b20407f6e3f658b1053 Mon Sep 17 00:00:00 2001 From: RogerFu18 <44245243+RogerFu18@users.noreply.github.com> Date: Wed, 17 Oct 2018 18:22:39 -0400 Subject: [PATCH 6/6] Add files via upload --- ESN/EchoTorch-master/LICENSE | 674 ++++++++++++++++++ ESN/EchoTorch-master/README.md | 130 ++++ ESN/EchoTorch-master/docs/Makefile | 20 + .../docs/images/echotorch.png | Bin 0 -> 36690 bytes .../docs/images/echotorch_complete.png | Bin 0 -> 45075 bytes ESN/EchoTorch-master/docs/source/conf.py | 164 +++++ .../docs/source/echotorch.datasets.rst | 38 + .../docs/source/echotorch.nn.rst | 32 + .../docs/source/echotorch.rst | 19 + .../docs/source/echotorch.utils.rst | 30 + ESN/EchoTorch-master/docs/source/index.rst | 32 + ESN/EchoTorch-master/docs/source/modules.rst | 7 + .../docs/source/notes/esn_learning.rst | 19 + ESN/EchoTorch-master/echotorch/__init__.py | 12 + .../echotorch/datasets/LogisticMapDataset.py | 92 +++ .../echotorch/datasets/MackeyGlassDataset.py | 75 ++ .../echotorch/datasets/MemTestDataset.py | 61 ++ .../echotorch/datasets/NARMADataset.py | 105 +++ .../datasets/SwitchAttractorDataset.py | 105 +++ .../echotorch/datasets/__init__.py | 12 + .../echotorch/models/HNilsNet.py | 50 ++ .../echotorch/models/NilsNet.py | 78 ++ .../echotorch/models/__init__.py | 24 + ESN/EchoTorch-master/echotorch/nn/BDESN.py | 197 +++++ 
.../echotorch/nn/BDESNCell.py | 179 +++++ ESN/EchoTorch-master/echotorch/nn/BDESNPCA.py | 209 ++++++ ESN/EchoTorch-master/echotorch/nn/EESN.py | 113 +++ ESN/EchoTorch-master/echotorch/nn/ESN.py | 205 ++++++ ESN/EchoTorch-master/echotorch/nn/ESNCell.py | 373 ++++++++++ ESN/EchoTorch-master/echotorch/nn/GatedESN.py | 301 ++++++++ ESN/EchoTorch-master/echotorch/nn/HESN.py | 103 +++ ESN/EchoTorch-master/echotorch/nn/ICACell.py | 112 +++ ESN/EchoTorch-master/echotorch/nn/Identity.py | 43 ++ ESN/EchoTorch-master/echotorch/nn/LiESN.py | 93 +++ .../echotorch/nn/LiESNCell.py | 150 ++++ .../echotorch/nn/OnlinePCACell.py | 321 +++++++++ ESN/EchoTorch-master/echotorch/nn/PCACell.py | 373 ++++++++++ ESN/EchoTorch-master/echotorch/nn/RRCell.py | 180 +++++ ESN/EchoTorch-master/echotorch/nn/SFACell.py | 342 +++++++++ .../echotorch/nn/StackedESN.py | 303 ++++++++ ESN/EchoTorch-master/echotorch/nn/__init__.py | 23 + .../echotorch/transforms/__init__.py | 9 + .../echotorch/transforms/text/Character.py | 131 ++++ .../transforms/text/Character2Gram.py | 140 ++++ .../transforms/text/Character3Gram.py | 140 ++++ .../echotorch/transforms/text/Compose.py | 68 ++ .../echotorch/transforms/text/Embedding.py | 104 +++ .../echotorch/transforms/text/FunctionWord.py | 118 +++ .../echotorch/transforms/text/GensimModel.py | 111 +++ .../echotorch/transforms/text/GloveVector.py | 89 +++ .../echotorch/transforms/text/PartOfSpeech.py | 76 ++ .../echotorch/transforms/text/Tag.py | 91 +++ .../echotorch/transforms/text/Token.py | 78 ++ .../echotorch/transforms/text/Transformer.py | 94 +++ .../echotorch/transforms/text/__init__.py | 21 + .../echotorch/utils/__init__.py | 11 + .../echotorch/utils/error_measures.py | 165 +++++ .../echotorch/utils/utility_functions.py | 64 ++ .../examples/MNIST/convert_images.py | 37 + .../examples/datasets/logistic_map.py | 18 + .../generation/narma10_esn_feedbacks.py | 103 +++ .../examples/memory/memtest.py | 74 ++ .../examples/models/NilsNet_example.py | 92 +++ .../examples/nodes/pca_tests.py | 63 ++ .../switch_attractor/switch_attractor_esn.py | 98 +++ .../timeserie_prediction/mackey_glass_esn.py | 118 +++ .../timeserie_prediction/narma10_esn.py | 101 +++ .../timeserie_prediction/narma10_esn_sgd.py | 118 +++ .../timeserie_prediction/narma10_gated_esn.py | 134 ++++ .../narma10_stacked_esn.py | 78 ++ .../unsupervised_learning/sfa_logmap.py | 49 ++ .../examples/validation/validation_10cv.py | 55 ++ ESN/EchoTorch-master/requirements.txt | 6 + ESN/EchoTorch-master/setup.py | 18 + 74 files changed, 8071 insertions(+) create mode 100644 ESN/EchoTorch-master/LICENSE create mode 100644 ESN/EchoTorch-master/README.md create mode 100644 ESN/EchoTorch-master/docs/Makefile create mode 100644 ESN/EchoTorch-master/docs/images/echotorch.png create mode 100644 ESN/EchoTorch-master/docs/images/echotorch_complete.png create mode 100644 ESN/EchoTorch-master/docs/source/conf.py create mode 100644 ESN/EchoTorch-master/docs/source/echotorch.datasets.rst create mode 100644 ESN/EchoTorch-master/docs/source/echotorch.nn.rst create mode 100644 ESN/EchoTorch-master/docs/source/echotorch.rst create mode 100644 ESN/EchoTorch-master/docs/source/echotorch.utils.rst create mode 100644 ESN/EchoTorch-master/docs/source/index.rst create mode 100644 ESN/EchoTorch-master/docs/source/modules.rst create mode 100644 ESN/EchoTorch-master/docs/source/notes/esn_learning.rst create mode 100644 ESN/EchoTorch-master/echotorch/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/datasets/LogisticMapDataset.py create mode 100644 
ESN/EchoTorch-master/echotorch/datasets/MackeyGlassDataset.py create mode 100644 ESN/EchoTorch-master/echotorch/datasets/MemTestDataset.py create mode 100644 ESN/EchoTorch-master/echotorch/datasets/NARMADataset.py create mode 100644 ESN/EchoTorch-master/echotorch/datasets/SwitchAttractorDataset.py create mode 100644 ESN/EchoTorch-master/echotorch/datasets/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/models/HNilsNet.py create mode 100644 ESN/EchoTorch-master/echotorch/models/NilsNet.py create mode 100644 ESN/EchoTorch-master/echotorch/models/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/BDESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/BDESNCell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/BDESNPCA.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/EESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/ESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/ESNCell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/GatedESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/HESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/ICACell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/Identity.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/LiESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/LiESNCell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/OnlinePCACell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/PCACell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/RRCell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/SFACell.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/StackedESN.py create mode 100644 ESN/EchoTorch-master/echotorch/nn/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Character.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Character2Gram.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Character3Gram.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Compose.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Embedding.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/FunctionWord.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/GensimModel.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/GloveVector.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/PartOfSpeech.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Tag.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Token.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/Transformer.py create mode 100644 ESN/EchoTorch-master/echotorch/transforms/text/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/utils/__init__.py create mode 100644 ESN/EchoTorch-master/echotorch/utils/error_measures.py create mode 100644 ESN/EchoTorch-master/echotorch/utils/utility_functions.py create mode 100644 ESN/EchoTorch-master/examples/MNIST/convert_images.py create mode 100644 ESN/EchoTorch-master/examples/datasets/logistic_map.py create mode 100644 ESN/EchoTorch-master/examples/generation/narma10_esn_feedbacks.py create mode 100644 ESN/EchoTorch-master/examples/memory/memtest.py create mode 100644 ESN/EchoTorch-master/examples/models/NilsNet_example.py create mode 100644 ESN/EchoTorch-master/examples/nodes/pca_tests.py create mode 100644 
ESN/EchoTorch-master/examples/switch_attractor/switch_attractor_esn.py create mode 100644 ESN/EchoTorch-master/examples/timeserie_prediction/mackey_glass_esn.py create mode 100644 ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py create mode 100644 ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn_sgd.py create mode 100644 ESN/EchoTorch-master/examples/timeserie_prediction/narma10_gated_esn.py create mode 100644 ESN/EchoTorch-master/examples/timeserie_prediction/narma10_stacked_esn.py create mode 100644 ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py create mode 100644 ESN/EchoTorch-master/examples/validation/validation_10cv.py create mode 100644 ESN/EchoTorch-master/requirements.txt create mode 100644 ESN/EchoTorch-master/setup.py diff --git a/ESN/EchoTorch-master/LICENSE b/ESN/EchoTorch-master/LICENSE new file mode 100644 index 0000000..9cecc1d --- /dev/null +++ b/ESN/EchoTorch-master/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. 
This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    {one line to give the program's name and a brief idea of what it does.}
+    Copyright (C) {year}  {name of author}
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    {project}  Copyright (C) {year}  {fullname}
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ESN/EchoTorch-master/README.md b/ESN/EchoTorch-master/README.md
new file mode 100644
index 0000000..687d771
--- /dev/null
+++ b/ESN/EchoTorch-master/README.md
@@ -0,0 +1,130 @@
+

+
+--------------------------------------------------------------------------------
+
+EchoTorch is a Python module based on pyTorch for implementing and testing
+various flavours of Echo State Network models. EchoTorch is intended for
+research purposes rather than production use. As it is based on pyTorch,
+EchoTorch's layers can be integrated into deep architectures.
+EchoTorch provides two ways to train models:
+* Classical ESN training with Moore-Penrose pseudo-inverse or LU decomposition;
+* pyTorch gradient descent optimizers.
+
+Join our community to create datasets and deep-learning models! Chat with us on [Gitter](https://gitter.im/EchoTorch/Lobby) and join the [Google Group](https://groups.google.com/forum/#!forum/echotorch/) to collaborate with us.
+
+![PyPI - Python Version](https://img.shields.io/pypi/pyversions/echotorch.svg?style=flat-square)
+[![Codecov](https://img.shields.io/codecov/c/github/nschaetti/echotorch/master.svg?style=flat-square)](https://codecov.io/gh/nschaetti/EchoTorch)
+[![Documentation Status](https://img.shields.io/readthedocs/echotorch/latest.svg?style=flat-square)](http://echotorch.readthedocs.io/en/latest/?badge=latest&style=flat-square)
+[![Build Status](https://img.shields.io/travis/nschaetti/EchoTorch/master.svg?style=flat-square)](https://travis-ci.org/nschaetti/EchoTorch)
+
+This repository consists of:
+
+* echotorch.datasets : Pre-built datasets for common ESN tasks
+* echotorch.models : Generic pretrained ESN models
+* echotorch.transforms : Data transformations specific to echo state networks
+* echotorch.utils : Tools, functions and measures for echo state networks
+
+## Getting started
+
+These instructions will get you a copy of the project up and running
+on your local machine for development and testing purposes.
+See deployment for notes on how to deploy the project on a live system.
+
+### Prerequisites
+
+You need the following packages to install EchoTorch:
+
+* pyTorch
+* TorchVision
+
+### Installation
+
+    pip install EchoTorch
+
+## Authors
+
+* **Nils Schaetti** - *Initial work* - [nschaetti](https://github.com/nschaetti/)
+
+## License
+
+This project is licensed under the GPLv3 License - see the [LICENSE](LICENSE) file
+for details.
+
+## Citing
+
+If you find EchoTorch useful for an academic publication, then please use the following BibTeX to cite it:
+
+```
+@misc{echotorch,
+  author = {Schaetti, Nils},
+  title = {EchoTorch: Reservoir Computing with pyTorch},
+  year = {2018},
+  publisher = {GitHub},
+  journal = {GitHub repository},
+  howpublished = {\url{https://github.com/nschaetti/EchoTorch}},
+}
+```
+
+## A short introduction
+
+### Classical ESN training
+
+You can simply create an ESN with the ESN or LiESN objects in the nn
+module.
+
+```python
+esn = etnn.LiESN(
+    input_dim,
+    n_hidden,
+    output_dim,
+    spectral_radius,
+    learning_algo='inv',
+    leaky_rate=leaky_rate
+)
+```
+
+Where
+
+* input_dim is the input dimensionality;
+* n_hidden is the size of the reservoir;
+* output_dim is the output dimensionality;
+* spectral_radius is the spectral radius, with a default value of 0.9;
+* learning_algo lets you choose which training algorithm to use;
+the possible values are inv, LU and sgd.
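For example, with concrete values (an illustrative sketch, not taken from the EchoTorch documentation: the keyword names follow the bullet list above, `echotorch.nn` is assumed importable as `etnn`, and the numbers are arbitrary):

```python
import echotorch.nn as etnn

# A leaky-integrated ESN mapping a 1-dimensional series to a
# 1-dimensional prediction through a 100-unit reservoir, trained
# with the pseudo-inverse ('inv') readout.
esn = etnn.LiESN(
    input_dim=1,
    n_hidden=100,
    output_dim=1,
    spectral_radius=0.9,
    learning_algo='inv',
    leaky_rate=1.0
)
```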
+You now just have to give the ESN the inputs and the expected outputs.
+
+```python
+for data in trainloader:
+    # Inputs and outputs
+    inputs, targets = data
+
+    # To variable
+    inputs, targets = Variable(inputs), Variable(targets)
+
+    # Give the example to EchoTorch
+    esn(inputs, targets)
+# end for
+```
+
+After giving all examples to EchoTorch, you just have to call the
+finalize method.
+
+```python
+esn.finalize()
+```
+
+The model is now trained and you can call the esn object to get a
+prediction.
+
+```python
+predicted = esn(test_input)
+```
+
+### ESN training with Stochastic Gradient Descent
+
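A minimal sketch of such a loop, assuming an ESN created with learning_algo='sgd' whose forward call on inputs alone returns differentiable predictions like any pyTorch module, and reusing esn and trainloader from above; the loss, optimizer, and hyper-parameters here are illustrative, not EchoTorch's documented API:

```python
import torch
import torch.nn as nn
from torch.autograd import Variable

# Illustrative choices: mean-squared error on the ESN outputs,
# optimized with plain stochastic gradient descent.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(esn.parameters(), lr=0.01, momentum=0.9)

for epoch in range(100):
    for data in trainloader:
        inputs, targets = data
        inputs, targets = Variable(inputs), Variable(targets)

        optimizer.zero_grad()               # reset accumulated gradients
        outputs = esn(inputs)               # forward pass through the reservoir
        loss = criterion(outputs, targets)  # compare predictions with targets
        loss.backward()                     # backpropagate the error
        optimizer.step()                    # update the trainable weights
```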
diff --git a/ESN/EchoTorch-master/docs/Makefile b/ESN/EchoTorch-master/docs/Makefile
new file mode 100644
index 0000000..17f63a6
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+SPHINXPROJ    = EchoTorch
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/ESN/EchoTorch-master/docs/images/echotorch.png b/ESN/EchoTorch-master/docs/images/echotorch.png
new file mode 100644
index 0000000000000000000000000000000000000000..a3d39014883215db7bb23990ca53480a462ab1c0
GIT binary patch
literal 36690
[binary PNG data omitted]
z!{#2$9z2b#!z0;dn+kRdqq!V+F}Rz~AjTxc%qb0Y@G`~DAY5*aA*5hKD$5fpm_yUL z!91GnrPy&ja#GB(F+MM4a8gv!$h7FqMo~{J>y8oL{$X7@;e0>!h^LGf zJ0uh)njn@?cvulT%95=SYH|$R>Bsh$cJbQy@7xjFrB`qK#tuoGp@IBu--r8-<96r; zx+6uxFIM5XDdn|ln*_Xxa)mq8X026L>4E8g@3-+gf1f_lP`b3P*?H~=tt=w2kg`Ov z-fAUjSKh8~Ny822>#8+UiWU2T%ZNM5&8?B5YY;yv)VM2(s5K+~`c-j28OY1IolZN! zSCYH&;UgMK^TB%DccO1p+Ss}BXcR52%Gr*-6P9VqPl@BbVsjs{J4x zt#2?aG+x$AzOXb)RNJQcYM8MJl^D$Sccm%1%4?4?K7uD~MhKBU-2a^xTe)p@9e4Ue zlguhC>;rEhj?uT{AK}Y(?;$g=6#>lxl}HUTZ-gz)yKgFD_=@1i;5J+`H zJz8)Skp!|K=y2o7x)Xi>b7ca1XWHRkOAJE#&eJ6AO#&0|^j0&Z8`bE&skl8VD|YsK zC(4eGk2p234KnKzrmuQ{2$PVWZcp>p>>Y@B4czwxm?^bN>p*E?B57+*`Ute>pSL&GW2n%+Z}o9 zke`=1+)Y#eSgsPw<`UkWz1j7OBqM7}?2kx+0ZpE%jyw+6<_yR?+D$?&qSR(H8) zQ6&I+mMau9To^cAh;*-3VibfXh#mPJiWKD>-r-X8eSLLN(ZGH0VP@3|Zvc9=Iv+LO z@nk!jO$mC*&W(e&*I7g3v5A4FMmTtAevaRp7eX{Oc=3Qffd3D-bJpQaDY(MaKGmOI zK-OV$eo=;)Qfk1Dk?JM=iX)FFnWYEBxi}((*Nd)%k2O z4OHbrQ1gMH%7#j#$3^M4KoIx`u73~FWFa0FI;qm)p8Pg*t33mLzJzkd{g_VGG*{Do0bNQtA}W&(KxfvnmMQ zs?KH*&?LjVZj_QynDyiwsBI)%)Lx@zGjer_Wm}FVkWMR21Fw)eb8vC>X9{}q-mN-! zo)0crW{qiR>GvmU88qANvyHF|y*>CyPJhfhuMb3Ugi?X3??bfCbyuE*Cxg{WFyhP! zZU2`;N(es1sKH=H3hou6kcf~9jekV4?snljy-k-*&*{2c0=Q>FScV1L))41s4oE8* zJ6iB|5z!s|E{j{PdLitD45!|CHbn@k=#Z`&KoQT|9*5zpUrM&g+a~Gu^Ubg zvImyC?IcfdjLe|2seyrDPE-vpiKWL0(+|b2c3<$lixO2~4-kNuk(6{{AQ{O{OFK$! zbvb3X1b+cp9eqOOgtI`!flvg{p#wZb=m&9B0ao~Gpq0U@b>1}0k$xm~=Dbc9;GzZ& zMN)o6aa!!pPS^|=;bTY{X4Zi|l^Sql%_Ve;ydp=$0GZQQ0FByW8OyCM?q4yxn+eMY zwo)uUo9ToWGOm1>+sO$j5|nS)UpxPK7bO=u0O)YvpcPHb&bI)gbA;-?%^?+SSF6)8 zCWKAPV;b7{2UgT_0tY*jvGT@}cClwkkFbt~q*F1-;!{2h#^Y+~yW);nIIry70d0nB zci%y-ys5+A4%F2`l!kUW@bTpj+l->-7lm4^m2Kw#MDz5 z<`6k&<206cDl;ch+FXFRrlzMOw5&9ysMluMkIIX^D&qVv@zdkDsT$PqSd2J%Q0N<- zvZaLbKX5WMqqxK}>6UfmEZ`Kv(fvfoxe%NE`Vhddkx`KrU9mASDIy$f4YSdcRzQmu zKBBUoE7dA^a9ST77IVHVGeqq=$ZwyV=sE73*~{|W!(!`c*ff82zY|^UaLrwgSC1#* z$L(t47ybu+-Y8-H*b}$FsnQ=~g}i6p8D7dJp*Ao#^qd(M129mZwaTz8E61p22D@5Jo&dKT zFr79^CxH|>gQMZch+8sW?+h|0a5t$Q;Fue=+6%W>-mdYE`q>%Hg=H8Jmb+7Y`C+=I zo%@|Ms-ue* zg{4Q_F)bGmZhn@MfbbgnVh-tEZbyEID#~5Ba9z-sPBQ<3^?m+lZGk5og zl#WacXdlSTdE%qch2QzeRlb#6lKFd|+hb<>{5i8ZJC=?N+jD%;q&blHvb4UEwh9g(I#)#9j6MLNTlxo1LEdu| za#W3Qoyuk~91zw&dYoZ69+;CuIK!Ezbm@JhBzHDyGYM11O-<}pSPw0JThKlriV^Pa#lGAq@ zTZOx|qg5G81jpdaMG};Q!j(PI`w3mWq`Y3_!3FNnzACUK+nqTIg66o^e3=)p@UH*6 zyrt{PN(CvInfs7KHyDK%8#-yr{U1!t@-oeGIS0vpDQ!vJMqM9;??vB}U#$6(nnBs6 zSljjZ7x#Cindka-=1!@baM%nBB9yj)!TvkD>zwsO43;vLMwCx7f8PqrXK&mvX9i4G zkA&%B40PvEab~$1u2piUdfr!3z>TUcB<;sP>1P3q=cLe0g#?75T*oN0-aBhKFk zmAGdePsTl%;+TW{+tl^6fnB}m|C*i8*+moU(@@#Vtnt*ZH(?n6dR7~CtQfacfcDd* zNl3xe0fH*k1iEUkVaahYa?E z^=J5OK5kA)&$CGxL#}~q?zv}s#`Blj`dKzr?MX*RM`S~x;DYdh;draGD+j{|{G{-Z zDOf!fsRKJ*VQit=ktq#n>q55Iw#vJ^-k;*W)1lCY8n9Y+OS*Cwy76=GO#^8nT0Q(7 zh<0WI?|ucXx^`ZuVr6uuA2Jz2`rLjqz4N;Mj%r^1QTiDdLiW>5I*v*wsmAB+=g)sy zK8f$gwGE;yw}3#I(hZBZXKN!0JZpXLQRYfugS$&H4)%b!%KOwt2@$!2gKt1~=$)H) zAj%iyLwv{+ex!P*1j*tTt|7acc-Q&%@?OJQKwNWr8|aYMU(SAT2nu=!=Sgk%m8+_6 zPj1^d3yRAB)o@uiO%Njj)AA3)o-5C9_8`Q6igiFjA|tLOj5UjrVl=jM714s0LSFu& zS|)=nSrq^6aEiO>ZKTQltlx6Vw!X3bjkMosqWEg{)gijJ#b8T3duD8igB9uQ>5PK3 z9sPS#i_3DFzznsfaeWM^UyRn&oijHP;E%2i!<>Pc@2?z(#vD>Aq#&!~Dw}6XnxPo7@sP2x zr4NCB&=bm#a?V^ULJX!k_Z%FaknP)!eJD_1G?8;j5?Nw5F-Sbgz+B4s1yr7Z5(CVV zDN`I#>3$*E0`oGa^TF)P(2^H5y9ojm_~(bLH$DhqF_?9WLZl@$&^P$5JrKIdL01^B zw#0UP9`t!0Bv}z4LDnrkI~vibyvXS_!rbHtsaF0jA`Jl}LN8*Yy#kfI zwy~cf$^`)}19uwX=U(B_zkmO#z5YEuW`O+9oBreR-(l^Fh1z(?Ts^t{uQ^02Ta{WK zu12o-9Shp=VpH@#y?RZww`~O9sx0rp@59^(x0zq-oXt72w|=v?q;$EZ2HulpEeDFC z$~8rp6_^#(VFd`dvSq0JzUXeabkgE!cKEKZ>C)jyzZfEuc|mamrR0AzPcJIoydo!j zZco~7i>i2k&Jj*|J&Zj4KKJ9=xA=C3|COT&gL9+Z6TCM_Y^g9Y4T){2R=Gnn_qH}h 
zT0ExCd3}kQTNd4w)Rx+s_4%SlQoJVsTTSXPBe}!Fgg$t7wuB&Qi79KH{T06TFIcGK zbm?={z#Oq((qxN$i=^&q933~N7l-Xer!}qIRi0}n_Ys}<5$8I+JDO!cZ_8&#BfB0L zbf4t*kdC42J;~h?s68m9m?2_#RDRhDQA(H!(`G``u0GjkUkvLH1SFxpFKIi&SZnH|lyqIBVmnePhS_9C92WQLM`Fy*NGC=<~1&Or}w_E?_XhI;c@U;m*CxMFNCC{HX}K}Aih+S=M4h&Keue@aD2T>yathiYvHbJunj-p@vQP5i8q_6k2*E5Ot# zQA)X{%E{}(CTJc{%nm`z2i27PD`H2u&p4+?u~SDb;SDBDvHXx)|Nil(vAPxjs?>J$ zC%nSeq+H-`-P`jAH4f&=-`iKr^Y_}swL3XkPh`No6Hl#_RQsJ_-#LOj<3B$-1#^KM ze^b+PrUIGj%))w#5e^5kmkImz7OKx$*vj-* zD$4az-#p@SW8N|~4uJ<5u`3?@mEi(ZqJjuUq&i1T@BV4?`rLSLgxp=WkVH5S0!P3+ z>q8FD0}fsjkC#HkvC70yJ=5vYZ`aap8;ox;NAsL?A&gQr=$g=?$e+HVae%P^D}#~o zXmHsbRZ31FNgKoqV=);a%9n(nMFNcoQ=H})!8+A8C=>#IG zp&JGyPg-}pq9Sl`uxNB(DX}W*$AmeI3a$#p@oA~7vom@u(_Gp{s6V;jjaBu{*cbiR zqj}FM;B{QGfDf{OfLaVS zb(k1-^>Cko60w|)`X1}Hj;*UqWOZ4*b1^^!calQcDhN-!;48m=|I&;CH66E>2kg$G z;1`lAS6n*+Y?u;}T+|YkUBp9hWP|uzQq#Zt(=uj?Q&VZ69^@w82I-1fKni6Hg^5>hxb zPcNHMdH3V{_y?9ZQ|w)B;(RzsA@n#EEksp#JtY56Tel$$A)wp$J;3eK`drb{@|>b5 zK&E3G*Xe@smUhn0@sJ9m{T-)@CvxipVngMjc1P11?IhH5f4b$LpFtfyY>`xu){6o7 z&+(qid_k3BBnrezX93uMo;lv79>ZxpBdGi*W1*`(6&mjCD4HEhboae?O;P(z=^u_(oFQ(BSZeE7bA?IXl+M4McYg^^bpEHsu zM^M7JK(ID&d7Oknno!J1Fwv-1UwS2xNy0^CX(x1Si^}h~voUOaa}q|6xWzfP+m5V& zpZLp0IZ=iI9wRW{Ppwr;=)}%werm%dYUVHJ4r!NMrImkagC8(I!Js^GV-jg-NP5r3 zZIGL3AMh)J>d5x2{0Zip7wY-YbfnMAgwK}igeR^g+gH9rY%w^5ZUk;6$94*vyo1x{Bie6&l#_7QQ<;a`fx}Dt}zQE3>9RF2f28(V{!= zHx43*gUBD$Fa7Z`P3@<4Hy-VlUZG&&D}kaZ&?Ja+==vGa=T1I%;pFn2D~ITPe}3D^ zbV-r=Lju;sq-)t^lxLw2claXAD zn!=-vVEMe6rfDK&Q@DT%C;Qi?na#GkLfH|%*UPt-ZS(7e*YI!SyesmK*CrOXTJ?9KE)!qOCa~izJIuE9!6yYY^aT zVNR@AFG%z5Zjx#+Gw9R7xuO+!H)SNe_kQ&v=cHyhpDF{F$?~U^=@~^Fdrq@Ey#eQm zv+a+WTLYn*_CsJw$)Wm58O{p%snP5e&U&I6Jzb>~0ppD*8XG=~r@s*ImC=dk(Fusm z&@>E)*3>`>4-R=_{0Ds5css2s7BfrI=L$dl*xU~!VT7)bnsrrY`*VTiNt8IHTVNAe zN?U$1;fqZ1i4K@iF9=!S7;40UlspT>LCZz5=Wa5{)e&*${x3h%z{f*~7F!I`*=LUz zhRhU=SOFKWcb!>05I3O-IrK|QQ>@zR>aKU+i@3rg`h^dqV%bbz*Pqt!u0}eaN8&Ay zpY*BBBAl?MbU(Hz#?iIs<(eBQ`ztCWgq3Xfxte;Ut-sRLjx)8GD+@}hpq2zvJqBn% zg!ko%d7ijL(WEBF=?_Ev&A`8etiyR4Vrd{?`}OaGD6X@od3?8Z|et_?ePr zOQH2Em~oaN4>i5txW?T*UNG-g+p{K~qeI-D{h+1jXU5Px_kU40OFcl9XmXb4#)>4- zaT+OjWf2CUrR@!J%cSp=_tWDziR_irkC;BqTe*qP02mM#mtV|4MoM*1d0mfkSx1WF zJz}Iq(|@#eS^muUkGq67AYutXNp>k87JH`NQ>|mGKwPN)nYjw8V{N7~)v*nI*6;rM z^JWhcT%M`0w$JZ{HuMemcm)!v5$^=tSPXSrA_U=nmv$A4Opotu*85Q_>>$t9?1}yY zanT_q%z@ZIrc|xWNT$qkB?z;-vOKq(E6$(#24Q)HPS9TM4Z~x{B+j( zWA)27CR1BxS4hUbIM(X)_|~Nq7h`%QPhfigZl?iJMm_%F_y>c=FTpCc!w{s3DCZuC zWUlsHj4owYBheiKf-yze7Sw6&wgyb8`*}5f?cAVJk5XYXvU|X29Y6hmTj#7WpB#cy z0ren{4W>$nnx#|EG{UadG_A;+$u!Pe*!-1sa`|uK?W40N(}4kNKOg>Roy`JghCPQr zrpZGi158;N6`Ej2x1YB-sV692{G$~0=e^Z&;<1MW&aWpvhBdsu*#2tlN`V#;jBv$F zxCj-N$`lOtI z7(9jb;Do?#xv&{A8wT2Jr_ah^gN148MAOHtb47sUlAz5z!}{rvQ;KM*6}CX%yUY+W zp)R|(4W5VPAz3a;7gyV^!o9-^v=h>6>ZlK!XZecRbQv)i7Np) zM6$L{#h%3deRmk4k%jp9aJyhQQ>cJ&<&0CpWTpS@y+lQPZVBytJ?Y$su+O|?uNNUr z!RU&!;$&jkbVv}P%E4tT#NOcDG%Ia2K^E&OvU?I@wB1mI zjrYnq0|mX=nC8_wic9DHP#>{9KZx&%4nm>Zi4YRVmWi?vPG+VWUw83a#cq~C^DO3f zI>p#O*~+7hS;#rOA*rE?Clw(mKl-9g4FVYz;7 z(X;|;)ChorJ&JeUA+skI!K6NjwH*D6XqWpap5_+;S*!(>Q_7ve<&d3@LG4oMejn45 zgG}{f(z{i`=Zl}OFHuj;Pvho~U&H%L1nc=*A0`~0ZT@nVd*?+Hn}oIS)dmqaXxiG| z-mAI`YfVdXp@0=5R2&iqycmyt2*H)Y10LBb`1;DS{&dA+a3E?xx#IU-T_FmgPfp9w zgO?!IzCc@6HrO ziP9;3)QIlWa;rebEAvNz;ZxGIf07GVFxN^!=~?VDFF=K1+OA>nXV{ z|CrgVuLp=>pBm7z2-@&lNP0h8ZR?9nx}neR8JBT|&1++Oa%H$F699?o{rfe{lwM8_W9BVh5D-PS>%^z;MH+%>+y3F0KOD#6aivTkW|nXi)M17^+O| zIw5G8sE=u02m-H*@L8`Rt&8o8CLWcdWvMZi1a!u{42!G8HEs3dROpo#L*6z5uZ~&D z0KCEedBg=@cwClpjVAV zPARiGEh|gf##@~R1~X^QIztuz1tK`wf-v;k5PW{;{Ppbo{-#KH%!qaISeOs*3b5lj 
z1LC}I&U)g5Y$xzPybSw|DJ(uQFmgoW{o>~a(I^ZAV?2Jy0{B*n$?&h2Y$WM1H_?i4E-=$d zR8C4*eR~MC+-d=GS^14o>~GXCKp2EC6DesuB1mzo5spqr)XltAt(eW7CP52J<|t}x zCq}$KH^sBuH}Q@fvH7jl1t-GbHvc4+%K1MQfMY1nyJu)-jO%m_brOmh-bI8v1c&h# zwRun(JS$4QU2t-^%fO60ijFnNEyVt=n*g|k^6Qs(IYadru@1wZeWF9AmO?W-Nm`rQ z?;sHs+sU;V@+p7JAd=+#=sqhY)xo8!TKG~Zh}%Jh^1ZYQw=dG-GE%Jz&A4R69|_A{ ztMR1_0>bNHYRqZ*cx~`p_yLdM@bh2VrN0g)QQAc+s+Te(Bm_-qxe43n$sN58>33I$ zco6UOUwpY5H#L>CR{^R3?4l!>&zX6{+4VG`;dZe6YyKtg6(ubkZ^teT%T_NWvNn>Y zVL(F&18UhTnC^YPgLE}ZKP6bjva&L_bM%O$Gsj*b+$9Vf=6BzK_iw?_NAMlseg9YA zFE4aR=_Kh+!&#*X6|67cpN#Xh-RW+;`Qx|wz85$9?*K$ZLE@HG02#$#wsx%Ax z=*I8T+UdXwNdGo56(%P(aBHGv=nWCwIin`)efz_f9O9wL5hZ8b&=32yfKnVMLCT*B zA`2RE>W%9|V3?vaO#WIi082z5ce%@<|M*c0q>$AQs{HEk#$S5j#*zQ`PTS*|0`Gf9 z^kz!h*Cjp1CAYs$tDxZ2I=sp8E$VE+(0R9lCTdVUJlThm^D_1tLPn^(JnsyR;9ge*ZGjua7A1xShU`PFqega=d@erpm^UNPknir1~(yFblto!8Y8K2;rp zA_&fxRjG&@uU~9n?KQIM1DTQ}BCM^UrU!gG{h$Vh519JYoxfWss!d>A$T4l9XwO{W zH*YdNJnh>ZAV^q;nr2cxbhn6HN_*iW&^=G{74;xvPOOZ39k~+8Nen1EyEt3p!_kN% z)h=fTTR~9+U)Fty(B`vMwB8fq@Zu~#&l^b?O{X#lmD+ot7e~GNviah5cSW|>6PwrW zN$7wgwIHI#SdCP$!vW!c2V?g$2vMZxwZ&|`S&vgsk>3x*1n3|uHFb*yA~|yUGn)16 znK|aP?tbv!_kJgDYa10u7wlW;xaE2`30>%Xb-g`Z`H@n?yZso656V0cfY=8dG*5Vc z#@Bbjd{^RWYinmO*=F9{B|$*V`>v0RtE*niu%^%JnI$|A-%X8;{i1)~Mg^v|M{6qD5bkwI86V%Y%xVEsvXr zC&Q7WoeI#Vk86|d>{PTS9^h1-mlWQ@_&UeHCWFxu5fSO(4d2hN7IB@>8~&}ph;vay zHNXHDjbilFL(wd(Q0+>kXCQ|L&HnZ%lS^scUiru)lyde&(F$5D>9C~*o1VAeq>wKb z0Y!s+ju0|zOQ}cm*J#iH_lZj;(&PmQ_ISvSz~$S4T-2s*LoYFn{uCRz83nKca{5$m zKZ0mE*~JOaV$bYn(i9LqeC_XwheMy;(R5-6v zH!~|(8q8ckV^Dpcr+1TnaMdM#X9`|}22-jwDGblE&TVno-CR>ccjsi!6RL=&O3c=Ioco^?mM@=lR$#T-=E_6Xi<#M*oWYsF(h3zj={Ph5sGq>3Jxz>!b6puxC)vrjw zjH;wNN&|^Px%`$+^D)pkOfyD_M;1t6UqES_kgrA zGS=3(5A@ui!hP+`W31KfRZ9G3v(=2%KU49Iel`$er9XsK7fWiCjZ0r`9Fh;_x=p#I zJ2RW$(+`ctfQA^v{P-^6_3r9I+h+k8B0xqGtyJ4PK@dGCEU%&oRy^+`bw1Nqi9n{2 zlwZGAj+Whf(kvZjianB~M$XhUGF ziHjqNlb@|UA?gi!1)JUybyEfCq?kmf2pME+4PqMI%uxO(mC`25zh+!dpFl*zKf{Sv zB*V|lfQyiO0VSZK#DNiF3JnchfKdn^ei}l+*7vPfg++{w705Ls6PpD~1V{~G_d-I= z<40~b!onX72#wIV#ZrPEA4yHCo5w+>;Kpfnt3TV46HvsxV;bM}y)ZvbP~nX-^GA^7 z6DC?INBwk-v9(S+dv?AY&Q^$``x?h7W2ab1#93!y2Bz&Br2f?9gcT1dq!q{Ly#G`a zvX=(C8Dfb^BG71XiNHzwoEpQQe6*tF0Q`v&>k_3SR)tT%*4w%)kp42!?Rh3mygQ?2 zTV7swd5AP`!(Bja)d<>GqXwtX_t>}0Acscu?8p@b_NdvS5&VHq)k2YN9Y9$?OnmA z;S?}S^7V1Dm;a%vv%y_!)=ycoqLx(_#c%*r;QW3?Nc@z4t+rcd*M|`jD6u0(%#TUE zd;%QYC{5&-uxtogB#i0$PBF%!vpM$6?+#zuj zHLhON#2G6-ekFa>tSCLK>;K)0PfoB;st@6@L~P%vO0Bh%!x-C$C_Bh;U^LO!z=@Ia zO*yb6oW{Ed=Du{oHntXgb+e_*_wl^ zQ)K7O;e0KPnlJ9}sM6f%svsqMojoXV1Is`leIEt8Je5_r%#F|~xzTYfYo&_j)i>jZo78{b{2GjOjlG{ouWtG{Ga*j` z@l5v)-OOp_??8>EK3q%33|(vq4x*paxx-9lHD@q7zE%kjg&Qe6)Dmlx;wKFfaIy)=N1iL?w?aiAViu%*nXqw6 zb}zXFc2HKzmHIY|A9BqV;fzNs3A|lO^9ZBHHf^-!6hdn%m!xM2IlbP)jQk6FxC;D? z+^-q6(Y9B%@r=5+$B_uGdVLlCz2c2R`XLjC0Gc0xpN>YI+hE9ef3513DhWPDLSEV= zU0F8Lc$=4)V0*F%i6rB{m$#nz^5%y%c|NZMxwqCPqLwRP1p)Eu)k%flHp-J}qqdmS zsJba#m(rOmsw?=z^pG0VE3-1)9(&0!unT8kSd>@MT~7V$C+DJnTwAi!?Tw^rJw15gv+C{O_Hv*VURz#$+$! znjrUIDhw;%x|zh+wHLD7p6{dO(*S#D^q4D1=6^sz%j(kea;ZGCfg?)MUazm{G2oe% zrt;>iL7zv#lk3D*M>7LWCd4B*Y2x!RQAGMU#UI4yWHLK`#>5#Y8dmvJ8sZyHH>x|? zNOKwKPADn3OqRp0#_a}62IGtDrVY6!39g?ED`iSh=qaE_8reyuT?ANDvCgB^nRkn5 zw<%b?+$L^rNeb%EKu-7`6;)2QqJj?J$nvs$-lVV0KPh4#v8s%@y(aq@zwTi7blvR3 zEIgpd5}cifV%6jiU3RT$8B+B^mErd44{I`*(Krk|wpHulA8W=UX~4yOtiy($E5%*{ zE;fo8aC8gIsG`Rn*;7U#hCyKY502uKN*XkRn+I>4ezh=!!v`? 
z=|XKgrw%>=uv&OD^u}&9QS3+(+I-IRENxF0WC$}~5TFpLDCAL`%m|et`PqY~o=PE4 z**LwwD0hD?MIUtSVNnw_awRri7R|&aXOLYf@bi(yb|H%w^*qeAZ$8{yoMV@}uu(6z zLUrCharnw>|1ztisGR8Te8Uv(=i`?2{)EzOGkdV#NQZokzX_ccq3n(b zp>*((uM{mFrI3dv=C?nydc`)MSU6nub2RgRM=VI_M>3m9JV~8euOLg%XVF$C1lcE2o@t#G$o$zo92=wwBYA zA~;b8$I_DkdWSHAJ2n=l!rtEk9AtrN(A*cOO9PFlww}R(ihQ@iCe8 z3s}#Sj4fZZ(6(=l>d%;OoFhs#9rt=ZyJN~Cb9cK=VpS}+*}opl($4x^ZC2=%o6yr{ zmjW+F2ndelBoXLa*6SA+R0fX1ba2t};j2fcP}&kOF5wQk7T7cThv+Ny4yEx#hQpil zlJItT4m$Fy%*EK5ySq3?(B;nv9&M3=F=hQ&sjD`y+uaq_8(bxQrq1*j|LT0FQ8TPe z?|g%K@Y=iBvKesbC^R$u`P1ZOE7shAmzZ?15$vuj0v+m=y$6pQzs_xI1RK06qRc~8AQW%U;VrGBHsZo{4W^hWG#&aAz5B+NDH0AXQ zf}<$Fric_VlKFTx@CBaaHF@yD#bx_{9_VujT%)eDMgCowKz)1P(5&e$vA;+J7XsG?wcjZOGTA%S)hr(MWgcPZ1b+Er4GT2KJu zeu=xqX*?1h_yO>6nQE6wfQHSEBBMU00x(6L10XO0Jc>_M0&o?$!!LZTCbsHQN(%%7 zNq{OfS{MeEQyQQ!);KL)#$BCP+qhMa{`zkPeF)58RR^WeYI{o@8_&vzLa`6SMV3j| z$XmQQ;B;Aeg_3`cc6N7BNPLDRBl3R|DHQsS#=jKylDgU3Mn^+T$zWF6*aQW#2lKEG zZS_cnYkofVc`PYIQ_xsfl#dJ~A#1?N#&Rn5ae92#j^dr~0cs}wg~VU> zk?#dbL8=ivBfE(w|6(Vi--g%}(xo#+9)Ej{ z4*ob7IF?Qir8KZAW?OAZ)ksue$Wd=Bvb@@pBc|l079*U*m?GD%P=FhqtyM{46iwt@ z#|}{|dn+T=w%?|%Y;V=~d5(R)q`mT@S>3>nzEiv@k7L2FRGJu_u)J!^jzcIdMrv07 zR$|Lnrz2T@$``z7Fo-v(1_rieEnSojBjOGfi5p^M9?}_UjVJ84u`ZkHIpid06 z*)yeVtkNEG4MPjN_=gWlJXCmbC#MPA9 z(o~W5sxdU;(#r10q)Qdf#BQBQ@UJyEUQUPR{GKxNnXzJ^+J1^RusqbnR-wsav7Lwx zW$1PDJ@Z}977~gBHLVt7c_YHkk4j%@u=A6k% zcVA(IzA#Cvj+w{xVG?C{D|vxL35q;FKEJscE>S zHi>MK$+6S@t(b z;S~*>=OaTs5GVs`1>4Cp*EIG2ZwYYuRg1)VY&R`+fdJcd-2y4dPM@x^A=Z_m_~PN{ zi;&N26g?U-Bq{S^F&Q=AgPL2AbehD*;BdQ38)_;<*j z1z1!rOz-BCEbf(iVOkvAqj@h8m(?ZgJE=q+@*EaB4H511P9>SXKcyCnFz=dMxVuvu zl8Q1K^Rf}Iv!S8Ct~$lFWi6x7&EHbsa%8nVbrc%Lr&b{zJxnC~JGM&3#WhCVc>Hz?;2m}V^mFc0CVRpk3T`BdH_0GMUe^oI~ zux!-#HaRM>hj$yY{DBmzxacHg&_SaXjLD}xQu{9}K1S(hY|s>sil%Zfaz^slDjBR& zVfl?L%Txas0o7eOIlg&o5HHl$CW4u(Y~78=I5asJI(Y7mF7E_{yp?AA*hqK_!*Ft= z^CKuU)_Cjo_Wc`I4l>Lhe&$A7%z0%^NyHrIS1)Hu(kW2f2v=f-4_+4I<5)+Mrp^pe zh-OrCQx=zIuUmpse|=Fe)fQMzrUZQ<;hroqw5$Ax&LR()G8+ zgt_0ntX^LxK7+9Z%SLFrs`YVslo^0vLw204zy40OI}lT~!v{+%KYL;&t71f1ByXk2T|~WA_xXjdrz30H#2KqrFHI2p`PN z&NG2Z61cAIh+%B=yweTdLN%5_z?=$AVJ{3z2-aQvF)M@Q-Bgo@{ojMWoy1p*+wMuV zyzZk!OY8+Wv{bHE2D`(1SJ+vkK1zR(2e*c-_+vvsE0@SvdZ;6n+_ei|iPl3^+x|&N zWG^z8O+kj3;Qog*3LXvRIa%WZ44mQ+47m0uaM(fZ_8=W%7oVmTp<%B`+iJ3%^Dz^G* zsmpy+Z%O)!`N_dY(tq*EiU0QGbSxV8F~5yt%vMRlk)UNlxdUWF5J}=T z0Et?;#C^Cc7|$I+`zcYRjrO7e*{*2Mp}6~t&plM<%7So4Dhn| zhvkQ{fvFYNWY;b`acVaF-yeg06My^WU~gU_H!D-{Q*>J|XhahOOR67<7~PPN!1LncdA?I>2g8WEdjI+9sBxYMVDZ_5*N&BpbHn=j0~h3OWceCak`udbpBk zASlPMrPdbXBxmEylUt#)^@?pbZB>UYND2@3O)JZnbXL=w`%(>sjATU~6?Wn1Vy)+- zfj88k98Tn6{ja7K7F|sXtdLVNtJzO?`qR(->FdddWO5k}y zJ8c=w<&5X+uSq=U8>`FqoYdt=J1!zBeFD{j*3>eGa;DN+|I_%uN*uHnDqSC(v(e$3 zuw)3rpe(?V%kNwbYhT!jmF1xJq5~#2Ow^GZNjKSNk{>up?KpQpce%wCQ z75mKi@#XZMWk8N~SK1R_prhKvX!#yTW2`I3(C_Az=er+?n0d+>3%Cy2^k#B@LL~|> zGo1w7sTF6qZb@w?LOC1vM!AJ!lkrUe%CddDxyCNvXgeI}o<1576^EYlCi!nK6iu6W zerpG3{_66GcvFd$!rGftcj4IPb#3&X{kgJa%kX(shMSjuH9C2B7byOlKJsrCQS2%c z$3f7DhcR=|Q;6a9)|x{}0eurfucHV;q?H-5RQ~XoED7i8tJCz<6z>20iKzFc0?GYE z%2Hu$tGNfb!eQCP8o(w5iG3kFJ|moK*Ei3x2Pd@q0vvs@JFFDH!&5m`8+cikG(>t8 z0J%I|vE;=ds&sPA72d{EN_f#Q^)mH;rV6lwxqch6j=FJuI_F7*zuQm@FD!uL#7#5%rTNNh~vJ!y>Pa6Lxnc% zQ;NpM29CYGb1zr|*QPmXb>IQdJF-GWuv9wR#B4;bF?HlojB9J`SLfbXZ`CSnKI9=( zjNCG?6?sr#TIaq>UT0$l=XaO>|0*aJZC6{@z@VOV%f>d962)OhDuvIfKn~?!u>~EtAOR7rVyRAP?VJSdClxFP;2}o@q{}*I1J*<+k12m zg}q_KY9zz_QyP1Qwb>bnElv#RnUsY*d>Tap`T3wOntCMFRwqX4*ghd-^GB&=PI+I_4Vvx2f8x7=8p`QlKt6N}n3T$knP$Ad}YlXOX@v|3V&pBQtFp3inx#rBuAp}GhH8gii zX0UR93x~_kUF;h( z%7da)!XqBZ7{wVM`*<=vaNDqq5385vCu!fOGAH~?cXR~}WK5rGlE^o~v@)8;c=v$1aP 
zwsWXfO9|uk=S1xi3LoD{hVkqCbGw6lB+lQ=v zqsloIZB%IhIcGJU6B%MiXV0#hgi+?obk&LJ`I(?Hue&mLRysAkiz<$}`kmF3dH zwL_Bza-dFQQ1YHUJ;(pk*Oi8|fpy_l=|`IyYpWEs)nJN>t(ICE5lan1f|Rs08cWaw zp|<%HMH@?tDyFuHQ7Y*oBuY9pQbASiwIuc$rbz~ELX~`Jr=9uty}#~{`{$l}o^#&w zp8MR>7mo@el_n(j7l8A#?}O1vnbsGl?ZyL;&QA!peGFdnj~|o?)|5~$R?CtPu_=tz zW~{Xs&{XUcoVtGRY|_hx{YiM4Zqf4&6tBLok6<~5s868G=S2a-6{Bgd3b*}Uje@Fk?SeAU&>7*4;c|n30k=Ma?-FN#oysg`p6Wbv9txG zjoUc*u-K8btBl$>wTn6pZCr~HDDu?lI$8UO3Z6P>khbO85go{dW8je@g`t5nR$<@j zDl#Bm>%@x^i(Exlt)sL{fRH|#*#Px`|6`s*Nr1hr>-9+=N8fQ!e`+L@x#4X|c^Q;V85r3hmDJSnMBS2rScYCqPH z40>cX0=fe2CSCnlKfR%Z>xqG$e&(sUM!pZep9=8S(}6N8$dz~RTy9AwmdRd|b3EPv zU(1nl&~gQS+Cz*w;}j)*q><}N=BSmFA*=~3E1Dow6<>yf>SxZXPNPDAJi#l9=W4I_ zVbVr!eT>bS7`v2NQ=0@xLZ46fFSPDrQF-d?MV1?Z6eZl4SxF>(z5ZCa6^*%xb3mHa zl~_%Q2!Fj%Ul(!p%Z(~9*R=&%W+HCN1xQ9m_j8sLhtdwx8{%Vl@dHG88#uTGlRw`^BA zu=a$E`g&HF^2T8gf=lj>1Mhsu>09r59B3ai@1&h6i&+SYAg#-YIb6$s*km@M#2$fe zMAh+eeP=e=IadAt+(KKTk4a|JM^gC2SytR^0heY?U$V57(WaRgoRfCSz7o^gvENu} zJ_h()TuNN3>)*hqimWmp?jc#`9zd}DHda?_?1_&f4NTWt8jCeMTQE&5C#IP&Vw|Ja zy6F@&$Ngcyq%u=SUYlx7J&NGB%2*H**z+ESPD_VDP1NAKZYPMb2esi|S;nTd1i7>A z9Dt^WVUvW(SO>?ctnCd}v;SsO^Y(8?GDu6{v3H_;61(6+!%p0z8V<01pig)XuG$R# zc~D^CG*IbAwT9K)D@JXSBUK`=b>2a@jMGkoxlS-qj0aTjqu^lCe0Gm^Te=nm3-pAR zo`sye$}Aq&*xJaM8#Pm9@WKwP)QRK&(QC=fT8dxk5LUJo{2)9|K*?`)|Kafvj*?9f+@$+55+V;cbxhuIl&%qeLaUdJBY+(W%-naj zOzCW0lzeD!^!Ba&XLIS} z*Uf5H&YgTbeyFV6jn7^=J{M|8(3*B9y?on%cDOUfIlysIT2+qzezF&BaAecNwJ z!8NvYG}&wV2WZ&=1A7cuv)JAYdC*g~sKvtd#tAucRuSJO4!+~=nQoBN(E2A({18{! zsQTs(e1@+TM%H2*@zdS$Ig|mR-Lm!oQtsF_g$@e@Naado_1F?}*)G=<>+ja*6&p-h zhH#&4&uHExKa6kUKk?cjxSKo0!S|AA1$59u;HXS7vq${$fa{ban~ zqVruY`8X*m{$D>60;0qVjNxUHi`vAw@~>#*iaC(hR~^|mC77Xx{T7_jNdvM zydGK!ekh?4*5+#$za0CIokO+P`-GI`ThW77W2r%i0!qF=(m)X3d4F=`xy-}RD!tKc zjjcR&Qus$pfiDWM`ZSeI=G@P-zW9k}a6>pldNkQMM787ntT03B;5djyqB`^{L_SDO zeBtuTZAXUGl82_C06Tkm`6=fD4A-6Tx_t5mO(WFeyF@qXAm93-`8b<>!#N9M^VyvQ zcQH_oMJMYYs--HNa#{44b;a#oebdT1Pj8kEjiBgz{Z6o4if@Mn(B}%ay=@wpT`m{(GLYZ6r z24x2WHZ)kQ=!OWb0omM8Xbs2O^u=5JBY>v&4E{<~ZJ$0lRe zZHe2L+=cAL35Q$FBSNcLep-?=!f^DWmw+1f&P%*C;ckk%cqEk{f;>maLV{tl?4_NxYymuI(cU6TB^!p>~0C?G90 z^5AC4*z*paKDYN0x?|?))Z|Ka@}u!h+^4YJVBOqr%pvxZnADPR3Nc6W=Jw8!9Iet~ zt@IM;wW)*g8^12bz~!yIzS+BW!KlJ#cV(P-7&JN+QczjgWiS%qb%4@L0F?&RCaq<9>951B* E4;bqs_W%F@ literal 0 HcmV?d00001 diff --git a/ESN/EchoTorch-master/docs/images/echotorch_complete.png b/ESN/EchoTorch-master/docs/images/echotorch_complete.png new file mode 100644 index 0000000000000000000000000000000000000000..d89df64baa02cd742f11645cfccbece84a8ecd3c GIT binary patch literal 45075 zcmbTdbyOQsw?2#$+@%oQrNy;qaQEU~yl8NDcPnmz;_mLy7N=NoX$kJGKl;A+e*b@W zC0RLhR%SA1%X9X#_ns(KWf=@q5>yx%7z{aCNp%<)xOf;CSTq3A+l+QLGU3}7l9_^x zB+To-S3!4a%G(TzldLWn1_ll9-v<^ZD~I@P64_NwNeX!z0SQw;Fsvkz4+e%3Mov;( z({uS~%`4GD+wJuJbons{Tnc2io7Dk<;C^@pTg9&LWEP&IvC>$Ze}qGe#UK_8?e~NU z8>Bj;AVWVjjfh<{b(2{Mk7q~v!ga+(i5Co54zvh{$sRSwPmz?POSAo6HU2Rde6wzM zO#!v6piL~tC6PA^h}2XUSns;c0|!_gcP|ohaW$}x{d*rhI^BQx{@()^r}6*8paBSM z{O`5?Zv$53(f^+Qe;kbUbNpu|tpCTs|83R(-&y}ZmM_DjWwEn~)EKAR^CtfPF1yn) zJ@%yrmV`hl>ok$o2OpQK(U{x&_DR-;Jg^m3gdU^X|NbQo4?{T`3ZJ~=9|8~#6-;oiArJ`eNV(_TO zrgkJ+6VnH01;XR?4`HiG0!h+}H#MD5&7wUp`_F37LY2`^5y#Ia_gu^{V{~sqe(Gad zzFh-LPZdEm3_Fz=Ln0Cdgl~2LXQnAD#>ox~j-lK~ccUaf!!h}?R&iZ)!C)zySLiYwAnJm4T0TQzGA#u7|Feq3;|jA_6C@?b z9GFfLVguwLQA$xINEYXm$?o%^LP5|nppsN51MtH-S~m>)gn^>u#QUQPEb+yinM`pa zQN9Wl#h@h0omuK|0)vrRnM#P{7qyrXON>#v$ph83;ww14g(JYyTH;y)t3}N1kxBQk zQ~#s>Us$o&5FGfXYL5JO9K@IJxb69lfabS6RNs|(>XqaXa=4O-e;^p6%%C$N(22ty zpeLJ!!6l*&CsWQ)?MC8o)2brl(9%aX01|?W9gbFLB7?aiN?Gg#V?nVUguqRp=SJ9Y 
zNs^j3gt2f@`vG1?7tjNPGe+>8m1fHMpEaIvQGOt7;_si6otZ|MFtFL3=AK3>u@A!z zyI+;Hr7cEF$0;JAH{_T`m~%rj#2-WzAMP-Gtgm3*B_TscFWTVd`>@Wv`-47ApQ-yb_z#&xlm0_NrMhRDvMx zqIVo5)Ok%hbh&$k-LzceL0%GCoq>#oPO0juTB^gSD=0OX5uQ|1RMghRXz`{#@o#cL z9L@wOdgA4Hv(k473?XO?uN2{CcKj@bkiym4m1y%nl#E8HP!(j4p+ zJ*8C$P%r`Kf6?l3D=r4cdXXYxn%)fKc(trl8W7*YktJQGFPm-+nUw|{$e~g(M<*sH zvKCrycK$S*3l$KTmys_=iwQdnZ$P8cm5d%f9jzPoVW*XC9au(BrWp=Z2zO0k=%hOQ zgN?yY06<9q;pU*4nW#DMGUiaK;c#^U!c-!~Yzf|DK+^`^)${1Fg|Oe~`#_4}I|0!D7ma$_m1Rgh)Tbotzz_}E_Hs;yPb9Kr3o zviYzS;#9}lXI4Sk+4r$1YpTt^A`=h~Jz9X>ev|c`vFF6e_?}?=Nwu~b;Wn;*Hod!{ z4k`RDVy(Z#m}GV=KY#nM+?;;UH4IYQVAoIts(Z^y<3pIy?fT@)kb=j<8ot(9gasj= zUQlNpsWH@6Og6ASnvTbE6xk10;(Rj!)9~hbx0Er`XH3K;hMLHe#{Hh-cNcQGi*&xy zF%fd(PChGv91+Qu@MEb1)Za&k_2>gqfCvhbxHu3sb8U@deV8r{!1)WV_%)wf7i%*Fw*ER;v8WLFz#A z1nr2Snv13eH<#He|?(e<^w#;J>=4Hi~5l#*LdC?lGMD(dsBT6%ni z>I3{RQj*z^L9w$C+^*B`<^S~y90FpkLZxD7_{-2qf=aR`y30&XEKo`)y}QJ?5(`kE zV%3e&`6+|5vL7SVR;mEM=_t!x2G2|#{kKhX3j6qe(O*o-Su+OWLaL-3HT}hs0J+$aPJtTyn#vV_h1h>K*aCmq(8l z98{c^@>Epj*w;Ium1C6NCu0dyy^8yxo-iTbe!YosmUFA&1F%@Mjet`n?8lKc6dl>0 z`Ce@-I2na-vKn2I7Ht?-$`*{*pPWp{@+CGn1Gi`vBfB(|Ph#9&C4Z{6+37T0};jE#kC{2+kn2K9k}4CUjiH^04a_nw=HPqjh8}!)f#5s$lw(nl%*| z8~AT<_G00f@6y*^mIulCzd}VU{~MH8VK&yFTc?n$WzmS&%Z2a+qhVHt5mVA<05MkG z>?TH>Wp?7W@{+e;^a^-rbQZ%)?c)lhE2kwPQAv_SFex+kbd0V(N*?z3GfmcTdAPfs z+T_g$yVFIaBg%%|32Iu2H|QA?!V$fOT!M}$qKw$5>|n}p8}0g)yPMEXOc{R1@7C=_ z8!V9=CI?AU2{LMf*%8HUwEV}+8Op*x`fMbV3z+%g*+77(%|3aRX!#3FH?b{jzD993UkL}f*nC;fkS){pQY z2=j8w-a;&QdA-+=C5}A-O5QX~z06Xe`9`Mmlc(7yes3s&re7J6#57fd4(qS5;Q~;0 zf!1NLd2nKCalz~{S1zp^nsSb*k?IByJ|7nS8nZRQAZ=lPM*?6Z&cH?PfO31*g8P*|F_4DxZ^c*FK{**5I_cQOJ~#YOA<{ z$x>V;mQYZkXR}%o&nr>w`?sUpPO?lIaRQ*cnyw>2xk>n_T^S6xKJQyV zIZN2rNH?TZD$mxi`B9hHL1vDxuIPEG14u-uuMt%(B%ha+bO(L@(kZtaGuQDFukPBb9W7al)!m2|-C@%5j{jypYwgkGECbFE)7rW-Z5I|*%UXgj)71+w z2wSsf6c4`U_^+CMzJnmm?|E~R=`VAqGj?&%;QDFea2)b+F%h#`5>AYQg9>c9EhU>B zCe8NrcDZp5D7k-RFc@~J%vG~j(p(DSf@p21ha%rZF+&ip8LcT@uSpdGho_RBSpt3!&re&tZFYNbPU z4re8j0T$7W(jHBzs3wd%fgc>%@j*2UVoY$wtfD5yEc}gJ0rL`DYEPtL`iIv7_db!nXz$6=q;?%Fbv4c(V@{ z6Ug89^88p{5#7Lg*(f;X&#ytV%)PXm(Z(abBAL~CCtWXST2E;|*VH;by(9c|;4s@0 z9hgYn^dm#YBCjAS>JypUQAKp4iK4zvJ`!(f(z#~lB%ac=10Ny$s#${AK63~^Jnpt$ z4yguyl#4FojV6Ajgju1r>X3L;$gBsLNJ;e+Ti$mHK$2OFoF1;#%0GfImq7uqTj69h zGadFmhO%QRS~YCRwX!@HY;9gV!QtFZuN*fgsL0~VVLaP1x{L#SzwVV2;=jQ`0;p6# zv{N8pdB#=4R`S#+K8x{6L17{bBM#G;1?+vT<0*K)adzcW6={huzNE#_$(~LvFvjL~ z+Ylu&>qb2VD>3+_B8@uw?vQrvke%}HynZ+AbQh)$-WeG|YF+ncX%tfaqL59tgz3G&{2Q} zASmE2B4G1uYp`@Rcsq>Em&(r04(q{wWqEnK_VuAw<=$OOYna98JX&T|+~1gpipa>5 z%6`RVl+2^9t7|+69=UBZ7>yV+BGn>OKU^reBGz*6OL+Wf@i}@f@1BqeET|G&__&&M zkL>+Ort-Hc8ZP`;Tszv!j;8kW%N0KNC{Nf{hqb1#EN9^!M%Km zR*VAFWz@#^;*+=h+6CqNyQ=j1N8#{N_JAA=~rw9LDV?!w?Hw4fbc~Go5-t2Sv4WEh-&Z_4f5#Ym8@6d)SnfB?1{T4 zsOZ$F=!>(+b-7A?PtT;QV9Da)aSr#YoINR0NWZWVyIJf<bgsP~x+*v!CXTp9!R# z4v>QmD*lOPASHbpqhq6tQB`_(|NmI($NB|rAz)w zco+77SB+(^zf8;e;5#?TA(+2Q4eZKQ#}h_esD;HeeGtn#{DHM7h7Vq0Et{ku<3o#O zQY1M1>OrSB;@;vwH|BLR!;J6@S~HWiY1T>;Q{u2>FcgSON_?rLgKAg_3?yCce?QTecvkEYEcLd zCNiyJUkmuF>;6Hk|bH7z@m%gt9ZTGT(~zTk96o z^187q!?AseH*MhNaCT8Fm0HanmiS$qgd2KrY@mi(9K}Oz_L;-7)Fzf|+m`A?#44Bx zYqSz|fwBkcd2W6b@8b)9gB}oPjm2ZM2DaYQ*3dvt>E7QY65^ear$`kX5`{vfzr+oo z{h&7GD#Yv};zlVOOm>|gAXas2+kc~KDeSOJ5wgEbQp{ClkgUjd5BUN-8EDr(X4{{Z zwmhF7n_EIRR*YLO^HU~A&xCtFeSU-z6Xk&te;)A3)N=UCA$*(uSXD;HxS-yb2jR^P;6{_e7xL z8e*#I=tE0IR0SNarpbW%zHdB)cErcxKP?V+xPF?&0+*Cxqak8Wd)wO4c$A9EK{U$@L^@oMli{DfJ=?$zD4MbhXFlTS#`j-#*oD-0ru?N=6?Nms<;A<)jeD{W?SF9nZ#@FOmcHTN)#LM%iO>+Yv@550Yl{i* zA;^5K2&XD&S6?R5!$G~~B)o*6Pl7p0Np3n>H`qp2k#uV_DGM?R-`I{_4*Ku(hWckECOA`#9&p~q(`Z1X=f 
z6!!paLpOr_*C*&ZbR?{MUsjyLjZ>~%Wv}xXpa|_$`SHE%+rfV(YkmK++#hK7W>F+Ye0NV6;m(KrV&ctD+2vu9G*c|row#Dz zpdu&JIN1oPVx_^?xx!{i#>{AuCX4M{fZyn`7&#w&Zx1hXAYhz5F0ktQu`$-yY|&-7 z{ZYGpbJb#Uy?5xsCBI=&#!xC3x4!@-sY9uW3Kw^XHk6hYT3ET>zb`d-?fF|xS3^FQ^*({;a3=cn^#fcIi|lpK=FD`p|R-ZpZI3g z&w`m)Nbn-B_fz0`E$#E91m93%bN@i{qfKHsXHI&1d)!g;fzR?8g^YY)7!q!WhW(C98d{jN&Wcg%MA^N}kHFUnkLla$olA$y)S27PR3eV!be-}w{K zDq*R44qo1}nIj+hTMCnDPZ+Ox@mf)-MmT3F(Ma3A^O9h=Lp9_V$}&=sRMQca)$yot zn|ymqEPsUT&@wq^E>(?KsmUlQcW_HVe^T&ZQ)NfkLdVQWf%RekM^;oOSI2Y&xV1?1gsa2tZ!0>Ql2@=vcH%f*n#HlXGwmG_o5q!Sa}^aUTcR& zqe^^ht|Q~fD-gUP&{Bi`@hC!TxzD%S&r~5 z8Vp|M-3qOrh4bay-_7q2z$i?L`Et1qY_>ptJkYlN?0pHP~DCyjd^YR2|4A|d8sOmo;|D>2? zh8X{@0rdxIUX3tTi_X<*Fs&>T4(XsSEJ+_>H$@611Dqlduvaxwv?(O3wl!r4a%aPl zYJreVhmD-N#dpmj_ocrGi7|7xVbd1sa|P* z4XSQCW`$SQ_GC@@-!x(^8H}XV>h*4%7^lC5 zX;L1@+6UXULT^-AU}D#m}|Twg7&4zClWzNd0YAB;$D6VVsy zhj)-O`S6`~JDr7}$tV-hBjNU4)GLY)KxRWi`)o#2*-pA0qzk87PCwWDS{ zTHABkPF6Zu{7cq0*+1&Xy`oxmO6S5j5H`b{QGWzKekNzj9+^xHfQ0H z@7PqlaV@0_qd(DY3v&{)R23$FRboSO(DAK;`K^H7{nPdlrt75R z@Ggns9w+Z%u5_1X8{PGracGXOZ@_^@{`Ns-?JnEPHXFIxvGe3!G=y2tqmV;+WbbqI z90mz>2T#e%E)_S(aYHwwvhI`YF9GVY0!$P?UQC&mcqvO}V9w%dgBC$b)L52Uf%*UT0>I{2w6zz3>nN>%Z9buT45}nvF*kR}DygQ{-gY-!$A7v!pcMNlrW?y|eKH1vS;Q!PTQgracRLxz)U;U=@ zqnMs{NSAiKGY+|cja`3@2-5F)1C8SMI~?F)$L@7)fbf*>E|*F>93ypAb#;AvyPSME%inxUPwvKuuGjVJMdPV| z*wYJdCm)49pj_K(CHnXq9SL3hUAzFCRlaM_9DyKsEHar|O$pL>Ja zdVF?Qq1_!F@H86mXB9j(Av65$lZ|iP2T<4TM+nU2y)P8JGAMpK`o^lj`H#R_|Le;8 zOVQWMh~f)kbJ`NuUKv7VBOd<>Xex#4pFf(8m6=J);r!VHzDV{l^BCndNnV3TA6s{* zHGOUh)Tz}7k3B()HS||YLYj^3`@dtIoFKcCUXsK0U6r5fKGkuPu-o8`T48)beanq& zb_esKD6tp0t>7T>%#l`02fM+}cDcq?s`=8bkbg?D=ZX}8SEc~7ARv*fZ~je;jkQ7PjR>_enB`DEg-wzPws`@i6(U6pzUwBI0Vp4`1AAk+wCO&sJ@NGyWkn@@})R1Mn|z;r{Y^ng8nhcICVAw_EvlPS5>>W8T80 z^@=t3Hrc5svWovjiq{BN-g&{A4E4b2^ZMy|cJF=m?V*mx&CO{SswdyH5n?J1^e$$&l657+BNJw5tN2{!lj)=_p?}@) z^Yk;2-=Sj~zu$?SugZ1JSH#)SQ!x;Ad3my~G=ma=D`wTORNH}ORAbzKkeJx_4P&gN z&UKw+CU&S?SbHwk%kXmNb?fgJ#ekfK3;gw8wZIZjq%??Yyb+UTm@0wv6xuJFf=nsz zsz&Y%@Zz5s->uyQ@Y9gm&!QnJ4?Yy*aWA2E+jg$o2AQ<7ARB+CTj#vnJn$ zEaNBXbu=K?z?edPVizdRwxLY8C*s|P+N z;Qju=yE4C^!1nR$>r+17C?D_rcxi9HjEFnw>&^rdGEZs$<8MmDw{xig?8#N)$JWgWoRr!2g z>G9BF555ClMDT`iTKP(_#@YILQ)%v~%Ij51S@!pO2)O5_8RLy>q8LBTBFQ9tnhPPw zN-|?X+GW2+XZgDdegLjmw}HWvzabWRUC_nfuX74_o!RTF6V+=*t1h)1XSw#MM58mV zBGB{|%s1{sa8t8o6?_MmtI`=SH3>O+nNpezOL%LZiP#Ezv3K-YvEh@-ov<|UIc|PB zsC_vIxhV4fw+q*JbjJ{NZ6ed*HE0&6zSyL-2fU9ZolP&zg*lmwsEor@43)OqP&fch|E*~WKG7#z)e^?2MK zt5KxrN&V`r6GrN+g6f(E%u%iD${)#aeos7jo5_po1U*C1_ut!AIR&B<@OG+Np%}1NAy)Q9t)O^K2G-%R zsU6=u{6dnAVW!@!AlF2cERl_x$+HL+Q0$cA%OYGOB*1Oa8FJaU+{^`EJqkZOlXrfS z5xS}G^jerga_GKB$bb05;m~1|yW)5{(?WAo^R5#~nktyV|(YxpQzU30X7U-3lyb-G!z2Lo}={> zXrkK(-_HX4B0~;6_m&PF``>;spYtgVrfD1fKr_A@K)#Oh-!J)kVf=a+@b@<0_4civ zMobgZA87ngV!VyW<|*#aF-1gG3WPDfY5014B|M(jZ!m-fMoqWKqpxJklb#r@AmyRrm`>NWUCjy`xSTuW zy+0`wTw)xK>UU`hIWJz`UFBi9nAvrA=8kkT zCwSE6{qE61=39tT8hpC>NNo7P&q{9{hObwgxf6}+{L%!afey&Hcen`O!k+bG`JXkl z=*MulycILo??(7U3bF?`zW%Gey;TF4JWsv$6^ws=+_+aZdVN@gPG_)zlw%^#28enR zS!%nIzu73?KfYn+8*9Pi+wcYZ2lh^(wT;v)TNl#J^XM(=$j-H{Dd#9MEx5yD%7xQ~ zP!b*^ibVa)=XVnhl#q7t2q((9DM1x`Snp(e16Qzo_`s zFVGa8&9od(Cc3KOzabUdrsK=5(;$!pJba#HB}5J$#Nf+W5GMj{PB{8sO0;x_ zgX2XXAfjeMkm1DOicY=hW2>$zsK3=sd;zz)8-5O zf~k|4NT<6GKMfW5TVsl@$|UEAB2UeG`jpLkb~61Z+qa`xJU1TKM~Qmxq>P@ni7r>6 zB46KXpXc|*Ni6XRN2oLP=A+8@l=fNS|Ga|X%^9)wk5@ZA95LhGsxz=Nr?*~j5r!gQ ztEnQMZ`BEY=bT@3dV_`EmYRo)dwhCY3?LhQM;z&~9>wC_wpFH&T^OD*h#N_AZD{Ul zJ8qTqdm&Ku!>^B4X)`ZW`feuS^&O=kIu7j&o`I++r_m$a z(5%)U;8$aRl&`OU0IdXddyz5gk>N8GnzA$n>BDv10{oQ z$DUOyhNUY^BYp3~yCD!LSt|K_C}Z)T>2_6{klRvG$>-Cj<{0K;Xnn%(;W1-poTPZJ1=V 
ziCPBP?}9aSS6ctDw7c3oQ^Cw66q$y%kMS#_3s%D;w&3h*7 zsdmcPbT!Z9Ig~RONnJe);IkNhCr;!_#50!rF3x?b{*Y|_BQThd?r)E9XdGUXO#Ezk z`Vzu`ue7b@kbC^Mx~1>zZ!84}oML?Ijrdn1DFwor#~Qf*wz!A66Jg`UnkOH%(t|9? z%t}Q@I$h6e=PxYyNU1r!fyP^Ng@$icuD+?CiDi z_M|_{$l^EDkt}b}XBoQI{V}#XW43L`If2PYd9C|->q%@oy7NhoNc6&e|6ViRQ^ zJsattCTi(qqPZI~RW2*A9pq@K3J|9(L&(AgfhKcZJACqQeZ2Fms@XOzgQ6X(EA1XLEw9v5KAO(*vMHh(b4C{E_!sK9@J3m=MsCG zVe2?t3~AvK44DYAqC-n!HUy$WDlM3^NDoF7YZ+Qny9#AGq}X88WL9w?HmeBnld3rq zBJ{)o$dzzYn%`KFZJ#v}%J;lVzJAu2Qic9W_tb1gJBJ_-xlI;LM
    c$K{sGFXS-rHvnEWz$W$U*$ZZPv|*AaG8Dk_f8Fu1SKf5 zotG#l-}%*uk{C3N09#EE1xLaCvtmiyurznrhq(Q|EHD{{Y7>=W%`8$(u&wTg?1*MF z+zhdC5-k#n+nNcsQ&9P8eO;rM!Rev(f$isXzV3Tpr00mX=T}39<`iyD3ERz&k3F%U z4rHPIgJo((*|8)7W0BJ~E!L6rZGp|GA34N(TYoy8W82uONvozsRW&QA@>1YYm~Zb~ zA;Zqgg5ZGY%D^UeV0sDk#VMF(S`)raH8T|C^6Zm*`=&*KXqfMVsOQLdTsrO){ zrAMa7rL-EWgO&RVP~5aRLcJ$p?0FF4V3QShY!mm%WSNd~W)+opN1t<~5UL4|EeBq& zz|i`ZC+v-9@2~EVusymLztiI-!Rns!yKWl^9mteY6uD``T0e6PVN`PxFO>oqX>2Sb zlPP)5EI4LCum|KDrA=aflyOIlM^Vb)q$%$V&Gj&g{k>rJo_fm5nIf~A;;`+_N?R2# zeNJ_CbWAKQErlBL=sMO;P<-fB2sS-C96(0T2ioAV_pM* z-Ob3zAWo>UifWwsaNxQYIzB#LS6A2f^XJd>k>KX^&eI+r)-efJVU)rlZNzP|eu1vr zJEO0*76dCeir^D|R@_l81rc@&Lqe!uNzP{=JXT4oCL^yt?F5*l)KR`yUec_bC( z0isLyR*Jr{Q3(+LZ&UQ_PE{phpf_lwRg>os6&1ai29CaKM#;>`FmZ5j80)k%M}HHa z@Zo7|BsM7Q60u*KlH53LQ|RwDHF4NvHiY%BrHmT@RNoRL2aayW2qyAfpv^ql`a!{&)Mhn>Ghj$4v5GXp!m0*Mz#aYB- z5>C~Euz@{*P}M>$F$J**7@mw+!}+B4Ib1`_Iazk0&^_)Qf?W5GYW!~XXl`NeI$_pu zq5D7LFOQ`!A72k&Z$x@-Hw~9)*M_2Laz|*qO{MeE#r_IUt+1~61il5Z$>Db0>YH(Q z-x~Oy5AQfMIScL3e`cAQEpI}Z(c=;ooqE3A=q%p(2*A= zoK@-nl*1STPTr*~VJ#}^jDf|4OiOy@V%yf0`limQsrirC0+=a7_v&;I0pyk9c2S=p z02$7BYJ#5()?{@fo83dpvQZ+_dZaBWQsb97|N3X1CN`cXcJEeDryCYHKYkqRzFi-C z%cL~RasKhD5?;G1BZvJ?qdq5J&R9yLszoOX-cKuwLp4s1QhHO8nDl=_@n^QrVXhbumXVyET+`kmwpw0kE!&^gB@8@mLb$k>;|W7T;y zeJQ3oZzog4%JY=wEs=kY*(DLhZeS=GJo+t8r-wnkR|Q81U7K&3xumrro<+<}-L+Ff zH9W1E>PISK_4(vZcM)sY{EMLI{RoQtiS_y1>RuEE6m+G&Kms8^?Jpw{iqaKFw%G0FhyeP$xIXhyDG8j^VbvJjd?qvngZeH+Ow6rDhV_ zuNTwoe)(c&_-HR0XN>Rmr@0W(@uy}4?J`XdMOq0e6mXoR0Cxp;o{X*RE1T_EXrb^P8+cU78reX z3!9^vjj zFoy8v{6$d@k#;<0lo7*;hk36g92GU1EuPKbao9NV82FW4A^~+EB~4xd^&w zLX!g%CO(;On*+@^JzWNg8~^r~Hd}gmdHK$aIFHzaMo5c}4J#?hPZfgMP}YZQe#2-v zl4+-9RMlrr5=A`^z!vJP^?6RzZH!_FCo^AsLQdn>Jxx#4SF}ahF83{P37TI|g!UFp zctMqiml@q$Y$1}N!%{M;;d8-ML#~auO&7#Cso?B|py17}0wHTS*|3>B)81h0I{6}G z?%+@WvH91If|3FGKI;VfMB=#lXi2+N-m03gQ+PE7->vzBy4gvbvh(lyT&!sRPg1X8 z6h`?aN9k{Htq{sA^Bcah$H+eg?}AXa-?%agCT0x8n-v(V%y2U2_~lGCc~ zM1*>D1iANKm6d)=<23Me(G$<*i`knD#&*OcmEX-=r{GWxd3Rd}P$QtIxu7#Hewt>6 z&1T2-i@%Nwg1NhfUFIdOxrWdxEcdxCdz$ZKpplS;q@X5jN|oh65=Fimhy*o16z4Tw z=J`DeGtU=~q*RF6!hM=&vMfIf@jpr*p~2OaH0~VKDXBvWf5xpwfWV`wIQ6*8I*UD# zMB_55@yB9X1EsytG0h1S6dsD0`)JAP*uUoXd_f;`w^Hb;XcD&E| zV^imdvrB%Z#$O}gG5w#4fWA?Qot?<-@}-jCw|oItCTD730S?Yj=EN2n5Yl`ZN05tbHXiTT0#^IEH+J-5unTthXI>`8>bsX1G#rt~le z+<{HOa3CPK6SQNgG{W1zw7Lq^JA`z+qXt#Z3x0rthY7TuR_t5tfAg3-J10Fq-hxls zq46>cT<9$czJF-OCnm@iPgZ6m?|gc1eM%)uH}^YGP^jwK+S>lsXMWq)o#;Ro3Q9UD zsI|X<>)DfWFPo9}UAlTlh|d^D>Ixgj#$rIw7i_be(e$H$ArdhyI2Bc;;O!Aq=FFV= z@yQnDi$)A&OwA+FKR>cTmIx5T2_VRlfH_suA}kVAuBzcg#fu6njiwyL0c%&Jh*dNr z6B!^gf>SY&)Q)S%N|K<4!%RmqJJi@fvUjcGZxt}Nl|qAV7FtAnLG$&MxOU~7&^;V+ z)>3NW%XTM<=^o=4DoXI_ts1Y4SD5@2{hlWM@O9^%Z6qpF2Vtg+d~hm)T>a1Zs05C4 z=LeY4#jahRe?{_FCQ4%1ic*^Wv@`sGdo zHAeW*EQWIn#F^9un>b2;WN2lmpqS85T_lcp( zeF0nmjE>YP$*7emb;6g!HgY*5ac`&_$lCsT+#}FsNv}%%oD9K7fuZqqUagsm#31t? 
z!oRPuyr@rRoO^!Zc1_l>?kO}BB!%+~&O*N7C~Ji!Rl=n*51VukDgat}L|Tl+P#aX2 zb(U-Hl5NFycC2aU1{un5;j)Cu|0@@W-JLr(W`0QAhllK5US2|OifxQXhlkN^ja?DK z1}f*@c;4m!{a+8s$wJGp_0?5T#t-Md8NB)GdC6mR0r3?TtiJ-Cw)*w;U*CMoDk^Zc z#%kuX({uYSwIwFwe($Z88``3s#Oq%=+{n0tDc5$Q^?L=&B(yzSQjSvYFw&Vxp~pEl z!50BJpVl*9nJ=IrrXwyvMz{sl#%bW6yi`g^rifh9E}D0Y=Jb#8`tdz^uIo~?YPP+} zM?XWfs+vQy73d-$CzmY7>Cba=9aq^~pX-#l3voJRFty7tN%|E_9y%Bj54?-D+u9{F z{3n=|f8p^)=nb>1rKiH%rG?j+1*|cASh2_##bQ!%^bIbkw#10zGzxi$yaRB6Q{VZu z;b^OwIraEL&tekyrGUbJcx-MI`*!$5v@V~UPef#_BFjA1)71&QX(A0MB6L6_zUc+1 zGaqWw`DtCC9sX)%>f;lMJa2D4n)bb78XO)T&g^|XBPcu=Hx|bnf0w~pS=;M#Z5hgc zXqQzVL*!_&#}Uorl#@GX?Hs18>s&}WtJg62-R|NY7Nc;2=>CMK2J%4ULns>B&-eZ1 zn^B*!mLtGvaxp)-L0}pSr-XsIz#nHN%M6P+`y8pmU-E|*xyn3Cg8Rikt3?dl#&R`k zIW8LG8#A>Dl&tA-aT~IG6JRkm1^jXSxBu$a#r)qOq5JM4wL=buyre9{#B*C?)Z{j* zY8JHGYE0JB4`lki#FVuC>z@Tz+d@6j&h)vWu~L5PGesO=2Aj-?RiT}~X8K}7@}HEB zXxXHp`v>wlnxZw{?bXSi698Rbe@WPD+l~M!3_S8q&u=%y_kjtBYEE9B1g?)9(ePea zTnOKFxAN$294Ze=1`FI)7z0^$-C4z0j3VEp{^KPoY5){*8&Xrhh|24shyq2hh3y**k>n4-5%@yp zORjw9wR}i!2OT|I1e-++?NUv{ctX{hSKXhx4)Iz=ZqAuYDwN*(u%_^*gdTH|FXDNq zBZxv@Nm>rDVa~-CqjEH73$h+m9~}-W&ip{4dS@#WFoEj_?0gM(Yr}|O@#RC&sQVwM z?5+NZuzN)?qsWgivCTDL(VCw9a|q`i<5v3 zoV`h03QExd(-P4h2!eVGnIe2Aik>5pU)lleQ($?Y+WPwMO8@jz>9f%1LiykdKpJwl zU38;bca!70Ej%D2V~_caW~+*2*@`5bLhg*cL`k)i>r+E6!W??~^pS~~0b8WM zX>D6E8#$t5mVFHw!96H0j*RC!=MhtXoG?^4+c3+>HkVK}GVMxm0IA00=Ju9__F8JQCfxPh5U>%-0Im-o;e@{=FzCpHn$BIfd{rG4ywv|2AcJ z3PBx9x61f3moh&5RmmC-S(;NAg)7?8I3OK`U7bEnef`Sv-skjt&&Bj@Xd}R*ML*)Q zwGG{lA3p&y)xh((hp=)&#cc;{;vLjdR5*fmQU0R17+)Hz0XzVQ5<4BqZ3tWSyUiK>R~yp#YYc>@3GPl}DYc76ng z>y5vt`_HL+ZZ}WAn{NMIhAdI=CoVXVqMqf<1V-JS_P;fC_<_sa6#@wM&gyzPnfom& zFkFPzSB;V@OOTjg(}w``C+!U``=zICI31+&u>#3=Z^t~*y7gj%tsOE^Z~npAjymozpwyYFBt3d~IC%9Tcb0=Enk<(c8B zU^if;4~0#9PGy~SwZQ{2z`ijsVd|4mgUop(#whi>!CXwupY$a8wF8dMi2&G?842%h z!O)U&^!HsJ(R13m-TLFo83~Wy-5?C4(!*A9T)yTF@49(a@6I8t8rcHP0Z)O>+u4*> z)l|#8@7|?TxCpZr6<;Z3X|0Go2Fo!D(Zaf}zdvdng!4>Y`&UeKGg(jlSKb3M!?7Te zhlLmSx>u*$nz@yqgKtmn26^sKMYepIfffiY+W0w}&BpmKsiynIl!#lLOC2&6z|7y6 z_It8LTImHSo`7^eo}fox46Ds|W2F}21H%UDHPt?th~eP50o&{Ldq z_-4fX6;WDHVPh~`eocghu9KEbtrS12^xYDqx*6Y?# zA&Lr-BXb3Z{91-#{+Onl{Lne73a%3Hmv@m*N=uvWb_?*B{#*IYTUlC$cJ`jQY@miZ z_p?C#S z-zI~F=xs$uiY`h$=!+FIV$Na;tC-Tib`b5)$qUQKOKn;HrVjJhwM7XeQL$8Y4*aUD=Q1=8C^mkMg zkU}RkCdRNrin?<(VNa8Hr8ynGRz+niN0tf0*1XzYG(wIt;vFmBv)5)G8l>VE~kAxWh+z()vB9fQRee){eu?|1G zAjEgX7at$Dr!fkRrb6THQDc80*T)XlziV*8YhZf+0RWAM=jZ47qzZ{pA2kkR(bAVC z^y9zE9aJ)HfhG&>e+biyg43^@uWly0tX+GsZ#2umH~_rC1{4~F&m}bCKYZWVx)LTQ zC!aQ-RZ34n2-!Qgyo4V6>YJ<-tc2mBXftrMq{~L^lu5+SYX^^EjExtqf!)JVpQ8>a z+LL`#uAiO7@AF>yN7})s_uDV%H|Nu}+j=>*HhclnOi6tfWzd)4gHW=}KSRz{Q}S|f z``p7!#u>62ViSZNvPP-xsUSi)yBHFiYNzw|H#X)hs!GNuLDF?t3S6NOSGOSE08@Yz zb4Anlcn)Pi@qV0(�cjhuN)c`88)VklSqCO#4V5Q6xHBJb(92Y|mvnRPs#F*0ry3ukM)oujBg1SNi|`w)Wt<4=y5ndp#7mgGSHrvBkc}pe)oE zK#$H7eR{(G^qvNd>7NJFRBf}NTR1o%@?5(SJb}Bof>a4EE*33Zdw-7_UmTs*Djn-0 zLgvcSVUbh=Za*uL9eQT zARivj)7~VKM0l*|GOT+n$_`GnT;i7^f)K6) zb(Jrp)&B|ITW5T05G<5$u7LfAIh`GC($eooNG>}!_pc^pbBf-ak{`di?!Vto51*{g z!MP!vlLtL6NA>`eBG&ywX-aT(d*%0C@Twb@7M1o+ZR`$GJwO#ZxV6>)v1tI`i#W3C zISo49K!0nBVz9h!@15t@;k#68K4P0?_U7}<$)nh$=-*|P-{qWQC#zVhEhSLC?HdMv zGxXiq*ct^Qn8BOl)z^CAE~i_)p93$Z0AgP1t6!gONC6A>wAdr)Qba-?Xzv*>%=q?F z@ok%J^3|pKIQ_p}dZ2mMcbQEy1FWjI^z{H?i`%wz?$q}V@|FK;RTdW)jT5O5w5=-I z2)Ve%so&^)%?5LRN!mCrf`tcQ(jGbEa9laHT=_l-IMBVzQ7Ki7sy<>VLz$8#)X=2oSiki2q)8-U=(IenFjV{O!ykh4 zYqX|TA=#?>2?2F0G8itx;FAx3W|W)>g7G>;Xm*}bmII6R@PkM7kGb@8+m$j>9pUQn zv~eTxDlJ=!F+Gi;F#X&`%ocf>b;+AEj;T@10PcLNprEgWLD}Am{wxlcpUV~w&mK|O zo_(p)8_jP>Xtk5sQDv5(`c2-(Lw;VvzgJk{AxmL!#c!%CqVi+wiq`mx%7^20Fr=Zp 
z<0XZ{tah`8j>DST$6I2!WU14GUY_UQd+E1V_HReoU))UCn+?Hol8DdGyH|PvNZ>2; zp3t!xYU0Bg;X{*4UX`^au8@$>?A&#e=%} zgRUW6zjFe-@9$EO7o>w*|zY(tgD{J{M)sBY=u}UzL=R&ct!sel8g%%h*p;bjc z9%BsTqo}LTyf}H-_Yo? z9ZSH~@h*_h?zMd6etfJwJX*L92tNC42LZUltKHD2bUVAz^udz^&Jw%5RhzCCyM%8= z$t{xh%Eu)F1je z;XtK6&aavYj*ks-MMu_af6}11lIj@HOXU?17?57(GfUz@yPuzvpZg<4z6d(fX)a5f z=f$S(#mU2Z|IFm;=w1e5IZ(DbYydoV*Jj%*GC)e`%-e5&jW#oM>${ma(rsP;tuoK` z3*fl0((iWXOP{!^MT0UppRf6EVeI-t?L$b-G6)>a8P!Z+fGUg2rb-}?Kti!L7B5VT zBZC|IMBLFtyfiP7k6hPO%auR9-q6o{6*tIR$!T2tg=N*imqp&F5(43oOZpmA>zh}*r_a!--dX-hWJ{vQyE}P%OYX!eWqz|~ zI6F?QiK4uLl3JEt>zOFge?_4wy=EKSFqC#H?0Y%Dc+WcWPM7x0l*X(Bq~%kxQ8=v0 zfLEnV8%{QBC3ZF((jo9taJ%Fn%j;KuNf^}~%fA4QH5qVKm@;z0 zmUT4F&vlUcGP81)$TfAH^>A&!ge{ z4n3|j$jc!1{?o@Ev+YGi1+0K*F2yr6nkp@obD+YB{?q?(j~6|p+o0=*PUQxa99wta zMUvcBA${0>V-i~fKxpzbABHtb&Nv8C2~y$zl6*TkQ>g0f?0oi^;!(SHSqAPEc4WgQ zXcG*G!QDH2!YH-pB%vtcs88sjY7)y_I`iY$`m)QeyY+H$TA6Z~)SRnoAJWy-gb(Xo zE!x>yscJ#*^nrRU#h6VTe|lR;!DRXu;ls-PUFXAI9B^?m`@UMzk<~oursI>->4&}8 zz6;)@yel}R+X;P|1^JuU>mxnU*9&;BO+xK+bKZo%kpub?Q+f+ZpsB@K-b^()=E$hI zy5nsbLfZHHdBp`vMdXD4i>bwn!_AEwvh_lpEVPXjQI z+?SFQ#WGO7P94kRIjJs&MI)TN6Ux~ zWywORu^*bNk=pKY z2^;yl7o9EX!`-6MK>i*x{uewWnJssD^yahAJ_q6F3s!Ewa}D!fEmAMtcezcbVC9>M z$&wPEl4vt)Ur)ErQG0Cn-rjtE@Qq-n=dj4;Upj6IXi!ddN5(+G-%fn^@Y4=0DzyC@oo+$?&5h)n zinQi#gVZXFf*Oz$DT^;^Y)Hwh_8L03z4xXb$<8~xR^oAPvw@GnUo-B2 zmkPK}+24iutQd+3YqBa21`%%0h>)M!`Jgm#@Ny)#U95__FeoqhhKOfM!b$5f6+&C6 zo@2T=+jJ^e)+s>9LUScgMhopcr-1|kZA>6|?(OZy_H?cJZ;s%j&fvI!(_WvAcIGjD z*s{+5gyCbjcfi0?tq@|32k77WC-6l+^9EP`ON%5Rj-TsNDbWklp(}x|fnFSkYUgoj zpE0g~pa}QXTy||Di4xRjD9R~b*=NbO3$5N{RS*O}+4f9SKaWV3G4PN6QbWDe2CLM*`rzrhrInpFWe zAPM$aHU{(AQKB$=Camu2S(0nUr7y)NC0NJH>QUigc2<;nWNErC{M1eGU&7@qmszU$ z5#kd6;AvCkUHI+p+rHc0CeNL*-kGmiYKPtH5u!~cD}SP#f-cbwhwymn_kj{wtIZ2@Y&R;YCqo+CWqVS25HMOC(k=m(9y6X)NUX44Tgu3NZC6D4ao(zWX5o{dZtlsrXf;caVxXS<7$* z>j`IArr`L5-#h%^4>m zJ9*-g+VrMawZovfDyIp+5!K+)v#wu_TtQD~u+!5h{8o@#!bUzO}yiQ<(XKZt1hs--F

    g{b^wJkBg0**lZJ*xJh!mSFIDO3o1);wnB7^wIFEP4eESEu2pUr4EaJGDX0p_ zhZ4xBJnO3;90a?A6E0=SxVwu}(Y3WAab}fxjS8GUb_PCH!)+LeGfPRo^v+w4y~OX< z8}F(9qwS9Czs3b>of+*+AQ_?Q6qLb2*{3niJpbv_pC{6Ls6NAy#G81|_I2&aUW_eZ zd`JCQpKHV{E;-=SDE@XrN#`Rf-ENzw$`f`95KB@a!J@MT$#S+W#XRc?clOUs7$fAL zb4*kJo*svGH>+y{>8%JC%v*?HDdQ{BIp#6i_K7+4^hqu!uLp5sDc`qy6(+-Vj?Quh z0xV0dR~glNaIvUPL&c?>+u7!*oUH0ANeO{TLbw3{R`6lgmOX1X;LV{fUl}F~9S&$2 zPEMbF8<^2XRJl)Go`WoK2Z{SeX*SFqVB&>3^4e(HQC_XUeR99*;!hH-iY``A;t{Bl zNaj%3tfl;{Q#20#9}6(afWJxv*Px_Al)jg?iJ9G68w^l5q(RN*MTJQU*$1H~(+COP z5c&Q>cuU{@@oDue8Ci4=HxOW);ny0pFq%CfFgAI)*BqVYcN_{BIStmCXnUdEHmJC99O-6RmK~z^pX5Dc=K$xz~0e5JVP$X zfc}L{z?fhHLYNyJ$zW994BN#Zu})d{&;)S9FKCl6cU93k(&_}ZIqZR_FSU&Fi1Pfr zFB62%Ye{%jl+-Nt<1KGw6n^6U@=fBsRQsBHHH~=?LXIZRu!%C_kyT#ju1}8{+A(#e zlcEuji(pj6$3LAtp%azk%&XiuvD6QB`95x?EW9FPKPSZM#3^o^wj>u6YgK^Q#3y-9 zp}nfD@>ePUO3*uBoyV#8#}J_5DOtnhpOLQW;0y%GuI zlptcssNUD|;@hXGz+BhHI5DeynDPj-x0j8cW2{bWzv&%!fABxfGE*&PMwjm~26JQG zKgS7GEY*%H3A?%yIl|{N88|YD54=b#0XE@vK4|B#xn61 zM;K^xfA)WNt+&(G(h~OPUlaULnEhBc1;2UO1DKzCK0W~3?+)<0zuwIRB|bVVrrW-Q zm?c*p3jC-~=sV3H3h2N2_pJ9o;cij$9@OYJ_4l!#6XxneNc%h*aUo}&tUQr_1%IqY1A9-nySZ9vJvUd&{qCI4ow zRGWm=HeQFqMX9Y+s^aW+1x#;NmBX=)OWV=`J3=u*x5nQbVO{hO^(Gq{z~ny@Ol4DO zhl+1njok^}k1!7X7SdxQq%XI%2XnDihO_PKB) zNNSIAV<*g!W>ipXS>eSlk?s-)pAr!pIXQ*v<-s#2HSSN^1RRs!!Ytp?D&zYrh(_?f z-}6gh%~hF*#y*NB7h65yYj#TP*({#-CRL~&E0{-*;F|M(!>;45vZCP0L`;E{K?=e0 z)cG7}(Q6%RsSt6o9I8=IMX;P$pFd0PDB(*cn50r&w)MZBW3XLfdVGwIDC+yL7=ry2d`38WX&04cf6sI(FSE{Lm zGctYW29w<9E(&Mlk|bk<-jE8$DwzpG@}~5n$GUcTUOc?GE-<`DufKV>`lVFghL%)U zMBhP;s=}WalMf;*NrJboBQhqh)tDiD_?Re*1|bMp71JJQ$FKur9X{}Tyf|*AM53tdC&69gzd~=v zFVPWd#regPA)GWizIv=(vWQ01-VA+Vx`2GdVF4b_*q5>-Yn%}o)NV25);XpVwU)il zxb6gXEZ4=xVr_}^zCh+eiYrilLV%))g<7If;1p@V{Lnp~il{c}xpT)T6i!Jf$CNOD zM94I^$Tojw##3nv3q&fJ&QCCNVDdqJI5^aVY2;p)d?NMY;w6Ti~ z^gs|s=&WA=ps7InTlY8E4Vq(_Uk0GxkQaTDUjh8B0hScf)qqj_TYl&(w!1L zShc!kgzSx+>n%1Qp$EDr3RvRHsIZy+5TgEV6}D`(eP&*UABHLgXPYmf3cMyR>nO0| zdRVMjH4!gtoMdQy9Vbch{E4q(0FrFsicO24`_j-5h=jqjNj6pz6#`NNO`W6D2;qHm zUzFGo7u#e=z7r14&~JR&mH4xQ9@VQ%V1agX)&{y zBqG%9`=36Y3W1N}F7{!&b}-HrV5s^0%O0?b=MX*p z3lq&)bMLMBAjrJ5^WHxn*sMmTC0j=9&_|7YRPlcD~`S>hm=OLk(IDSqEwhM%gTfZPj`V zF@sOft+x)rRr&!h<5@wz#WyRMnk3KhAsO})rpl~ZLnXeF9^ zCp&w@BHr!qWyt2yXlJrGB(LYIiNNiq#;KBk8$3RAPjr+o@pXQ#_TN-lAqyk?vp$xH zSRxf9G6^Sf>qpt;3c|L%DM5Tg8cFns5_kWeFYkhJ{jI}CYZ2(*B}ZV)f_XTrF1ht zM*7oa+H@Oy`qSORae-extF7N@|EZ4t{^g;qznw+CC5%E=ff-Cf`k9287a@zy5OyS= zU|3cMJx0cvjb5-fgxJzF{Br+iZJx|h@LCKIF=Lh242drNRf;~d#%Hu0|0zoRi zW1Ghg?yqUlJwg|zQ;pez&(I-U6_qGMZrinA74a^8MWgeUne^8-b*MQI8@WiD)hJjw z2}x6*;*6Uu{6<&Fgaon(_i8=tCMwz8$S0U;yLl1TjcSy;@P&%9JiDm>KA z+~iPuX{G+m&GY4!vgaBpfTmiX~RIzJ_x0Uc@qhwf;WqOJuRL&w(@LkPm}0 zbz9Y_tZb@XF$q!i@dt zC=Sn~yQ}YN+$hXy4Fcyjm48fC3$3WjZ7s;Ee)$K#gesGzx{3+NIpyh zbGADagq`Ean@o`WLl8#B#Pm8oCMISzZ#Tcpi?yIduJKrBoup?Zmtgw4(h&vy`|whM zfe@20L8@;;+&pN;f;+0ulx$pIFVAfX4-DV`VeSEvRXazk*omnje<1a7Y5#M+ZE7CQ^kpSKT)fEcI;RoP`mpBoRVN zkTYm$#Of9uk(N3C>zVMb(sD8JFkKRrkBdQhV0M3{nsIjalpNS|Q;l_JjQ;Tf6ySZy z!4sNLdA#&3iut5e-Fm_gDUyp#ActDGYmTLP4oll&AaOrN-f8`eeSJd)Yu?`c(hY0J zBlHoyM1jQb?ocNOTpn7eU&JNLeCuIoz=1P$)u;ED;(Ku2FY3x2brb)dHjHLPx!UK? 
zp&6t;r-ZTKq(Jn@e^DtgrR2!4kW}HU+i@y8MWoDC?Y7AyK{Zt1oK#kI{w0PCh(qcq zkIjFNNPt9aX|c8VSLrIlt7Oqh0D-_%Q`UC24ZLWCk4LECG1ZM`k>^pDqZ!sDf(SM^ zQ_N@A#HUw-{)C^mWpb79CPoJb@r>AH$qLEv@SGtSC@JLa<>0K^2sQ_NeqKSfR3YV7 zu4G1bj(G~bq2xV^Tq;h;K%|OB0i>tX*ScvF_^&JS9rrHiw#3TMR!gG~y@;1n*s!58 z=HxbLl>AQfXCzXhA!+0Se4(x!z`1bXq8wRA`m|x`A56P+TI=#(t6vQroCMGhdifu~ z!pqk__|1F`C!7y}qsppS+fZ(rQ+deYqTYZTry-OGzh&zfZMgCbZa0pD>qO{?a&NH zaaf(w^nt7QQt45a9E-dFyKeYNGAFn`W`x||XxG3<+Kl8Amo8GgocpO;Wh znf+?uOz&6gJ-j3?SzG+rLEa8O&Jp>i9&*f17WEZ|nZ7kz$||{bED3hJ%~!G`a+q=s zyWszz(&MX>{7;U-wG7 z6qJ-qLW_hcQSElW# ztTcP3!f6B@s}ITTz7Xx4F!`wb`EsTj1YWszfXZB@%P6IBOJ>_UnnrZxd1wuuR=IN8 zfOE25Wt{5}DKWwt*#GjwbkKe!HXcuvSM!|jFJlRo$p#kJO!Fa>MLp5e!$EilZ1|hd zF7%aFCHAvO+~H;~i@P^p;F9y49h(6>n^UGl(*j*y5NTy_Z0dx%>4Ey2rW3lFMHZbe z5rg82+EnQvQAr&YuT(whgaNn-C;qaPI^qMf6ud&2M4?pQ#pEQYsa`pka*CDNCLFC= zo>>xfg8j)x;~mYP}cV-|h%(( zI4Vz7Y5XiwQ5<7b_d9E!7rdk?a)2t1m(I}S$)rYBa{%3kc+C{Jy09dTo&3NW0iI1SqcgUS}J)q@it8e z?&Jg{P@92NonKPTbIn7qHgpCzLMLP_UQ_4FzkCXlNu5}Q$qQ8lBT3%A^G{-jaCHhd zk#W`^-c34jr&3JJlmtu=t8Rby)2u(q#^E-;=;@;wQPbaDK-Q!n1=)29nbcVqO{@H9 z7Dj3snM#OgQuD0aS~O7%7o&rX67AKD4y3SRfLb+YALNH#duXQ{QVbEO5T3Ftkt)ZI{aL+(EaM z9_?s;#Z*w1K~SblWB@%J@OQRMHn31dn`X}cK#rMTi&b8noo)52@9D$_ZJVAAu!E>L z9DC91@obHgecorg66$7_y!Ix;%+z7(Q6w@UM#`5hpjQZW&oQ{zlqbL#bOtzRdMb{+ zntRO;a_61-n*zoBecc1}`K3V{Pt0*Z7M<#;omE zg~vl(Ato7fSvWmRF>R82``p@VN`+`Aon<0*ww5<#pjMj%x&-9p(VARoHx&F1EjEV( zC1ED)3?LO-a85E3GiQ2MXF-YTF{=LG9-kprw1+s$xO zYQ5KQ#o#bI3GHJh58VCmx0s*m>Ga-5j$p@dZIKN9_{-d$G=X*pft%&}>r(kD>~2b= zONPdoiVJ)EJvSfp(}4h2gbalE7roiYL;i?0B#tTKqpDmf3vvmX6wk{mPE{8y&rljO zJFn%a|Hm#H$jtBFO?|+#u4h%po4-xow?c*mzv5#4Xvt2-WTCU zeKSWVG2GnjHU)m?z-{ERySjw&@`DzG|CXgVZ8vG!CEG@}c_Ii~MWq}>am2f@eeR6djlgqhXXrsjBM%w z%Xg9gz23$FF+)-TKonjlDBp$E$xH`+D8zP^oU>pDd60Aj@SD*fhsS zz96sz{YN_;j$!Z2U^-nAIh>Kn&okbcus}&a_FZh=sBJkJu2-rp4$=fOI;GATuGK-p z@RPi+=P^^$Z4Y3uG9el9RVD{~gYREDn4*|KxtDI@8Iv(sH)$mWmS7=b9jkTP$%)qy z>E*_xUJUBKn#P$%8)#0-sf;Z!&WQMe>U3>Kp(Vpet}V=3T1(oSOcyT^x6!XsUh@o; zbeOEfSn=2jT)i~}-@fV{#9QN;gUbzMYe*SS=ZVMLF6d6>6y7l;9}iN*LtaluiB0LW z0Exb$qEZW28&6y2^r~>34-50qasQj=SfN?u>_JUu%6f47?5G}C4u?(U*`zfXB0nwf z6E-lw-fT0GcFv@rgwUg6BvWA#oF^*Puz@nAWp7jbUqBr0!#b5(#$1~7l{5b?x_dxo z*5gBAYNeidfw^lL_=Rqi{bxmvAccwv3%>$3?WYg*L#EG=0cH#dTU|1R&Ic~fv1VQ~ z+`Vz4&c{cuF`WJQUQ`!q&XhkSZs-jo23I5_pemuRg37=TDy0^;fHKOjk?--4n!-}h zh)H;bP*A9jl6IPeO|=Rv26?N-V2jn{7j0!lW0-ybi^|&R8Nr_AU{p4RFX2RTlsXc_ zgA}ObvZc|dF{UZxihjzSex zw4wuLtaTe!Bwx)ir{+)G^>3mkgnazJY|5tx<=|}^L{R;A3HsS42dmH7m`KKek~1dA z%v*5UBEQ7&@@#N{Gr)+;HpM!~II`}!3eG6oPfO=xIx2Bn2HMgRfpkM(YdQED2HAWD zxjmN-R?mh0>T)=~rM|s$v0g?{yb$?w{R0YS4DL24b}%B0w`04#g5x^5&|=o?R*EDh zDK#Tj>7KI0hW2mNzas+u$XYpO@>l)rhvHDFX3eg@eIcJ2dWJ=o7`aGNGIaMe zm4IVORGF7nlZ;&M5Letbnq+96H9A=+>I|GtAeNw<%A{?hjP@MOauJ%QcT@WRi;^;A z*&n*S4$j2L=U^9Q2OO0&-E6uo%5p3}&yCSj+rTf>;4gGCGM4i z`ebd552tB$$wd^<*7@i_7!Ud&hR#9YcAS`<(xzAMMe40(1A@@F1|HTtp;G>BK%>Zoj*N6+6grimJBs9 zvz40qMV-@O?xL@()DkaP=~PRjCsK*!9VV(qd3%M$m9;0H=wQ56sPQ^$_u>YZaF3TX z=+NEz70P1cx#0EIL(ionezTI8whQ4|-dp^({uO40jGGEb_j|VSF}&1tDvk)%8V^i) zrGR6zjz^a~6<@w4Q>mOy0qG#MWeu~c)yo#q*H=5T==hoO1e+f_Voq6eS9QHtxqF#c zvMqHk1&sqk)K?AZ$jx6cH;Ye_lgw%}!!Q3o7Jw3=X2t3~TnZh7JRVoy_~3VjOOx}; zZHuCx1xcRiiS9iQLr)qqrGRrQjK{^48RQrpEx^AAz0WDzGKZVWR4N8$kXW3OI& zhL(2`y{Xq%h75G~9Y5O&m+31<##pX$L-f=!X&11pwjHV(i)CH(_s+XG0_*=q?GkVi z9I4iE$`O=tGNizT)G`T}ZB6Rw=V7sKN!H?|qT6V0+R}12=!|60TBKIRB)IOH>7H(5 znK4=PC|QLuNK3h1Fyjk{9HH|EPHg4aX_Cb=nc~sJ;z`s|<2rl{sp-h3_gVvLW-4Zo zHUh(TE6`G4DBE9D|CFk9Nt0z4jCz}dhR-^RH=XvHz!l9vO+(nk$dX`t zF&w1UF0xx-%RpJ2BRs29g4H2X`ghwjiKUG+J;UQLzZRrS43a-We(sSGH`9l3al*hA 
zgN92pDm5lj7h8t^;`PV7Ig+tQHl+jm8u`C8cYQc=K4`MyGjbw=E{sQww-_`H*LWoa z(;x2;A3`pKZQW4QG)wejLD(ktN*Nd)?9jfFgBMsXXsk?MMNQ&Zj*CDUE=jMw58ja& zIo;Ajcx6iCL~^^gV>hYzoaC+4aB*{XEC zz;(9pYh^BKv+&jok2nVPJS>oLYDxv=Afh|fL|Okq6cG~>`&dQ}?Mu|w%@^X${%k2Hj#jP1!7_d0(n1nReKdAoYC}sy) zVj0&Wz@eZPrhJB;+QDXh?otvZ!D)$nJnLQ}B^L9jlS>_@$icYTzS)~uao%c#aJ7BI zu;uHHVI#R*VYrPdoF$y4o9t>Sv)Ie1vojRVn4*(rN7=nr`e}^UFK_-0F@G{&g^+J3 zLv@ml;~4=2_*TVtDs%1kJ?hUq*f>jT?>3<&d`Ufi3! zuRxfm={S8s_yri$ zQ8t_LpmMfcx}(~cGK10?TNb?oVsW5Yn6Of28^w5(Hk3IC%gk^TD7xi{5rlm96?(pj zX!Is5{#j{%I_6GXWR;Ye1dy1qN|bcd#!UHlDo z23P7u3dd+sz7mjO3U*xMRfQlcRF!|zs;M~w#xIlE=3e;!Dm(LV zsNVno>zzs{Bumyrk)%*r#!?DVmSh(bl0x=saP{pFZ#R`}+RA*XN&K*Hx}-I_^2=KKK3F&UrnaH`BreHFd)~OS?1lxSDZg1xGm~ znOoncOtEE}q@OxvB|N{`+D4Z6l=w;g@cHW>na$sc@W%BPaOPiOk+Ujfu0pxDAcAUL z15Y2~r+-uD?tbgO4p08u@Fz0j`a=`4N)a!^3SO%80XqLm1Jn6g{gZj;tB(?S6>x%A zd{4JZT>BFKyJ?2*{n5-kH?z993lTRztB8a|-~FK8e_plYE|=hcD)(N^(O5|)jV$SS zP3=&f0P~5K-&ZFBc~qC9&rL6%d6-%8DOS68DFR2-(n;d$hMh`1%aY-gsm+(IN~fv& zUI$%JW$`RnhVOMtu-v*nd+`sRpNKa&tcLPLR%XsTa}n?4jT9x)~5O zHW`$JP6-4Ue?gsAS$?GXRKP7}o^=?(Y1H?%w?Aj9dsL^Fq|kl5PWp7ty(^QdXf)fh znC=@bQ4?vMwjW_8dQ$~V7QXg6hgVM$UpI3#Z=YlSYU@AYSS_`N=}_6gnKB!wF7y}c zdl}1~+tJzkD{jE@k&J3<@KuDcBR<8(%SrZLuwkY?tKH?hp)p(ndS?&7Ly&X#q&_7I ziN0SA`See$Bh2Zid0#&JV(EnCr5KB%Cj7;Se7PQ4wx1u~9ebX7s^}jKijGu7UeWF7=zJiMeNO+PhV7~B_LvL#4w>#pGY~M5 zkQkl(h@11DbQfq%E`^tlhFbP#sD|nE^%^SN&XeDk;IjK@Ae(zJI*=d}=IYTz_iJ$^E!>$bXoCPgBF>P$W zTQ1{gib?+IydSSe6XF!tw{CP>n%#16t*!q{;K<>_EsWO_dNqPyt9#AlM{?+Id=XL) zuoMkqET+v?WiT6EzSHne5&2vw?h z(XrjrPYyk7#JY&2=r@90$LST+E-o~>UwXAn(HwIqJ|n7pXdeb;qG~>fm#aP>tXpVwJJ(081{-T-6YX z(|N|GZuRfu)?Wpgv6v~5Pam(H54spoE7JJ8t@AI@wE@FRLCkU1#_pahoHiaMwWQI@ zTI5TMhU|w$@~;kj45aUG(zm!rWk2;+HDJ;*{So~|TR~fO9kGb(Lh8EQG4}-2^-b7D z?pFMC?>>9(-97jj$y!7;$AkQ^cK6-nS_)d!<9YK%*^2pdmUoqpqWQVw+C-Oe`W`A6c%SYt-m6!7=(tc#I(6>S)LdQm zwVZPfO^y;ne8!*L+{hPwteJ?6D$#}qQ5brEbnU(H!UV(MDVAq!wZqr+Vy``ammTqr z`#G;((5*6C7~LzGU|C)LLEb3OG*y+;w+DG{<=}L#lMd(8T1wpiXBjix);Xien^`$9 zRd?cR4@~3Xz;wI8Y@fA?P^F#-o0xO>i3|O@>T6|&EL_Gcm$ft9N$@Y@v~?Yc>+dee zO>g|ZlzB$3G6aFX#sBsm#XmP*$K6@I_^}qTuukv{V7Qvv*y)$16Q7T7%6IEzTo&=C z>kR%Z!4`UQQ)U0g+id9}jUcv{h4i7HbY%E{{`HCZZb97xQM+^CIow-Ym^FNZ{8uQw zMrPc`fj6ygP=T{4+qy_b)ujqH3j^Kgg*eiQ#*F-Ak&L^ziVq>oWd^z`+5Z^o5H(dk zvj!Wn2U2)sJ$9lGMO{?$ben^|r8M9zJ z`bL%(sUmkZp?P9H8^*uA@{1mmz6I|(Nhrn3~F+Mi_SZR$5V3}?d^c;XbfeUwJa z%xx^_!zJk0s%rR;a)`=n>zy*svK5%yl}+id^Nkv)z3?7Gzp>lVtfsE-zVGb5U6XG0 zA&+)h+2_?y3%*nDk2m-lA^Ox=Y3UYhOK%T|e~1MEvnLm+)dyWT8%> zbH+JS`UkOE%tkiKim3xS!xM9FMdS!~t-Z##Nd;W`bHiv-F=I8k;p2_XcAJJI)wknU z=A63we-laTl6uL63r1tx8rF-xmo_i;?s_Nt^WV=`wywcuW#_nAyT4`>(iJt1?L@#= zYtLse**-<`?b2I5dp~!3BA@w*Xkqi#jcH*mDa$KG0{!Z9iw{}sU!9FsAE!tdX#M?^ zg{2gGcY6IEx-_S1V6}g@M6&Fp#Q^E@7A5duhX$`6J)G@_5usP~xGaKJUn|A)Yt7pk3%Cd+mj> ze`8*j)x6j?#aHF}GmckxH{Ii}=rcWqSL*lb_J3{H!)Ir$8IEbu?sk$`%jb#X4?o#Z zI7jt5<2zH6^?wD^R9iaD+BvtpG?`mE21w=2D#1w6c1 z(hv9F&}A+duGA{H>S=^DN>Dw1X5<*<>ST_tKqZ~*$54$0@36psS1OhVce@bhczD0T ztbvp^SyB-yT482*YkBTS4M+4e0%0@m%C*vLmGaFfEGF}$`_`pznru^pp*e|!COX~% z%Pa&Un}JDNGp&HZkI-FV=UZh=uMxn-;1)YjG%5Mb zHlr|(%(X1NN@dx4{j5p{^bzg;YFK_8_U1X2)K1Rh|2jq%-$U;gN7vSOJA-W@RZ=q!2#vWr9eD!?}3Eoi?KA05#cagsL!2$3=1zu=wANJo07&1%KEo9M;(>Y$SDM;=HtRn9T#)o zSsSN(2?>tSOukWS(Q>q;W|r%Q!&uvFS29Fle)1$xztG^1#9ZB?^%eX!J}QbZbZ%lL zh}rkZ@zfVc%4lWMv??9oUJj97j3pi)OqARC&Nt`i(^2j1>IfhRy#h6d6~P{@u~(r@HWJ z07uQkmkn*1mdHF|s>%rwwlzGi}C_tWbZ#m+ZM#+6da`Z-cYPg7ywU zlVXFtb1IARA#d@5jc?g5d330A@$(aQC#j0LDJ~icOAz1yNK1yYvP)da$;mOQb{J>; z>9x{y40=I7h1;<%8r(h5y#;+YV3qwYgzt-@ko!bHnQ~q`x5+vC@Rpzl6=P96vxHur z1@alcfIReX#@^pZ>N2(d8PWe|Y}z$PEfU}mnnWpV^gSf-a{qZ2EIxI_!^7tePMa`dhx%rM##UL~ 
zAijpK86d3`czotE7*CuK2z=b~=0uT6CA|PGAe|{$8!x#ig5Dwi!OpZa`p<%ppw+~| zW;GdT6~snIvw+bH4N|WvrlyeJ^M)J$vJ$UfV z@IF^(&)Fwc{upqV|Ilfgj`80wAAZvH;df-Q>4xk~#N9lbkE7u=V?s^;-sEQQeGvsvM0V;}0L6t{B`j|6^kig!HZ~wV>mOVR`lhUA-jlbV2Rwy4%9l|OaG$49aI7lw3 z&_KjhNbg4+3pFZkY(Lai*m!_W6PxDp0rR-LhN0aatY32X-Gi~0@CBv&m@X2=fZmh7{fdBkTcb&F~{gg!t4K^k^wQas4{VJ3>3=$v}_8(C4N} z(Dl#ofq%JvdBX=W|1+C!#crTzbzmg5wGK?{qm?R@No9u;SdnNI9?wc7!#Dq}acO)x zq_~oqiVBku#%=9Dl*&5VlPeLbEt7yla}f7XYj{Quxx9sNOq5PEY@qa;+tqhk7n@dh zgBdiQ+6E{c3oT{E{Z$Co=ITHNR;{|The2Quf|`zBh)kAt+|<=z+EzNiD^1MrJ5&?7 zBU;1V0ZC$c^7$Bm{KFfl3DtEo56(x1Nf5%kC!M-?Vzm`(_bGO>$a>H|6 zj?Wn?*s{hbn`h73pxv+ufcwGAZgPq}uE?!%&s_krXiE28syXmP5Ipz#4dL}_RaV$5 z^Xp+PKuPYfl7vJl&Ic&^aSUj*L32!~Ljq~cyu__Z>Mn9}ejf-V`?bo!L95y}oo0VJSc{9Gi%a6{0qR&K|l#7f{~u8h>w8o8F?6~X*Ma&$<* zOayD0$nW**5OUZ$ap$_-K%W&927%rFsOBfM>(;?-tOhIM07OVz{M(Jko)(cIWv_tQ zolmh^Yd}7mI-8r5x;lb;7D zV5YGUtf`zj4tPpuMmKt+5N=v*s}MIUg_x{W1-kW6m+76y9uCItlw<%IK3w<=FR-Tv zxb%aH22$0iUoLKym6i1X1QDx~fLXFn*&yye&rPi|0 z88~8RhFr4S2|sYD{|;cIHUkUMjB}en@M0W7!pwRf;5u=#gPwC4!2^>`nIh~#GE4cycE7PY4 z06=$L01{r5E9@9WBEeRSiiqe!??dWG3ophb49UO8=5m)OEm^R)nH1a}bmP9I#Ln;5 z6@DHB661Ad<{#^~4j`+u;0%OPM`u$B@s|uK@;!t{2@$(fLH{xi=@cjp}`_qkPASGQb)_+TEP|{OO)Bjxt`{rPJ}c zQvi##ShvjYx6%Z3a9bm|iLMJ@ z@R7HPHOa4z3q1Wkxb#cqiqC@=HL?xpLv?v178+?`}hv1vPNj{C>l2g!Uj6SwTI8gW_C3M-{15C`EHK!Mcw0!f_OY zP-L^)$hFvXozp7*DJNxqy~K8{BI?eb!m7RGh0)yC)LaD-b~v`&+L1$ z3wc@FAgnTF{Thtsb+u4T4uhCuC(tuso&`uC|rC68*#5TuM?z7M#O>EDkj zDfb&VyMyLkCHKo=mU8W>C)8uao-H;u-rOJKlv%Zsw-Rn6fA}SGI~e(K++lFpAEagk zh?={=@H>*U;qfRZ&P6y=j-P!*`9+m8IsY+wq;Y7wD$H0=>|BBaYh%fz6WX&*REZ&8UwkgodcRwbq1fq`Xgi~qq4;GU>}&- z8OSLTx5U5v3K$9gjJm(nMHnn8$!)VGZ%6#sdG64hINE?P!W zVg=&$F)}e7Ug<|6;!Pdv-CK77+R$r142(e;;Bzm6!nO8J^G(UKfpIy(ReZ&yB<(Ka z286%=>*d&Qpo((Cen`g=4^E~AS5~*h<1u96p7tFCUtK^rpl)=P33#0ia*+8WPd1MU zGYHdN5;`3rJ<$ejxN#GJ78j0UFZnX{R*qJVyQosC_P#tpw&(ro!Ma}&#z%pJ^3_KC zeh+pexv#zDB&&!CK$&+ixkSoV3|8g36p4*kIO?Op7q6xnD4Gu|M z_Myx`MghTzQm-SODMvOyEhUdSQT1Xp(-Es2I%vF~A0*p|gH1D|<;pmrT z5JIuVulH<`IfH|PiB|`KoJDS`#|Wdon;ZsIK?XRk3w&i%dEyzeY;Q#vc{98MW%N*v zvN36s;(Qcx8|Y3vO$OT_e7_&w^(&S`Fja}7^^g6fQ*&m zK49z`xepR5s3eAYMr2p+3tWL5I>cbIZ9V{V>DW4?cnb>fJAsIH4UQN^>r0dv#(4o3 zedJ~u;gQ$xpvtu`PrJCZfY79h*`SrP0;nBT4)qc@ynrIBDfI_bk#UZISfDm;ZWR~2 zlt6rDQ%8kXYvs>Pz%mSlaH){}9|PHyn;dUP*)&M}2_P??2KlZP$V~T8)QyIxc>&Od zMy$M+-)YyL1(mX2AV5C}40(>&Jy4$z0Ku*;bQT|`PcSlmpUbK@8ZAX9@x8^YG&9tI zzpvgMlS>E^)F~tkdWnH@e)q=TMIZ<&wm*z zOBBX7rn;th&%T$(pXvDn7FDA3Mh(@vUuxw9NWL!{1Ew_~$@XsHo@j)Mh@i?ezu`mf zZU{b&_O28G|T4(kYG(U-l!w(2PhW zlcW+!pq$2!hx`MiTCM;G@j9!c4=!1T{9TH}f82kHkb-AT-=?L3z^0xBNu;Rcl$2?Z zBRncjQvf<5h_A$n*2 zN(&P~Dt(6$s&>|!N9E^V1X}L~2lMF%p^!GFuNX?o(jiD_%7WrdDwui9PsJnmq9{^W ze&n9kG8~v+^x}RE9uF#H#Y#%`%Ztim5YLN&8(njwQ^l0}_}NdHeFmjZk?2 zcG2NyAYsNuzEl`PgBJ$}YaW!97c^440@%f~=O3#x-7GX&hV)ZPnjH~6vh ziav|4%=^i|p>x9`z|1DQe;0f%D>H~t@Oh5x7Rc>rCl4BN1t`rEq z3z$;o#i7f2!}gTRrd9Hp)(Y&Gk9l6FPWV#uwf${-O-?GBU||#)MMUUYRsrq0>q|&7 z2%zR$Q{$>Y4x0ij%%Y2NR#sLD@L4?WeGfM5Kw+02Vy-+jI{rO(LDTRVyDQ9tt$UVo=(Gw2iRF`tb%yNI>6E@)+w21VH)CczpmfjU(k4(|5aya6iw4&F zvot@j-XI!_2i4AkgI#x&ubJ7)3gH!_N7@r0q9n(o{IW~BqlSE$Z}Kpp{q+EzWp-O@^sMLO7)slHd?oomkY%t^rq(oJY9X-0e z60GzcppRO-38~m#HU6%(nNFa9C@=pVgrW9ORrL?{sObpDci&gzsU#ytNp5@@-$x{86IaEfLi{9%^ ziB)5OcI!4^5w}62>jNcb_Y$ndOsm4Pq7*+sh6@OD_^6hQM&8B6L1N+1Hm;jfxnc<( zx`=hkfcld+L_zG5o~(;in|tKde$TuaiswK=DhvM8vn<3%tk<>@%9poy8>qgK3Y$g= zSKwL+*3j*$19eaWK*1BtfWSm}!NNC6>D7JTS`X5QEbgX<&|9=*{Ny084tHXNq`F5H zcnkcCN`#4nC<2g4n1agdTZrW?PzXJu9`hMoCIT;wcn#etNMRaOP}4&L@ecx5wBuYV zz$?-9e1M~^pUQ~@<<3ECPjSRL5n6KHKpYnv)FyXNMRA0Hx>ihq{16 zuQgZL*Z*1;)z!1aaCZT`faUsypk#eXuSOQ5!Biq^LlfR2c>S+x^u?X8 
znqlahGJiQ;Pt~%Ld=IYnLB3w5k(jD@Vpz*>X5xn2(bu=c5<7|eaQB2&7^NF`Un}>a z7<(8oaobN3N!DXvj-q9|R3%0T_*}*D2Zxrr3T$5rZ{=dQJ-e}o&12#HG?^^mLh5Su zMA;HMXVmOj57#NW@*FZ3I(tOWY0D-;Hj;*BtQloXLlY=SL(|McL-U)C=A{!Y4frjZ zvpk3X{Rys!hEDDOIqbh3{?B3m{QNJY{_oNM?_vMtaOfnU!~XgCe@>ziwoPO8WO4f9 T?m!t0_@l0(b*Du6alroo=a?m= literal 0 HcmV?d00001 diff --git a/ESN/EchoTorch-master/docs/source/conf.py b/ESN/EchoTorch-master/docs/source/conf.py new file mode 100644 index 0000000..3751f45 --- /dev/null +++ b/ESN/EchoTorch-master/docs/source/conf.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# +# EchoTorch documentation build configuration file, created by +# sphinx-quickstart on Thu Apr 6 11:30:46 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import echotorch +#import sphinx_bootstrap_theme +sys.path.insert(0, os.path.abspath('../../echotorch')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'EchoTorch' +copyright = u'2017, Nils Schaetti' +author = u'Nils Schaetti' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'0.1' +# The full version, including alpha/beta/rc tags. +release = u'0.1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = 'sphinx_rtd_theme' +#html_theme = 'bootstrap' +#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'EchoTorchdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'EchoTorch.tex', u'EchoTorch Documentation', + u'Nils Schaetti', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'echotorch', u'EchoTorch Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'EchoTorch', u'EchoTorch Documentation', + author, 'EchoTorch', 'One line description of project.', + 'Miscellaneous'), +] + + + diff --git a/ESN/EchoTorch-master/docs/source/echotorch.datasets.rst b/ESN/EchoTorch-master/docs/source/echotorch.datasets.rst new file mode 100644 index 0000000..ec4877e --- /dev/null +++ b/ESN/EchoTorch-master/docs/source/echotorch.datasets.rst @@ -0,0 +1,38 @@ +echotorch\.datasets package +=========================== + +Submodules +---------- + +echotorch\.datasets\.MackeyGlassDataset module +---------------------------------------------- + +.. automodule:: echotorch.datasets.MackeyGlassDataset + :members: + :undoc-members: + :show-inheritance: + +echotorch\.datasets\.MemTestDataset module +------------------------------------------ + +.. automodule:: echotorch.datasets.MemTestDataset + :members: + :undoc-members: + :show-inheritance: + +echotorch\.datasets\.NARMADataset module +---------------------------------------- + +.. automodule:: echotorch.datasets.NARMADataset + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: echotorch.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/ESN/EchoTorch-master/docs/source/echotorch.nn.rst b/ESN/EchoTorch-master/docs/source/echotorch.nn.rst new file mode 100644 index 0000000..6be9d85 --- /dev/null +++ b/ESN/EchoTorch-master/docs/source/echotorch.nn.rst @@ -0,0 +1,32 @@ +echotorch.nn +============ + +.. automodule:: torch.nn +.. 
currentmodule:: echotorch.nn
+
+Echo State Layers
+-----------------
+
+ESNCell
+~~~~~~~
+
+.. autoclass:: ESNCell
+    :members:
+
+ESN
+~~~
+
+.. autoclass:: ESN
+    :members:
+
+LiESNCell
+~~~~~~~~~
+
+.. autoclass:: LiESNCell
+    :members:
+
+LiESN
+~~~~~
+
+.. autoclass:: LiESN
+    :members:
diff --git a/ESN/EchoTorch-master/docs/source/echotorch.rst b/ESN/EchoTorch-master/docs/source/echotorch.rst
new file mode 100644
index 0000000..aaed1d2
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/source/echotorch.rst
@@ -0,0 +1,19 @@
+echotorch package
+=================
+
+Subpackages
+-----------
+
+.. toctree::
+
+    echotorch.datasets
+    echotorch.nn
+    echotorch.utils
+
+Module contents
+---------------
+
+.. automodule:: echotorch
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/ESN/EchoTorch-master/docs/source/echotorch.utils.rst b/ESN/EchoTorch-master/docs/source/echotorch.utils.rst
new file mode 100644
index 0000000..b41a8e1
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/source/echotorch.utils.rst
@@ -0,0 +1,30 @@
+echotorch\.utils package
+========================
+
+Submodules
+----------
+
+echotorch\.utils\.error\_measures module
+----------------------------------------
+
+.. automodule:: echotorch.utils.error_measures
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+echotorch\.utils\.utility\_functions module
+-------------------------------------------
+
+.. automodule:: echotorch.utils.utility_functions
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: echotorch.utils
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/ESN/EchoTorch-master/docs/source/index.rst b/ESN/EchoTorch-master/docs/source/index.rst
new file mode 100644
index 0000000..36bd1b2
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/source/index.rst
@@ -0,0 +1,32 @@
+.. EchoTorch documentation master file, created by
+   sphinx-quickstart on Thu Apr 6 11:30:46 2017.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+EchoTorch documentation
+=======================
+
+EchoTorch is a PyTorch-based library for Reservoir Computing and Echo State Networks on GPUs and CPUs.
+
+.. toctree::
+    :glob:
+    :maxdepth: 1
+    :caption: Notes
+
+    notes/*
+
+.. toctree::
+    :maxdepth: 1
+    :caption: Package Reference
+
+    echotorch
+    echotorch.datasets
+    echotorch.nn
+    echotorch.utils
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
\ No newline at end of file
diff --git a/ESN/EchoTorch-master/docs/source/modules.rst b/ESN/EchoTorch-master/docs/source/modules.rst
new file mode 100644
index 0000000..96cbe13
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/source/modules.rst
@@ -0,0 +1,7 @@
+echotorch
+=========
+
+.. toctree::
+    :maxdepth: 4
+
+    echotorch
diff --git a/ESN/EchoTorch-master/docs/source/notes/esn_learning.rst b/ESN/EchoTorch-master/docs/source/notes/esn_learning.rst
new file mode 100644
index 0000000..06add54
--- /dev/null
+++ b/ESN/EchoTorch-master/docs/source/notes/esn_learning.rst
@@ -0,0 +1,19 @@
+Echo State Network learning mechanics
+=====================================
+
+This note gives an overview of how Echo State Networks work
+and of their learning mechanics. It is not mandatory to understand the complete
+learning phase, but we recommend understanding the difference between
+classical ESN learning and gradient descent; it will help you choose
+which one to use in each case.
+
+.. _esn_model:
+
+The Echo State Network model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _esn_learning:
+
+``esn_learning``
+~~~~~~~~~~~~~~~~
+
diff --git a/ESN/EchoTorch-master/echotorch/__init__.py b/ESN/EchoTorch-master/echotorch/__init__.py
new file mode 100644
index 0000000..8016bd8
--- /dev/null
+++ b/ESN/EchoTorch-master/echotorch/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+from . import datasets
+from . import models
+from . import nn
+from . import utils
+
+
+# All EchoTorch's modules
+__all__ = ['datasets', 'models', 'nn', 'utils']
diff --git a/ESN/EchoTorch-master/echotorch/datasets/LogisticMapDataset.py b/ESN/EchoTorch-master/echotorch/datasets/LogisticMapDataset.py
new file mode 100644
index 0000000..12fa1e7
--- /dev/null
+++ b/ESN/EchoTorch-master/echotorch/datasets/LogisticMapDataset.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Logistic Map dataset
+class LogisticMapDataset(Dataset):
+    """
+    Logistic Map dataset
+    """
+
+    # Constructor
+    def __init__(self, sample_len, n_samples, alpha=5, beta=11, gamma=13, c=3.6, b=0.13, seed=None):
+        """
+        Constructor
+        :param sample_len: Length of the time-series in time steps.
+        :param n_samples: Number of samples to generate.
+        :param alpha: Frequency of the first sinusoidal forcing term.
+        :param beta: Frequency of the second sinusoidal forcing term.
+        :param gamma: Frequency of the third sinusoidal forcing term.
+        :param c: Base value of the logistic-map parameter r.
+        :param b: Scaling of the forcing signal added to c.
+        :param seed: Seed of random number generator.
+        """
+        # Properties
+        self.sample_len = sample_len
+        self.n_samples = n_samples
+        self.alpha = alpha
+        self.beta = beta
+        self.gamma = gamma
+        self.c = c
+        self.b = b
+        self.p2 = np.pi * 2
+
+        # Init seed if needed
+        if seed is not None:
+            torch.manual_seed(seed)
+        # end if
+    # end __init__
+
+    # Length
+    def __len__(self):
+        """
+        Length
+        :return:
+        """
+        return self.n_samples
+    # end __len__
+
+    # Get item
+    def __getitem__(self, idx):
+        """
+        Get item
+        :param idx:
+        :return:
+        """
+        # Time and forces
+        t = np.linspace(0, 1, self.sample_len, endpoint=False)
+        dforce = np.sin(self.p2 * self.alpha * t) + np.sin(self.p2 * self.beta * t) + np.sin(self.p2 * self.gamma * t)
+
+        # Series
+        series = torch.zeros(self.sample_len, 1)
+        series[0] = 0.6
+
+        # Generate
+        for i in range(1, self.sample_len):
+            series[i] = self._logistic_map(series[i-1], self.c + self.b * dforce[i])
+        # end for
+
+        return series
+    # end __getitem__
+
+    #######################################
+    # Private
+    #######################################
+
+    # Logistic map
+    def _logistic_map(self, x, r):
+        """
+        Logistic map
+        :param x: Current value of the series.
+        :param r: Logistic-map parameter.
+        :return: The next value r * x * (1 - x).
+        """
+        return r * x * (1-x)
+    # end logistic_map
+
+# end LogisticMapDataset
diff --git a/ESN/EchoTorch-master/echotorch/datasets/MackeyGlassDataset.py b/ESN/EchoTorch-master/echotorch/datasets/MackeyGlassDataset.py
new file mode 100644
index 0000000..e87224c
--- /dev/null
+++ b/ESN/EchoTorch-master/echotorch/datasets/MackeyGlassDataset.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import collections
+
+
+# Mackey Glass dataset
+class MackeyGlassDataset(Dataset):
+    """
+    Mackey Glass dataset
+    """
+
+    # Constructor
+    def __init__(self, sample_len, n_samples, tau=17, seed=None):
+        """
+        Constructor
+        :param sample_len: Length of the time-series in time steps.
+        :param n_samples: Number of samples to generate.
+        :param tau: Delay of the MG system; the commonly used tau=17 gives mild chaos, tau=30 moderate chaos.
+        :param seed: Seed of random number generator.
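+
+        Example (hypothetical usage):
+            >>> dataset = MackeyGlassDataset(sample_len=1000, n_samples=10, tau=17)
+            >>> sample = dataset[0]  # one tensor of shape (sample_len, 1)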
+ """ + # Properties + self.sample_len = sample_len + self.n_samples = n_samples + self.tau = tau + self.delta_t = 10 + self.timeseries = 1.2 + self.history_len = tau * self.delta_t + + # Init seed if needed + if seed is not None: + torch.manual_seed(seed) + # end if + # end __init__ + + # Length + def __len__(self): + """ + Length + :return: + """ + return self.n_samples + # end __len__ + + # Get item + def __getitem__(self, idx): + """ + Get item + :param idx: + :return: + """ + # History + history = collections.deque(1.2 * torch.ones(self.history_len) + 0.2 * (torch.rand(self.history_len) - 0.5)) + + # Preallocate tensor for time-serie + inp = torch.zeros(self.sample_len, 1) + + # For each time step + for timestep in range(self.sample_len): + for _ in range(self.delta_t): + xtau = history.popleft() + history.append(self.timeseries) + self.timeseries = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - 0.1 * history[-1]) / self.delta_t + # end for + inp[timestep] = self.timeseries + # end for + + # Squash timeseries through tanh + return torch.tan(inp - 1) + # end __getitem__ + +# end MackeyGlassDataset diff --git a/ESN/EchoTorch-master/echotorch/datasets/MemTestDataset.py b/ESN/EchoTorch-master/echotorch/datasets/MemTestDataset.py new file mode 100644 index 0000000..8620125 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/datasets/MemTestDataset.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +from torch.utils.data.dataset import Dataset + + +# Generates a series of input timeseries and delayed versions as outputs. +class MemTestDataset(Dataset): + """ + Generates a series of input timeseries and delayed versions as outputs. + Delay is given in number of timesteps. Can be used to empirically measure the + memory capacity of a system. + """ + + # Constructor + def __init__(self, sample_len, n_samples, n_delays=10, seed=None): + """ + Constructor + :param sample_len: Length of the time-series in time steps. + :param n_samples: Number of samples to generate. + :param n_delays: Number of step to delay + :param seed: Seed of random number generator. + """ + # Properties + self.sample_len = sample_len + self.n_samples = n_samples + self.n_delays = n_delays + + # Init seed if needed + if seed is not None: + torch.manual_seed(seed) + # end if + # end __init__ + + # Length + def __len__(self): + """ + Length + :return: + """ + return self.n_samples + # end __len__ + + # Get item + def __getitem__(self, idx): + """ + Get item + :param idx: + :return: + """ + inputs = (torch.rand(self.sample_len, 1) - 0.5) * 1.6 + outputs = torch.zeros(self.sample_len, self.n_delays) + for k in range(self.n_delays): + outputs[:, k:k+1] = torch.cat((torch.zeros(k + 1, 1), inputs[:-k - 1, :]), dim=0) + # end for + return inputs, outputs + # end __getitem__ + +# end MemTestDataset diff --git a/ESN/EchoTorch-master/echotorch/datasets/NARMADataset.py b/ESN/EchoTorch-master/echotorch/datasets/NARMADataset.py new file mode 100644 index 0000000..95368e2 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/datasets/NARMADataset.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +from torch.utils.data.dataset import Dataset + + +# 10th order NARMA task +class NARMADataset(Dataset): + """ + xth order NARMA task + WARNING: this is an unstable dataset. There is a small chance the system becomes + unstable, leading to an unusable dataset. It is better to use NARMA30 which + where this problem happens less often. 
+ """ + + # Constructor + def __init__(self, sample_len, n_samples, system_order=10, seed=None): + """ + Constructor + :param sample_len: Length of the time-series in time steps. + :param n_samples: Number of samples to generate. + :param system_order: th order NARMA + :param seed: Seed of random number generator. + """ + # Properties + self.sample_len = sample_len + self.n_samples = n_samples + self.system_order = system_order + + # System order + self.parameters = torch.zeros(4) + if system_order == 10: + self.parameters[0] = 0.3 + self.parameters[1] = 0.05 + self.parameters[2] = 9 + self.parameters[3] = 0.1 + else: + self.parameters[0] = 0.2 + self.parameters[1] = 0.04 + self.parameters[2] = 29 + self.parameters[3] = 0.001 + # end if + + # Init seed if needed + if seed is not None: + torch.manual_seed(seed) + # end if + + # Generate data set + self.inputs, self.outputs = self._generate() + # end __init__ + + ############################################# + # OVERRIDE + ############################################# + + # Length + def __len__(self): + """ + Length + :return: + """ + return self.n_samples + # end __len__ + + # Get item + def __getitem__(self, idx): + """ + Get item + :param idx: + :return: + """ + return self.inputs[idx], self.outputs[idx] + # end __getitem__ + + ############################################## + # PRIVATE + ############################################## + + # Generate + def _generate(self): + """ + Generate dataset + :return: + """ + inputs = list() + outputs = list() + for i in range(self.n_samples): + ins = torch.rand(self.sample_len, 1) * 0.5 + outs = torch.zeros(self.sample_len, 1) + for k in range(self.system_order - 1, self.sample_len - 1): + outs[k + 1] = self.parameters[0] * outs[k] + self.parameters[1] * outs[k] * torch.sum( + outs[k - (self.system_order - 1):k + 1]) + 1.5 * ins[k - int(self.parameters[2])] * ins[k] + \ + self.parameters[3] + # end for + inputs.append(ins) + outputs.append(outs) + # end for + + return inputs, outputs + # end _generate + +# end NARMADataset diff --git a/ESN/EchoTorch-master/echotorch/datasets/SwitchAttractorDataset.py b/ESN/EchoTorch-master/echotorch/datasets/SwitchAttractorDataset.py new file mode 100644 index 0000000..e37b72e --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/datasets/SwitchAttractorDataset.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +from torch.utils.data.dataset import Dataset +import numpy as np + + +# Switch attractor dataset +class SwitchAttractorDataset(Dataset): + """ + Generate a dataset where the reservoir must switch + between two attractors. + """ + + # Constructor + def __init__(self, sample_len, n_samples, seed=None): + """ + Constructor + :param sample_len: Length of the time-series in time steps. + :param n_samples: Number of samples to generate. + :param system_order: th order NARMA + :param seed: Seed of random number generator. 
+ """ + # Properties + self.sample_len = sample_len + self.n_samples = n_samples + + # Init seed if needed + if seed is not None: + torch.manual_seed(seed) + # end if + + # Generate data set + self.inputs, self.outputs = self._generate() + # end __init__ + + ############################################# + # OVERRIDE + ############################################# + + # Length + def __len__(self): + """ + Length + :return: + """ + return self.n_samples + # end __len__ + + # Get item + def __getitem__(self, idx): + """ + Get item + :param idx: + :return: + """ + return self.inputs[idx], self.outputs[idx] + # end __getitem__ + + ############################################## + # PRIVATE + ############################################## + + # Generate + def _generate(self): + """ + Generate dataset + :return: + """ + inputs = list() + outputs = list() + + # Generate each sample + for i in range(self.n_samples): + # Start end stop + start = np.random.randint(0, self.sample_len) + stop = np.random.randint(start, start + self.sample_len / 2) + + # Limits + if stop >= self.sample_len: + stop = self.sample_len - 1 + # end if + + # Sample tensor + inp = torch.zeros(self.sample_len, 1) + out = torch.zeros(self.sample_len) + + # Set inputs + inp[start, 0] = 1.0 + inp[stop] = 1.0 + + # Set outputs + out[start:stop] = 1.0 + + # Add + inputs.append(inp) + outputs.append(out) + # end for + + return inputs, outputs + # end _generate + +# end SwitchAttractorDataset diff --git a/ESN/EchoTorch-master/echotorch/datasets/__init__.py b/ESN/EchoTorch-master/echotorch/datasets/__init__.py new file mode 100644 index 0000000..2b9dbb6 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/datasets/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# + +# Imports +from .LogisticMapDataset import LogisticMapDataset +from .MackeyGlassDataset import MackeyGlassDataset +from .MemTestDataset import MemTestDataset +from .NARMADataset import NARMADataset + +__all__ = [ + 'LogisticMapDataset', 'MackeyGlassDataset', 'MemTestDataset', 'NARMADataset' +] diff --git a/ESN/EchoTorch-master/echotorch/models/HNilsNet.py b/ESN/EchoTorch-master/echotorch/models/HNilsNet.py new file mode 100644 index 0000000..d37a14e --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/models/HNilsNet.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/models/NilsNet.py +# Description : A Hierarchical NilsNet module. +# Date : 09th of April, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Nils Schaetti, University of Neuchâtel + +# Imports +import torchvision +import torch.nn as nn + + +# A Hierarchical NilsNet +class HNilsNet(nn.Module): + """ + A Hierarchical NilsNet + """ + + # Constructor + def __init__(self): + """ + Constructor + """ + pass + # end __init__ + + # Forward + def forward(self): + """ + Forward + :return: + """ + pass + # end forward + +# end HNilsNet diff --git a/ESN/EchoTorch-master/echotorch/models/NilsNet.py b/ESN/EchoTorch-master/echotorch/models/NilsNet.py new file mode 100644 index 0000000..0c9381f --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/models/NilsNet.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/models/NilsNet.py +# Description : An NilsNet module. +# Date : 09th of April, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +# Imports +import torchvision +import torch.nn as nn +import torch.legacy.nn as lnn +from echotorch import nn as ecnn + + +# A NilsNet +class NilsNet(nn.Module): + """ + A NilsNet + """ + + # Constructor + def __init__(self, reservoir_dim, sfa_dim, ica_dim, pretrained=False, feature_selector='resnet18'): + """ + Constructor + """ + # Upper class + super(NilsNet, self).__init__() + + # ResNet + if feature_selector == 'resnet18': + self.feature_selector = torchvision.models.resnet18(pretrained=True) + elif feature_selector == 'resnet34': + self.feature_selector = torchvision.models.resnet34(pretrained=True) + elif feature_selector == 'resnet50': + self.feature_selector = torchvision.models.resnet50(pretrained=True) + elif feature_selector == 'alexnet': + self.feature_selector = torchvision.models.alexnet(pretrained=True) + # end if + + # Skip last layer + self.reservoir_input_dim = self.feature_selector.fc.in_features + self.feature_selector.fc = ecnn.Identity() + + # Echo State Network + # self.esn = ecnn.ESNCell(input_dim=self.reservoir_input_dim, output_dim=reservoir_dim) + + # Slow feature analysis layer + # self.sfa = ecnn.SFACell(input_dim=reservoir_dim, output_dim=sfa_dim) + + # Independent Feature Analysis layer + # self.ica = ecnn.ICACell(input_dim=sfa_dim, output_dim=ica_dim) + # end __init__ + + # Forward + def forward(self, x): + """ + Forward + :return: + """ + # ResNet + return self.feature_selector(x) + # end forward + +# end NilsNet diff --git a/ESN/EchoTorch-master/echotorch/models/__init__.py b/ESN/EchoTorch-master/echotorch/models/__init__.py new file mode 100644 index 0000000..83464dd --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/models/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/models/__init__.py +# Description : Models init. +# Date : 09th of April, 2018 +# +# This file is part of EchoTorch. 
EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +# Imports +from .HNilsNet import HNilsNet +from .NilsNet import NilsNet diff --git a/ESN/EchoTorch-master/echotorch/nn/BDESN.py b/ESN/EchoTorch-master/echotorch/nn/BDESN.py new file mode 100644 index 0000000..8761444 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/BDESN.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .BDESNCell import BDESNCell +from .RRCell import RRCell + + +# Bi-directional Echo State Network module +class BDESN(nn.Module): + """ + Bi-directional Echo State Network module + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, output_dim, leaky_rate=1.0, spectral_radius=0.9, bias_scaling=0, + input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, input_set=[1.0, -1.0], + w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, create_cell=True): + """ + Constructor + :param input_dim: Inputs dimension. + :param hidden_dim: Hidden layer dimension + :param output_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. 
+ :param w: Internal weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + :param learning_algo: Which learning algorithm to use (inv, LU, grad) + """ + super(BDESN, self).__init__() + + # Properties + self.output_dim = output_dim + + # Recurrent layer + if create_cell: + self.esn_cell = BDESNCell( + input_dim=input_dim, hidden_dim=hidden_dim, spectral_radius=spectral_radius, bias_scaling=bias_scaling, + input_scaling=input_scaling, w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set, + w_sparsity=w_sparsity, nonlin_func=nonlin_func, leaky_rate=leaky_rate, create_cell=create_cell + ) + # end if + + # Ouput layer + self.output = RRCell( + input_dim=hidden_dim * 2, output_dim=output_dim, ridge_param=ridge_param, learning_algo=learning_algo + ) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + return self.esn_cell.hidden + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn_cell.w + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn_cell.w_in + # end w_in + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Reset output layer + self.output.reset() + + # Training mode again + self.train(True) + # end reset + + # Output matrix + def get_w_out(self): + """ + Output matrix + :return: + """ + return self.output.w_out + # end get_w_out + + # Set W + def set_w(self, w): + """ + Set W + :param w: + :return: + """ + self.esn_cell.w = w + # end set_w + + # Forward + def forward(self, u, y=None): + """ + Forward + :param u: Input signal. + :return: Output or hidden states + """ + # Compute hidden states + hidden_states = self.esn_cell(u) + + # Learning algorithm + return self.output(hidden_states, y) + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization + """ + # Finalize output training + self.output.finalize() + + # Not in training mode anymore + self.train(False) + # end finalize + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.esn_cell.reset_hidden() + # end reset_hidden + + # Get W's spectral radius + def get_spectral_radius(self): + """ + Get W's spectral radius + :return: W's spectral radius + """ + return self.esn_cell.get_spectral_raduis() + # end spectral_radius + +# end BDESN diff --git a/ESN/EchoTorch-master/echotorch/nn/BDESNCell.py b/ESN/EchoTorch-master/echotorch/nn/BDESNCell.py new file mode 100644 index 0000000..219989e --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/BDESNCell.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. 
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from .LiESNCell import LiESNCell +import numpy as np +from torch.autograd import Variable + + +# Bi-directional Echo State Network module +class BDESNCell(nn.Module): + """ + Bi-directional Echo State Network module + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, spectral_radius=0.9, bias_scaling=0, input_scaling=1.0, + w=None, w_in=None, w_bias=None, sparsity=None, input_set=[1.0, -1.0], w_sparsity=None, + nonlin_func=torch.tanh, leaky_rate=1.0, create_cell=True): + """ + Constructor + :param input_dim: Inputs dimension. + :param hidden_dim: Hidden layer dimension + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. + :param w: Internal weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + """ + super(BDESNCell, self).__init__() + + # Recurrent layer + if create_cell: + self.esn_cell = LiESNCell(leaky_rate, False, input_dim, hidden_dim, spectral_radius, bias_scaling, + input_scaling, w, w_in, w_bias, None, sparsity, input_set, w_sparsity, + nonlin_func) + # end if + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn_cell.w + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn_cell.w_in + # end w_in + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Reset output layer + self.output.reset() + + # Training mode again + self.train(True) + # end reset + + # Output matrix + def get_w_out(self): + """ + Output matrix + :return: + """ + return self.output.w_out + # end get_w_out + + # Set W + def set_w(self, w): + """ + Set W + :param w: + :return: + """ + self.esn_cell.w = w + # end set_w + + # Forward + def forward(self, u, y=None): + """ + Forward + :param u: Input signal. 
+ :param y: Target outputs + :return: Output or hidden states + """ + # Forward compute hidden states + forward_hidden_states = self.esn_cell(u) + + # Backward compute hidden states + backward_hidden_states = self.esn_cell(Variable(torch.from_numpy(np.flip(u.data.numpy(), 1).copy()))) + backward_hidden_states = Variable(torch.from_numpy(np.flip(backward_hidden_states.data.numpy(), 1).copy())) + + return torch.cat((forward_hidden_states, backward_hidden_states), dim=2) + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization + """ + # Finalize output training + self.output.finalize() + + # Not in training mode anymore + self.train(False) + # end finalize + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.esn_cell.reset_hidden() + # end reset_hidden + + # Get W's spectral radius + def get_spectral_radius(self): + """ + Get W's spectral radius + :return: W's spectral radius + """ + return self.esn_cell.get_spectral_raduis() + # end spectral_radius + +# end BDESNCell diff --git a/ESN/EchoTorch-master/echotorch/nn/BDESNPCA.py b/ESN/EchoTorch-master/echotorch/nn/BDESNPCA.py new file mode 100644 index 0000000..52c660f --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/BDESNPCA.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch +import torch.nn as nn +import torch.nn.functional as F +from .BDESNCell import BDESNCell +from sklearn.decomposition import IncrementalPCA +import matplotlib.pyplot as plt +from torch.autograd import Variable + + +# Bi-directional Echo State Network module with PCA reduction +class BDESNPCA(nn.Module): + """ + Bi-directional Echo State Network module with PCA reduction + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, output_dim, pca_dim, linear_dim, leaky_rate=1.0, spectral_radius=0.9, bias_scaling=0, + input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, input_set=[1.0, -1.0], + w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, create_cell=True, + pca_batch_size=10): + """ + Constructor + :param input_dim: Inputs dimension. + :param hidden_dim: Hidden layer dimension + :param output_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. 
+ :param w: Internal weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + :param learning_algo: Which learning algorithm to use (inv, LU, grad) + """ + super(BDESNPCA, self).__init__() + + # Properties + self.output_dim = output_dim + self.pca_dim = pca_dim + + # Recurrent layer + if create_cell: + self.esn_cell = BDESNCell( + input_dim=input_dim, hidden_dim=hidden_dim, spectral_radius=spectral_radius, bias_scaling=bias_scaling, + input_scaling=input_scaling, w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set, + w_sparsity=w_sparsity, nonlin_func=nonlin_func, leaky_rate=leaky_rate, create_cell=create_cell + ) + # end if + + # PCA + self.ipca = IncrementalPCA(n_components=pca_dim, batch_size=pca_batch_size) + + # FFNN output + self.linear1 = nn.Linear(pca_dim, linear_dim) + self.linear2 = nn.Linear(linear_dim, output_dim) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + return self.esn_cell.hidden + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn_cell.w + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn_cell.w_in + # end w_in + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Reset output layer + self.output.reset() + + # Training mode again + self.train(True) + # end reset + + # Output matrix + def get_w_out(self): + """ + Output matrix + :return: + """ + return self.output.w_out + # end get_w_out + + # Set W + def set_w(self, w): + """ + Set W + :param w: + :return: + """ + self.esn_cell.w = w + # end set_w + + # Forward + def forward(self, u, y=None): + """ + Forward + :param u: Input signal. 
+ :return: Output or hidden states + """ + # Compute hidden states + hidden_states = self.esn_cell(u) + + # Resulting reduced stated + pca_states = torch.zeros(1, hidden_states.size(1), self.pca_dim) + + # For each batch + pca_states[0] = torch.from_numpy(self.ipca.fit_transform(hidden_states.data[0].numpy()).copy()) + pca_states = Variable(pca_states) + + # FFNN output + return F.relu(self.linear2(F.relu(self.linear1(pca_states)))) + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization + """ + # Finalize output training + self.output.finalize() + + # Not in training mode anymore + self.train(False) + # end finalize + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.esn_cell.reset_hidden() + # end reset_hidden + + # Get W's spectral radius + def get_spectral_radius(self): + """ + Get W's spectral radius + :return: W's spectral radius + """ + return self.esn_cell.get_spectral_raduis() + # end spectral_radius + +# end BDESNPCA diff --git a/ESN/EchoTorch-master/echotorch/nn/EESN.py b/ESN/EchoTorch-master/echotorch/nn/EESN.py new file mode 100644 index 0000000..339d1b1 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/EESN.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/EESN.py +# Description : An ESN with an embedding layer at the beginning. +# Date : 22 March, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Nils Schaetti + +import torch +import torch.sparse +import torch.nn as nn +from .LiESN import LiESN + + +# An ESN with an embedding layer +class EESN(object): + """ + An ESN with an embedding layer + """ + + # Constructor + def __init__(self, voc_size, embedding_dim, hidden_dim, output_dim, spectral_radius=0.9, + bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, + input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, + leaky_rate=1.0, train_leaky_rate=False, feedbacks=False, wfdb_sparsity=None, + normalize_feedbacks=False): + # Embedding layer + self.embedding = nn.Embedding(voc_size, embedding_dim) + + # Li-ESN + self.esn = LiESN(embedding_dim, hidden_dim, output_dim, spectral_radius, bias_scaling, input_scaling, + w, w_in, w_bias, sparsity, input_set, w_sparsity, nonlin_func, learning_algo, ridge_param, + leaky_rate, train_leaky_rate, feedbacks, wfdb_sparsity, normalize_feedbacks) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + return self.esn.hidden + + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn.w + + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn.w_in + # end w_in + + # Embedding weights + @property + def weights(self): + """ + Embedding weights + :return: + """ + return self.embedding.weight + # end weights + + ############################################### + # PUBLIC + ############################################### + + # Forward + def forward(self, u, y=None): + """ + Forward + :param x: + :return: + """ + # Embedding layer + emb = self.embedding(u) + + # ESN + return self.esn(emb, y) + # end forward + +# end EESN diff --git a/ESN/EchoTorch-master/echotorch/nn/ESN.py b/ESN/EchoTorch-master/echotorch/nn/ESN.py new file mode 100644 index 0000000..53fcbae --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/ESN.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from . 
import ESNCell +from .RRCell import RRCell + + +# Echo State Network module +class ESN(nn.Module): + """ + Echo State Network module + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9, bias_scaling=0, input_scaling=1.0, + w=None, w_in=None, w_bias=None, w_fdb=None, sparsity=None, input_set=[1.0, -1.0], w_sparsity=None, + nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, create_cell=True, + feedbacks=False, with_bias=True, wfdb_sparsity=None, normalize_feedbacks=False): + """ + Constructor + :param input_dim: Inputs dimension. + :param hidden_dim: Hidden layer dimension + :param output_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. + :param w: Internation weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param w_fdb: Feedback weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + :param learning_algo: Which learning algorithm to use (inv, LU, grad) + """ + super(ESN, self).__init__() + + # Properties + self.output_dim = output_dim + self.feedbacks = feedbacks + self.with_bias = with_bias + self.normalize_feedbacks = normalize_feedbacks + + # Recurrent layer + if create_cell: + self.esn_cell = ESNCell(input_dim, hidden_dim, spectral_radius, bias_scaling, input_scaling, w, w_in, + w_bias, w_fdb, sparsity, input_set, w_sparsity, nonlin_func, feedbacks, output_dim, + wfdb_sparsity, normalize_feedbacks) + # end if + + # Ouput layer + self.output = RRCell(hidden_dim, output_dim, ridge_param, feedbacks, with_bias, learning_algo) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + return self.esn_cell.hidden + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn_cell.w + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn_cell.w_in + # end w_in + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Reset output layer + self.output.reset() + + # Training mode again + self.train(True) + # end reset + + # Output matrix + def get_w_out(self): + """ + Output matrix + :return: + """ + return self.output.w_out + # end get_w_out + + # Set W + def set_w(self, w): + """ + Set W + :param w: + :return: + """ + self.esn_cell.w = w + # end set_w + + # Forward + def forward(self, u, y=None): + """ + Forward + :param u: Input signal. 
+ :param y: Target outputs + :return: Output or hidden states + """ + # Compute hidden states + if self.feedbacks and self.training: + hidden_states = self.esn_cell(u, y) + elif self.feedbacks and not self.training: + hidden_states = self.esn_cell(u, w_out=self.output.w_out) + else: + hidden_states = self.esn_cell(u) + # end if + + # Learning algo + return self.output(hidden_states, y) + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization + """ + # Finalize output training + self.output.finalize() + + # Not in training mode anymore + self.train(False) + # end finalize + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.esn_cell.reset_hidden() + # end reset_hidden + + # Get W's spectral radius + def get_spectral_radius(self): + """ + Get W's spectral radius + :return: W's spectral radius + """ + return self.esn_cell.get_spectral_raduis() + # end spectral_radius + +# end ESNCell diff --git a/ESN/EchoTorch-master/echotorch/nn/ESNCell.py b/ESN/EchoTorch-master/echotorch/nn/ESNCell.py new file mode 100644 index 0000000..64f4364 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/ESNCell.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESNCell.py +# Description : An Echo State Network layer. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +import torch +import torch.sparse +from torch.autograd import Variable +import torch.nn as nn +import echotorch.utils +import numpy as np + + +# Echo State Network layer +class ESNCell(nn.Module): + """ + Echo State Network layer + """ + + # Constructor + def __init__(self, input_dim, output_dim, spectral_radius=0.9, bias_scaling=0, input_scaling=1.0, w=None, w_in=None, + w_bias=None, w_fdb=None, sparsity=None, input_set=[1.0, -1.0], w_sparsity=None, + nonlin_func=torch.tanh, feedbacks=False, feedbacks_dim=None, wfdb_sparsity=None, + normalize_feedbacks=False): + """ + Constructor + :param input_dim: Inputs dimension. + :param output_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. 
+ :param w: Internation weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + """ + super(ESNCell, self).__init__() + + # Params + self.input_dim = input_dim + self.output_dim = output_dim + self.spectral_radius = spectral_radius + self.bias_scaling = bias_scaling + self.input_scaling = input_scaling + self.sparsity = sparsity + self.input_set = input_set + self.w_sparsity = w_sparsity + self.nonlin_func = nonlin_func + self.feedbacks = feedbacks + self.feedbacks_dim = feedbacks_dim + self.wfdb_sparsity = wfdb_sparsity + self.normalize_feedbacks = normalize_feedbacks + + # Init hidden state + self.register_buffer('hidden', self.init_hidden()) + + # Initialize input weights + self.register_buffer('w_in', self._generate_win(w_in)) + + # Initialize reservoir weights randomly + self.register_buffer('w', self._generate_w(w)) + + # Initialize bias + self.register_buffer('w_bias', self._generate_wbias(w_bias)) + + # Initialize feedbacks weights randomly + if feedbacks: + self.register_buffer('w_fdb', self._generate_wfdb(w_fdb)) + # end if + # end __init__ + + ############################################### + # PUBLIC + ############################################### + + # Forward + def forward(self, u, y=None, w_out=None): + """ + Forward + :param u: Input signal + :param y: Target output signal for teacher forcing + :param w_out: Output weights for teacher forcing + :return: Resulting hidden states + """ + # Time length + time_length = int(u.size()[1]) + + # Number of batches + n_batches = int(u.size()[0]) + + # Outputs + outputs = Variable(torch.zeros(n_batches, time_length, self.output_dim)) + outputs = outputs.cuda() if self.hidden.is_cuda else outputs + + # For each batch + for b in range(n_batches): + # Reset hidden layer + self.reset_hidden() + + # For each steps + for t in range(time_length): + # Current input + ut = u[b, t] + + # Compute input layer + u_win = self.w_in.mv(ut) + + # Apply W to x + x_w = self.w.mv(self.hidden) + + # Feedback or not + if self.feedbacks and self.training and y is not None: + # Current target + yt = y[b, t] + + # Compute feedback layer + y_wfdb = self.w_fdb.mv(yt) + + # Add everything + x = u_win + x_w + y_wfdb + self.w_bias + elif self.feedbacks and not self.training and w_out is not None: + # Add bias + bias_hidden = torch.cat((Variable(torch.ones(1)), self.hidden), dim=0) + + # Compute past output + yt = w_out.t().mv(bias_hidden) + + # Normalize + if self.normalize_feedbacks: + yt -= torch.min(yt) + yt /= torch.max(yt) - torch.min(yt) + yt /= torch.sum(yt) + # end if + + # Compute feedback layer + y_wfdb = self.w_fdb.mv(yt) + + # Add everything + x = u_win + x_w + y_wfdb + self.w_bias + else: + # Add everything + x = u_win + x_w + self.w_bias + # end if + + # Apply activation function + x = self.nonlin_func(x) + + # Add to outputs + self.hidden.data = x.view(self.output_dim).data + + # New last state + outputs[b, t] = self.hidden + # end for + # end for + + return outputs + # end forward + + # Init hidden layer + def init_hidden(self): + """ + Init hidden layer + :return: Initiated hidden layer + """ + return Variable(torch.zeros(self.output_dim), requires_grad=False) + # return torch.zeros(self.output_dim) + # end init_hidden + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.hidden.fill_(0.0) + # end reset_hidden + + # Get W's 
spectral radius + def get_spectral_radius(self): + """ + Get W's spectral radius + :return: W's spectral radius + """ + return echotorch.utils.spectral_radius(self.w) + # end spectral_radius + + ############################################### + # PRIVATE + ############################################### + + # Generate W matrix + def _generate_w(self, w): + """ + Generate W matrix + :return: + """ + # Initialize reservoir weight matrix + if w is None: + w = self.generate_w(self.output_dim, self.w_sparsity) + else: + if callable(w): + w = w(self.output_dim) + # end if + # end if + + # Scale it to spectral radius + w *= self.spectral_radius / echotorch.utils.spectral_radius(w) + + return Variable(w, requires_grad=False) + # end generate_W + + # Generate Win matrix + def _generate_win(self, w_in): + """ + Generate Win matrix + :return: + """ + # Initialize input weight matrix + if w_in is None: + if self.sparsity is None: + w_in = self.input_scaling * ( + np.random.randint(0, 2, (self.output_dim, self.input_dim)) * 2.0 - 1.0) + w_in = torch.from_numpy(w_in.astype(np.float32)) + else: + w_in = self.input_scaling * np.random.choice(np.append([0], self.input_set), + (self.output_dim, self.input_dim), + p=np.append([1.0 - self.sparsity], + [self.sparsity / len(self.input_set)] * len( + self.input_set))) + w_in = torch.from_numpy(w_in.astype(np.float32)) + # end if + else: + if callable(w_in): + w_in = w_in(self.output_dim, self.input_dim) + # end if + # end if + + return Variable(w_in, requires_grad=False) + # end _generate_win + + # Generate Wbias matrix + def _generate_wbias(self, w_bias): + """ + Generate Wbias matrix + :return: + """ + # Initialize bias matrix + if w_bias is None: + w_bias = self.bias_scaling * (torch.rand(1, self.output_dim) * 2.0 - 1.0) + else: + if callable((w_bias)): + w_bias = w_bias(self.output_dim) + # end if + # end if + + return Variable(w_bias, requires_grad=False) + # end _generate_wbias + + # Generate Wfdb matrix + def _generate_wfdb(self, w_fdb): + """ + Generate Wfdb matrix + :return: + """ + # Initialize feedbacks weight matrix + if w_fdb is None: + if self.wfdb_sparsity is None: + w_fdb = self.input_scaling * ( + np.random.randint(0, 2, (self.output_dim, self.feedbacks_dim)) * 2.0 - 1.0) + w_fdb = torch.from_numpy(w_fdb.astype(np.float32)) + else: + w_fdb = self.input_scaling * np.random.choice(np.append([0], self.input_set), + (self.output_dim, self.feedbacks_dim), + p=np.append([1.0 - self.wfdb_sparsity], + [self.wfdb_sparsity / len( + self.input_set)] * len( + self.input_set))) + w_fdb = torch.from_numpy(w_fdb.astype(np.float32)) + # end if + else: + if callable(w_fdb): + w_fdb = w_fdb(self.output_dim, self.feedbacks_dim) + # end if + # end if + + return Variable(w_fdb, requires_grad=False) + # end _generate_wfdb + + ############################################ + # STATIC + ############################################ + + # Generate W matrix + @staticmethod + def generate_w(output_dim, w_sparsity=None): + """ + Generate W matrix + :param output_dim: + :param w_sparsity: + :return: + """ + # Sparsity + if w_sparsity is None: + w = torch.rand(output_dim, output_dim) * 2.0 - 1.0 + else: + w = np.random.choice([0.0, 1.0], (output_dim, output_dim), + p=[1.0 - w_sparsity, w_sparsity]) + w[w == 1] = np.random.rand(len(w[w == 1])) * 2.0 - 1.0 + w = torch.from_numpy(w.astype(np.float32)) + + # Return + return w + # end if + return w + # end generate_w + + # To sparse matrix + @staticmethod + def to_sparse(m): + """ + To sparse matrix + :param m: + :return: + """ + # 
Rows, columns and values + rows = torch.LongTensor() + columns = torch.LongTensor() + values = torch.FloatTensor() + + # For each row + for i in range(m.shape[0]): + # For each column + for j in range(m.shape[1]): + if m[i, j] != 0.0: + rows = torch.cat((rows, torch.LongTensor([i])), dim=0) + columns = torch.cat((columns, torch.LongTensor([j])), dim=0) + values = torch.cat((values, torch.FloatTensor([m[i, j]])), dim=0) + # end if + # end for + # end for + + # Indices + indices = torch.cat((rows.unsqueeze(0), columns.unsqueeze(0)), dim=0) + + # To sparse + return torch.sparse.FloatTensor(indices, values) + # end to_sparse + +# end ESNCell diff --git a/ESN/EchoTorch-master/echotorch/nn/GatedESN.py b/ESN/EchoTorch-master/echotorch/nn/GatedESN.py new file mode 100644 index 0000000..e944b14 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/GatedESN.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch +import torch.nn as nn +import torch.nn.functional as F +from .LiESNCell import LiESNCell +from sklearn.decomposition import IncrementalPCA +from .PCACell import PCACell +import matplotlib.pyplot as plt +from torch.autograd import Variable + + +# Gated Echo State Network +class GatedESN(nn.Module): + """ + Gated Echo State Network + """ + + # Constructor + def __init__(self, input_dim, reservoir_dim, pca_dim, hidden_dim, leaky_rate=1.0, spectral_radius=0.9, + bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, + input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, + create_cell=True): + """ + Constructor + :param input_dim: Inputs dimension. + :param hidden_dim: Hidden layer dimension + :param reservoir_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. 
+ :param w: Internal weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + :param learning_algo: Which learning algorithm to use (inv, LU, grad) + """ + super(GatedESN, self).__init__() + + # Properties + self.reservoir_dim = reservoir_dim + self.pca_dim = pca_dim + self.hidden_dim = hidden_dim + self.finalized = False + + # Recurrent layer + if create_cell: + self.esn_cell = LiESNCell( + input_dim=input_dim, output_dim=reservoir_dim, spectral_radius=spectral_radius, bias_scaling=bias_scaling, + input_scaling=input_scaling, w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set, + w_sparsity=w_sparsity, nonlin_func=nonlin_func, leaky_rate=leaky_rate + ) + # end if + + # PCA + if self.pca_dim > 0: + self.pca_cell = PCACell(input_dim=reservoir_dim, output_dim=pca_dim) + # end if + + # Initialize input update weights + self.register_parameter('wzp', nn.Parameter(self.init_wzp())) + + # Initialize hidden update weights + self.register_parameter('wzh', nn.Parameter(self.init_wzh())) + + # Initialize update bias + self.register_parameter('bz', nn.Parameter(self.init_bz())) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + return self.esn_cell.hidden + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + return self.esn_cell.w + # end w + + # Input matrix + @property + def w_in(self): + """ + Input matrix + :return: + """ + return self.esn_cell.w_in + # end w_in + + ############################################### + # PUBLIC + ############################################### + + # Init hidden vector + def init_hidden(self): + """ + Init hidden layer + :return: Initiated hidden layer + """ + return Variable(torch.zeros(self.hidden_dim), requires_grad=False) + # end init_hidden + + # Init update vector + def init_update(self): + """ + Init hidden layer + :return: Initiated hidden layer + """ + return self.init_hidden() + # end init_hidden + + # Init update-reduced matrix + def init_wzp(self): + """ + Init update-reduced matrix + :return: Initiated update-reduced matrix + """ + return torch.rand(self.pca_dim, self.hidden_dim) + # end init_hidden + + # Init update-hidden matrix + def init_wzh(self): + """ + Init update-hidden matrix + :return: Initiated update-hidden matrix + """ + return torch.rand(self.pca_dim, self.hidden_dim) + # end init_hidden + + # Init update bias + def init_bz(self): + """ + Init update bias + :return: + """ + return torch.rand(self.hidden_dim) + # end init_bz + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Reset PCA layer + self.pca_cell.reset() + + # Reset reservoir + self.reset_reservoir() + + # Training mode again + self.train(True) + # end reset + + # Forward + def forward(self, u, y=None): + """ + Forward + :param u: Input signal. 
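+        :param y: Target outputs (accepted for API consistency, not used by this layer)
+
+        Note: the gating update ht = (1 - zt) * h + zt * pt combines the reduced
+        state pt and the hidden state h element-wise, which implicitly assumes
+        pca_dim == hidden_dim, so callers are expected to choose equal sizes.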
+ :return: Output or hidden states + """ + # Time length + time_length = int(u.size()[1]) + + # Number of batches + n_batches = int(u.size()[0]) + + # Compute reservoir states + reservoir_states = self.esn_cell(u) + reservoir_states.required_grad = False + + # Reduce + if self.pca_dim > 0: + # Reduce states + pca_states = self.pca_cell(reservoir_states) + pca_states.required_grad = False + + # Stop here if we learn PCA + if self.finalized: + return + # end if + + # Hidden states + hidden_states = Variable(torch.zeros(n_batches, time_length, self.hidden_dim)) + hidden_states = hidden_states.cuda() if pca_states.is_cuda else hidden_states + else: + # Hidden states + hidden_states = Variable(torch.zeros(n_batches, time_length, self.hidden_dim)) + hidden_states = hidden_states.cuda() if reservoir_states.is_cuda else hidden_states + # end if + + # For each batch + for b in range(n_batches): + # Reset hidden layer + hidden = self.init_hidden() + + # TO CUDA + if u.is_cuda: + hidden = hidden.cuda() + # end if + + # For each steps + for t in range(time_length): + # Current reduced state + if self.pca_dim > 0: + pt = pca_states[b, t] + else: + pt = reservoir_states[b, t] + # end if + + # Compute update vector + zt = F.sigmoid(self.wzp.mv(pt) + self.wzh.mv(hidden) + self.bz) + + # Compute hidden state + ht = (1.0 - zt) * hidden + zt * pt + + # Add to outputs + hidden = ht.view(self.hidden_dim) + + # New last state + hidden_states[b, t] = hidden + # end for + # end for + + # Return hidden states + return hidden_states + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization + """ + # Finalize output training + self.pca_cell.finalize() + + # Finalized + self.finalized = True + # end finalize + + # Reset reservoir layer + def reset_reservoir(self): + """ + Reset hidden layer + :return: + """ + self.esn_cell.reset_hidden() + # end reset_reservoir + + # Reset hidden layer + def reset_hidden(self): + """ + Reset hidden layer + :return: + """ + self.hidden.fill_(0.0) + # end reset_hidden + +# end GatedESN diff --git a/ESN/EchoTorch-master/echotorch/nn/HESN.py b/ESN/EchoTorch-master/echotorch/nn/HESN.py new file mode 100644 index 0000000..4584899 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/HESN.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/HESN.py +# Description : ESN with input pre-trained and used with transfer learning. +# Date : 22 March, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+#
+# Copyright Nils Schaetti
+
+import torch
+import torch.sparse
+import torch.nn as nn
+from .LiESN import LiESN
+
+
+# ESN with input pre-trained and used with transfer learning
+class HESN(nn.Module):
+    """
+    ESN with input pre-trained and used with transfer learning
+    """
+
+    # Constructor
+    def __init__(self, model, input_dim, hidden_dim, output_dim, spectral_radius=0.9,
+                 bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None,
+                 input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0,
+                 leaky_rate=1.0, train_leaky_rate=False, feedbacks=False, wfdb_sparsity=None,
+                 normalize_feedbacks=False):
+        """
+        Constructor
+        :param model: Pre-trained module applied to the inputs before the Li-ESN
+        (the remaining parameters are forwarded to LiESN)
+        """
+        super(HESN, self).__init__()
+
+        # Pre-trained feature extraction layer (e.g. an embedding)
+        self.model = model
+
+        # Li-ESN
+        self.esn = LiESN(input_dim, hidden_dim, output_dim, spectral_radius, bias_scaling, input_scaling,
+                         w, w_in, w_bias, sparsity, input_set, w_sparsity, nonlin_func, learning_algo, ridge_param,
+                         leaky_rate, train_leaky_rate, feedbacks, wfdb_sparsity, normalize_feedbacks)
+    # end __init__
+
+    ###############################################
+    # PROPERTIES
+    ###############################################
+
+    # Hidden layer
+    @property
+    def hidden(self):
+        """
+        Hidden layer
+        :return:
+        """
+        return self.esn.hidden
+    # end hidden
+
+    # Hidden weight matrix
+    @property
+    def w(self):
+        """
+        Hidden weight matrix
+        :return:
+        """
+        return self.esn.w
+    # end w
+
+    # Input matrix
+    @property
+    def w_in(self):
+        """
+        Input matrix
+        :return:
+        """
+        return self.esn.w_in
+    # end w_in
+
+    ###############################################
+    # PUBLIC
+    ###############################################
+
+    # Forward
+    def forward(self, u, y=None):
+        """
+        Forward
+        :param u: Input signal
+        :param y: Target outputs
+        :return: Output or hidden states
+        """
+        # Selected features
+        selected_features = self.model(u)
+
+        # ESN
+        return self.esn(selected_features, y)
+    # end forward
+
+# end HESN
diff --git a/ESN/EchoTorch-master/echotorch/nn/ICACell.py b/ESN/EchoTorch-master/echotorch/nn/ICACell.py
new file mode 100644
index 0000000..b1f7370
--- /dev/null
+++ b/ESN/EchoTorch-master/echotorch/nn/ICACell.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/nn/ICACell.py
+# Description : An Independent Component Analysis (ICA) layer.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+"""
+Created on 26 January 2018
+@author: Nils Schaetti
+"""
+
+# Imports
+import torch.sparse
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+
+
+# Independent Component Analysis layer
+class ICACell(nn.Module):
+    """
+    Independent Component Analysis layer. It can be used to handle different batch-mode algorithms for ICA.
+    """
+
+    # Constructor
+    def __init__(self, input_dim, output_dim):
+        """
+        Constructor
+        :param input_dim: Inputs dimension. 
+ :param output_dim: Reservoir size + """ + super(ICACell, self).__init__() + pass + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Training mode again + self.train(True) + # end reset + + # Forward + def forward(self, x, y=None): + """ + Forward + :param x: Input signal. + :param y: Target outputs + :return: Output or hidden states + """ + # Batch size + batch_size = x.size()[0] + + # Time length + time_length = x.size()[1] + + # Add bias + if self.with_bias: + x = self._add_constant(x) + # end if + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization or Pseudo-inverse + """ + pass + # end finalize + + ############################################### + # PRIVATE + ############################################### + + # Add constant + def _add_constant(self, x): + """ + Add constant + :param x: + :return: + """ + bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False) + return torch.cat((bias, x), dim=2) + # end _add_constant + +# end ICACell diff --git a/ESN/EchoTorch-master/echotorch/nn/Identity.py b/ESN/EchoTorch-master/echotorch/nn/Identity.py new file mode 100644 index 0000000..ff41e76 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/Identity.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/Identity.py +# Description : An Leaky-Integrated Echo State Network layer. +# Date : 09th of April, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + + +import torch +import torch.nn as nn +from torch.autograd import Variable + + +# Identity layer +class Identity(nn.Module): + """ + Identity layer + """ + + # Forward + def forward(self, x): + """ + Forward + :return: + """ + return x + # end forward + +# end Identity diff --git a/ESN/EchoTorch-master/echotorch/nn/LiESN.py b/ESN/EchoTorch-master/echotorch/nn/LiESN.py new file mode 100644 index 0000000..8530d04 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/LiESN.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +import torch +from .LiESNCell import LiESNCell +from .ESN import ESN + + +# Leaky-Integrated Echo State Network module +class LiESN(ESN): + """ + Leaky-Integrated Echo State Network module + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, output_dim, spectral_radius=0.9, + bias_scaling=0, input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, + input_set=[1.0, -1.0], w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, + leaky_rate=1.0, train_leaky_rate=False, feedbacks=False, wfdb_sparsity=None, + normalize_feedbacks=False): + """ + Constructor + :param input_dim: + :param hidden_dim: + :param output_dim: + :param spectral_radius: + :param bias_scaling: + :param input_scaling: + :param w: + :param w_in: + :param w_bias: + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: + :param learning_algo: + :param ridge_param: + :param leaky_rate: + :param train_leaky_rate: + :param feedbacks: + """ + super(LiESN, self).__init__(input_dim, hidden_dim, output_dim, spectral_radius=spectral_radius, + bias_scaling=bias_scaling, input_scaling=input_scaling, + w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set, + w_sparsity=w_sparsity, nonlin_func=nonlin_func, learning_algo=learning_algo, + ridge_param=ridge_param, create_cell=False, feedbacks=feedbacks, + wfdb_sparsity=wfdb_sparsity, normalize_feedbacks=normalize_feedbacks) + + # Recurrent layer + self.esn_cell = LiESNCell(leaky_rate, train_leaky_rate, input_dim, hidden_dim, spectral_radius=spectral_radius, + bias_scaling=bias_scaling, input_scaling=input_scaling, + w=w, w_in=w_in, w_bias=w_bias, sparsity=sparsity, input_set=input_set, + w_sparsity=w_sparsity, nonlin_func=nonlin_func, feedbacks=feedbacks, + feedbacks_dim=output_dim, wfdb_sparsity=wfdb_sparsity, + normalize_feedbacks=normalize_feedbacks) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + ############################################### + # PUBLIC + ############################################### + + ############################################### + # PRIVATE + ############################################### + +# end ESNCell diff --git a/ESN/EchoTorch-master/echotorch/nn/LiESNCell.py b/ESN/EchoTorch-master/echotorch/nn/LiESNCell.py new file mode 100644 index 0000000..2644396 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/LiESNCell.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/LiESNCell.py +# Description : An Leaky-Integrated Echo State Network layer. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. 
+# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +import torch +import torch.sparse +import torch.nn as nn +from torch.autograd import Variable +from .ESNCell import ESNCell +import matplotlib.pyplot as plt + + +# Leak-Integrated Echo State Network layer +class LiESNCell(ESNCell): + """ + Leaky-Integrated Echo State Network layer + """ + + # Constructor + def __init__(self, leaky_rate=1.0, train_leaky_rate=False, *args, **kwargs): + """ + Constructor + :param leaky_rate: Reservoir's leaky rate (default 1.0, normal ESN) + :param train_leaky_rate: Train leaky rate as parameter? (default: False) + """ + super(LiESNCell, self).__init__(*args, **kwargs) + + # Params + if train_leaky_rate: + self.leaky_rate = nn.Parameter(torch.Tensor(1).fill_(leaky_rate), requires_grad=True) + else: + # Initialize bias + self.register_buffer('leaky_rate', Variable(torch.Tensor(1).fill_(leaky_rate), requires_grad=False)) + # end if + # end __init__ + + ############################################### + # PUBLIC + ############################################### + + # Forward + def forward(self, u, y=None, w_out=None): + """ + Forward + :param u: Input signal. + :return: Resulting hidden states. + """ + # Time length + time_length = int(u.size()[1]) + + # Number of batches + n_batches = int(u.size()[0]) + + # Outputs + outputs = Variable(torch.zeros(n_batches, time_length, self.output_dim)) + outputs = outputs.cuda() if self.hidden.is_cuda else outputs + + # For each batch + for b in range(n_batches): + # Reset hidden layer + self.reset_hidden() + + # For each steps + for t in range(time_length): + # Current input + ut = u[b, t] + + # Compute input layer + u_win = self.w_in.mv(ut) + + # Apply W to x + x_w = self.w.mv(self.hidden) + + # Feedback or not + if self.feedbacks and self.training and y is not None: + # Current target + yt = y[b, t] + + # Compute feedback layer + y_wfdb = self.w_fdb.mv(yt) + + # Add everything + x = u_win + x_w + y_wfdb + self.w_bias + # x = u_win + x_w + self.w_bias + elif self.feedbacks and not self.training and w_out is not None: + # Add bias + bias_hidden = torch.cat((Variable(torch.ones(1)), self.hidden), dim=0) + + # Compute past output + yt = w_out.t().mv(bias_hidden) + + # Normalize + if self.normalize_feedbacks: + yt -= torch.min(yt) + yt /= torch.max(yt) - torch.min(yt) + yt /= torch.sum(yt) + # end if + + # Compute feedback layer + y_wfdb = self.w_fdb.mv(yt) + + # Add everything + x = u_win + x_w + y_wfdb + self.w_bias + # x = u_win + x_w + self.w_bias + else: + # Add everything + x = u_win + x_w + self.w_bias + # end if + + # Apply activation function + x = self.nonlin_func(x) + + # Add to outputs + self.hidden.data = (self.hidden.mul(1.0 - self.leaky_rate) + x.view(self.output_dim).mul(self.leaky_rate)).data + + # New last state + outputs[b, t] = self.hidden + # end for + # end for + + return outputs + # end forward + + ############################################### + # PRIVATE + ############################################### + +# end LiESNCell diff --git a/ESN/EchoTorch-master/echotorch/nn/OnlinePCACell.py b/ESN/EchoTorch-master/echotorch/nn/OnlinePCACell.py new file mode 100644 index 0000000..b3e1fd3 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/OnlinePCACell.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +# 
+# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from torch.autograd import Variable + + +# Online PCA cell +# We extract the principal components from the input data incrementally. +class OnlinePCACell(nn.Module): + """ + Online PCA cell + We extract the principal components from the input data incrementally. + Weng J., Zhang Y. and Hwang W., + Candid covariance-free incremental principal component analysis, + IEEE Trans. Pattern Analysis and Machine Intelligence, + vol. 25, 1034--1040, 2003. + """ + + # Constructor + def __init__(self, input_dim, output_dim, amn_params=(20, 200, 2000, 3), init_eigen_vectors=None, var_rel=1, numx_rng=None): + """ + Constructor + :param input_dim: + :param output_dim: + :param amn_params: + :param init_eigen_vectors: + :param var_rel: + :param numx_rng: + """ + # Super call + super(OnlinePCACell, self).__init__() + + # Properties + self.input_dim = input_dim + self.output_dim = output_dim + self.amn_params = amn_params + self._init_v = init_eigen_vectors + self.var_rel = var_rel + self._train_iteration = 0 + self._training_type = None + + # (Internal) eigenvectors + self._v = None + self.v = None + self.d = None + + # Total and reduced + self._var_tot = 1.0 + self._reduced_dims = self.output_dim + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Initial eigen vectors + @property + def init_eigen_vectors(self): + """ + Initial eigen vectors + :return: + """ + return self._init_v + # end init_eigen_vectors + + # Set initial eigen vectors + @init_eigen_vectors.setter + def init_eigen_vectors(self, init_eigen_vectors=None): + """ + Set initial eigen vectors + :param init_eigen_vectors: + :return: + """ + self._init_v = init_eigen_vectors + + # Set input dim + if self._input_dim is None: + self._input_dim = self._init_v.shape[0] + else: + # Check input dim + assert( + self.input_dim == self._init_v.shape[0]), \ + Exception(u"Dimension mismatch. 
init_eigen_vectors shape[0] must be {}, given {}".format( + self.input_dim, + self._init_v.shape[0] + ) + ) + # end if + + # Set output dim + if self._output_dim is None: + self._output_dim = self._init_v.shape[1] + else: + # Check output dim + assert( + self.output_dim == self._init_v.shape[1], + Exception(u"Dimension mismatch, init_eigen_vectors shape[1] must be {}, given {}".format( + self.output_dim, + self._init_v.shape[1]) + ) + ) + # end if + + # Set V + if self.v is None: + self._v = self._init_v.copy() + self.d = torch.norm(self._v, p=2, dim=0) + self.v = self._v / self.d + # end if + # end init_eigen_vectors + + ############################################### + # PUBLIC + ############################################### + + # Get variance explained by PCA + def get_var_tot(self): + """ + Get variance explained by PCA + :return: + """ + return self._var_tot + # end get_var_tot + + # Get reducible dimensionality based on the set thresholds + def get_reduced_dimensionality(self): + """ + Return reducible dimensionality based on the set thresholds. + :return: + """ + return self._reduced_dims + # end get_reduced_dimensionality + + # Get projection matrix + def get_projmatrix(self, transposed=1): + """ + Get projection matrix + :param transposed: + :return: + """ + if transposed: + return self.v + # end if + return self.v.t() + # end get_projmatrix + + # Get back-projection matrix (reconstruction matrix) + def get_recmatrix(self, transposed=1): + """ + Get reconstruction matrix + :param transposed: + :return: + """ + if transposed: + return self.v.t() + # end if + return self.v + # end get_recmatrix + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Training mode again + self.train(True) + # end reset + + # Forward + def forward(self, x, y=None): + """ + Forward + :param x: Input signal. + :param y: Target outputs + :return: Output or hidden states + """ + # Update components + self._update_pca(x) + + # Execute + return self._execute(x) + # end forward + + ############################################### + # PRIVATE + ############################################### + + # Project the input on the first 'n' components + def _execute(self, x, n=None): + """ + Project the input on the first 'n' components + :param x: + :param n: + :return: + """ + if n is not None: + return x.mm(self.v[:, :n]) + # end if + return x.mm(self.v) + # end _execute + + # Update the principal components. 
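+    # The update below follows the candid covariance-free (CCIPCA) recursion of
+    # Weng et al. (2003) cited in the class docstring: each eigenvector estimate
+    # v_j is refreshed with an amnesic average,
+    #     v_j  <-  w1 * v_j + w2 * (r . v_j / ||v_j||) * r,
+    # and the residual is then deflated, r <- r - (r . v_hat_j) * v_hat_j, so the
+    # next component is estimated in the orthogonal complement.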
+ def _update_pca(self, x): + """ + Update the principal components + :param x: + :return: + """ + # Params + [w1, w2] = self._amnesic(self.get_current_train_iteration() + 1) + red_j = self.output_dim + red_j_flag = False + explained_var = 0.0 + + # For each output + r = x + for j in range(self.output_dim): + v = self._v[:, j:j + 1] + d = self.d[j] + + v = w1 * v + w2 * r.mv(v) / d * r.t() + d = torch.norm(v) + vn = v / d + r = r - r.mv(vn) * vn.t() + explained_var += d + + # Red flag + if not red_j_flag: + ratio = explained_var / self._var_tot + if ratio > self.var_rel: + red_j = j + red_j_flag = True + # end if + # end if + + self._v[:, j:j + 1] = v + self.v[:, j:j + 1] = vn + self.d[j] = d + # end for + + self._var_tot = explained_var + self._reduced_dims = red_j + # end update_pca + + # Initialize parameters + def _check_params(self, *args): + """ + Initialize parameters + :param args: + :return: + """ + if self._init_v is None: + if self.output_dim is not None: + self.init_eigen_vectors = 0.1 * torch.randn(self.input_dim, self.output_dim) + else: + self.init_eigen_vectors = 0.1 * torch.randn(self.input_dim, self.input_dim) + # end if + # end if + # end _check_params + + # Return amnesic weights + def _amnesic(self, n): + """ + Return amnesic weights + :param n: + :return: + """ + _i = float(n + 1) + n1, n2, m, c = self.amn_params + if _i < n1: + l = 0 + elif (_i >= n1) and (_i < n2): + l = c * (_i - n1) / (n2 - n1) + else: + l = c + (_i - n2) / m + # end if + _world = float(_i - 1 - l) / _i + _wnew = float(1 + l) / _i + return [_world, _wnew] + # end _amnesic + + # Add constant + def _add_constant(self, x): + """ + Add constant + :param x: + :return: + """ + bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False) + return torch.cat((bias, x), dim=2) + # end _add_constant + +# end PCACell diff --git a/ESN/EchoTorch-master/echotorch/nn/PCACell.py b/ESN/EchoTorch-master/echotorch/nn/PCACell.py new file mode 100644 index 0000000..a25d5f9 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/PCACell.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from torch.autograd import Variable + + +# Filter the input data through the most significatives principal components. 
+class PCACell(nn.Module): + """ + Filter the input data through the most significatives principal components + """ + + # Constructor + def __init__(self, input_dim, output_dim, svd=False, reduce=False, var_rel=1E-12, var_abs=1E-15, var_part=None): + """ + Constructor + :param input_dim: + :param output_dim: + :param svd: If True use Singular Value Decomposition instead of the standard eigenvalue problem solver. Use it when PCANode complains about singular covariance matrices. + :param reduce: Keep only those principal components which have a variance larger than 'var_abs' + :param val_rel: Variance relative to first principal component threshold. Default is 1E-12. + :param var_abs: Absolute variance threshold. Default is 1E-15. + :param var_part: Variance relative to total variance threshold. Default is None. + """ + # Super + super(PCACell, self).__init__() + + # Properties + self.input_dim = input_dim + self.output_dim = output_dim + self.svd = svd + self.var_abs = var_abs + self.var_rel = var_rel + self.var_part = var_part + self.reduce = reduce + + # Set it as buffer + self.register_buffer('xTx', Variable(torch.zeros(input_dim, input_dim), requires_grad=False)) + self.register_buffer('xTx_avg', Variable(torch.zeros(input_dim), requires_grad=False)) + + # Eigen values + self.d = None + + # Eigen vectors, first index for coordinates + self.v = None + + # Total variance + self.total_variance = None + + # Len, average and explained variance + self.tlen = 0 + self.avg = None + self.explained_variance = None + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + ############################################### + # PUBLIC + ############################################### + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Initialize the covariance matrix one for + # the input data. + self._init_internals() + + # Training mode again + self.train(True) + # end reset + + # Forward + def forward(self, x, y=None): + """ + Forward + :param x: Input signal. + :param y: Target outputs + :return: Output or hidden states + """ + # Number of batches + n_batches = int(x.size()[0]) + + # Time length + time_length = x.size()[1] + + # Outputs + outputs = Variable(torch.zeros(n_batches, time_length, self.output_dim)) + outputs = outputs.cuda() if x.is_cuda else outputs + + # For each batch + for b in range(n_batches): + # Sample + s = x[b] + + # Train or execute + if self.training: + self._update_cov_matrix(s) + else: + outputs[b] = self._execute_pca(s) + # end if + # end for + + return outputs + # end forward + + # Finish training + def finalize(self): + """ + Finalize training with LU factorization or Pseudo-inverse + """ + # Reshape average + xTx, avg, tlen = self._fix(self.xTx, self.xTx_avg, self.tlen) + + # Reshape + self.avg = avg.unsqueeze(0) + + # We need more observations than variables + if self.tlen < self.input_dim: + raise Exception(u"The number of observations ({}) is larger than the number of input variables ({})".format(self.tlen, self.input_dim)) + # end if + + # Total variance + total_var = torch.diag(xTx).sum() + + # Compute and sort eigenvalues + d, v = torch.symeig(xTx, eigenvectors=True) + + # Check for negative eigenvalues + if float(d.min()) < 0: + raise Exception(u"Got negative eigenvalues ({}). 
You may either set output_dim to be smaller".format(d)) + # end if + + # Indexes + indexes = range(d.size(0)-1, -1, -1) + + # Sort by descending order + d = torch.take(d, Variable(torch.LongTensor(indexes))) + v = v[:, indexes] + + # Explained covariance + self.explained_variance = torch.sum(d) / total_var + + # Store eigenvalues + self.d = d[:self.output_dim] + + # Store eigenvectors + self.v = v[:, :self.output_dim] + + # Total variance + self.total_variance = total_var + + # Stop training + self.train(False) + # end finalize + + # Get explained variance + def get_explained_variance(self): + """ + The explained variance is the fraction of the original variance that can be explained by the + principal components. + :return: + """ + return self.explained_variance + # end get_explained_variance + + # Get the projection matrix + def get_proj_matrix(self, tranposed=True): + """ + Get the projection matrix + :param tranposed: + :return: + """ + # Stop training + self.train(False) + + # Transposed + if tranposed: + return self.v + # end if + return self.v.t() + # end get_proj_matrix + + # Get the reconstruction matrix + def get_rec_matrix(self, tranposed=1): + """ + Returns the reconstruction matrix + :param tranposed: + :return: + """ + # Stop training + self.train(False) + + # Transposed + if tranposed: + return self.v.t() + # end if + return self.v + # end get_rec_matrix + + ############################################### + # PRIVATE + ############################################### + + # Project the input on the first 'n' principal components + def _execute_pca(self, x, n=None): + """ + Project the input on the first 'n' principal components + :param x: + :param n: + :return: + """ + if n is not None: + return (x - self.avg).mm(self.v[:, :n]) + # end if + return (x - self.avg).mm(self.v) + # end _execute + + # Project data from the output to the input space using the first 'n' components. + def _inverse(self, y, n=None): + """ + Project data from the output to the input space using the first 'n' components. + :param y: + :param n: + :return: + """ + if n is None: + n = y.shape[1] + # end if + + if n > self.output_dim: + raise Exception(u"y has dimension {} but should but at most {}".format(n, self.output_dim)) + # end if + + # Get reconstruction matrix + v = self.get_rec_matrix() + + # Reconstruct + if n is not None: + return y.mm(v[:n, :]) + self.avg + else: + return y.mm(v) + self.avg + # end if + # end _inverse + + # Adjust output dim + def _adjust_output_dim(self): + """ + If the output dimensions is small than the input dimension + :return: + """ + # If the number of PC is not specified, keep all + if self.desired_variance is None and self.ouput_dim is None: + self.output_dim = self.input_dim + return None + # end if + + # Define the range of eigenvalues to compute if the number of PC to keep + # has been specified directly. + if self.output_dim is not None and self.output_dim >= 1: + return (self.input_dim - self.output_dim + 1, self.input_dim) + else: + return None + # end if + # end _adjust_output_dim + + # Fix covariance matrix + def _fix(self, mtx, avg, tlen, center=True): + """ + Returns a triple containing the covariance matrix, the average and + the number of observations. 
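+        The accumulators are normalised in place: mtx is divided by (tlen - 1);
+        when center is True, ger(avg, avg) / (tlen * (tlen - 1)) is then
+        subtracted; finally avg itself is divided by tlen.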
+ :param mtx: + :param center: + :return: + """ + mtx /= tlen - 1 + + # Substract the mean + if center: + avg_mtx = torch.ger(avg, avg) + avg_mtx /= tlen * (tlen - 1) + mtx -= avg_mtx + # end if + + # Fix the average + avg /= tlen + + return mtx, avg, tlen + # end fix + + # Update covariance matrix + def _update_cov_matrix(self, x): + """ + Update covariance matrix + :param x: + :return: + """ + # Init + if self.xTx is None: + self._init_internals() + # end if + + # Update + self.xTx.data.add_(x.t().mm(x).data) + self.xTx_avg.add_(torch.sum(x, dim=0)) + self.tlen += x.size(0) + # end _update_cov_matrix + + # Initialize covariance + def _init_cov_matrix(self): + """ + Initialize covariance matrix + :return: + """ + self.xTx.data = torch.zeros(self.input_dim, self.input_dim) + self.xTx_avg.data = torch.zeros(self.input_dim) + # end _init_cov_matrix + + # Initialize internals + def _init_internals(self): + """ + Initialize internals + :param x: + :return: + """ + # Init covariance matrix + self._init_cov_matrix() + # end _init_internals + + # Add constant + def _add_constant(self, x): + """ + Add constant + :param x: + :return: + """ + bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False) + return torch.cat((bias, x), dim=2) + # end _add_constant + +# end PCACell diff --git a/ESN/EchoTorch-master/echotorch/nn/RRCell.py b/ESN/EchoTorch-master/echotorch/nn/RRCell.py new file mode 100644 index 0000000..e43f92f --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/RRCell.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +from torch.autograd import Variable + + +# Ridge Regression cell +class RRCell(nn.Module): + """ + Ridge Regression cell + """ + + # Constructor + def __init__(self, input_dim, output_dim, ridge_param=0.0, feedbacks=False, with_bias=True, learning_algo='inv'): + """ + Constructor + :param input_dim: Inputs dimension. 
+        :param output_dim: Output dimension (number of read-out units)
+        :param ridge_param: Ridge regularization parameter (default: 0.0)
+        :param feedbacks: Keep trained outputs available for feedback connections? (default: False)
+        :param with_bias: Add a constant bias input? (default: True)
+        :param learning_algo: Learning algorithm, 'inv' (matrix inverse) or a linear system solver
+        """
+        super(RRCell, self).__init__()
+
+        # Properties
+        self.input_dim = input_dim
+        self.output_dim = output_dim
+        self.ridge_param = ridge_param
+        self.feedbacks = feedbacks
+        self.with_bias = with_bias
+        self.learning_algo = learning_algo
+
+        # Size
+        if self.with_bias:
+            self.x_size = input_dim + 1
+        else:
+            self.x_size = input_dim
+        # end if
+
+        # Set it as buffer
+        self.register_buffer('xTx', Variable(torch.zeros(self.x_size, self.x_size), requires_grad=False))
+        self.register_buffer('xTy', Variable(torch.zeros(self.x_size, output_dim), requires_grad=False))
+        self.register_buffer('w_out', Variable(torch.zeros(self.x_size, output_dim), requires_grad=False))
+    # end __init__
+
+    ###############################################
+    # PROPERTIES
+    ###############################################
+
+    ###############################################
+    # PUBLIC
+    ###############################################
+
+    # Reset learning
+    def reset(self):
+        """
+        Reset learning
+        :return:
+        """
+        self.xTx.data.fill_(0.0)
+        self.xTy.data.fill_(0.0)
+        self.w_out.data.fill_(0.0)
+
+        # Training mode again
+        self.train(True)
+    # end reset
+
+    # Output matrix
+    def get_w_out(self):
+        """
+        Output matrix
+        :return:
+        """
+        return self.w_out
+    # end get_w_out
+
+    # Forward
+    def forward(self, x, y=None):
+        """
+        Forward
+        :param x: Input signal.
+        :param y: Target outputs
+        :return: Output or hidden states
+        """
+        # Batch size
+        batch_size = x.size()[0]
+
+        # Time length
+        time_length = x.size()[1]
+
+        # Add bias
+        if self.with_bias:
+            x = self._add_constant(x)
+        # end if
+
+        # Learning algo
+        if self.training:
+            for b in range(batch_size):
+                self.xTx.data.add_(x[b].t().mm(x[b]).data)
+                self.xTy.data.add_(x[b].t().mm(y[b]).data)
+            # end for
+            return x
+        else:
+            # Outputs
+            outputs = Variable(torch.zeros(batch_size, time_length, self.output_dim), requires_grad=False)
+            outputs = outputs.cuda() if self.w_out.is_cuda else outputs
+
+            # For each batch
+            for b in range(batch_size):
+                outputs[b] = torch.mm(x[b], self.w_out)
+            # end for
+
+            return outputs
+        # end if
+    # end forward
+
+    # Finish training
+    def finalize(self):
+        """
+        Finalize training with ridge regression (matrix inverse or linear solver)
+        """
+        if self.learning_algo == 'inv':
+            inv_xTx = (self.xTx + self.ridge_param * torch.eye(self.x_size)).inverse()
+            self.w_out.data = torch.mm(inv_xTx, self.xTy).data
+        else:
+            # torch.gesv solves (xTx + ridge * I) w_out = xTy and returns (solution, LU)
+            self.w_out.data = torch.gesv(
+                self.xTy, self.xTx + torch.eye(self.x_size).mul(self.ridge_param)
+            )[0].data
+        # end if
+
+        # Not in training mode anymore
+        self.train(False)
+    # end finalize
+
+    ###############################################
+    # PRIVATE
+    ###############################################
+
+    # Add constant
+    def _add_constant(self, x):
+        """
+        Add constant
+        :param x:
+        :return:
+        """
+        if x.is_cuda:
+            bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)).cuda(), requires_grad=False)
+        else:
+            bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False)
+        # end if
+        return torch.cat((bias, x), dim=2)
+    # end _add_constant
+
+# end RRCell
diff --git a/ESN/EchoTorch-master/echotorch/nn/SFACell.py b/ESN/EchoTorch-master/echotorch/nn/SFACell.py
new file mode 100644
index 0000000..c018b55
--- /dev/null
+++ 
b/ESN/EchoTorch-master/echotorch/nn/SFACell.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +import numpy as np +from past.utils import old_div + + +# Slow Feature Analysis layer +class SFACell(nn.Module): + """ + Extract the slowly varying components from input data. + """ + + # Type keys + _type_keys = ['f', 'd', 'F', 'D'] + + # Type conv + _type_conv = {('f', 'd'): 'd', ('f', 'F'): 'F', ('f', 'D'): 'D', + ('d', 'F'): 'D', ('d', 'D'): 'D', + ('F', 'd'): 'D', ('F', 'D'): 'D'} + + # Constructor + def __init__(self, input_dim, output_dim, include_last_sample=True, rank_deficit_method='none', use_bias=True): + """ + Constructor + :param input_dim: Input dimension + :param output_dim: Number of slow feature + :param include_last_sample: If set to False, the training method discards the last sample in every chunk during training when calculating the matrix. + :param rank_deficit_method: 'none', 'reg', 'pca', 'svd', 'auto'. + """ + super(SFACell, self).__init__() + self.include_last_sample = include_last_sample + self.use_bias = use_bias + self.input_dim = input_dim + self.output_dim = output_dim + + # Initialie the two covariance matrices one for + # the input data, and the other for the derivatives. + self.xTx = torch.zeros(input_dim, input_dim) + self.xTx_avg = torch.zeros(input_dim) + self.dxTdx = torch.zeros(input_dim, input_dim) + self.dxTdx_avg = torch.zeros(input_dim) + + # Set routine for eigenproblem + self.set_rank_deficit_method(rank_deficit_method) + self.rank_threshold = 1e-12 + self.rank_deficit = 0 + + # Will be set after training + self.d = None + self.sf = None + self.avg = None + self.bias = None + self.tlen = None + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + ############################################### + # PUBLIC + ############################################### + + # Time derivative + def time_derivative(self, x): + """ + Compute the approximation of time derivative + :param x: + :return: + """ + return x[1:, :] - x[:-1, :] + # end time_derivative + + # Reset learning + def reset(self): + """ + Reset learning + :return: + """ + # Training mode again + self.train(True) + # end reset + + # Forward + def forward(self, x): + """ + Forward + :param x: Input signal. 
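+
+        A minimal usage sketch (hypothetical dimensions; statistics are
+        accumulated in training mode, then the eigenproblem is solved by
+        finalize() before projecting)::
+
+            >>> sfa = SFACell(input_dim=10, output_dim=2)
+            >>> x = torch.randn(1, 500, 10)   # (batch, time, features)
+            >>> sfa(x)                        # accumulate covariance statistics
+            >>> sfa.finalize()                # solve the generalized eigenproblem
+            >>> slow = sfa(x)                 # project on the 2 slowest features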
+        :return: Output or hidden states
+        """
+        # Execution mode: project each batch on the slow features
+        if not self.training:
+            outputs = [x[b].mm(self.sf) - self.bias for b in range(x.size(0))]
+            return torch.stack(outputs, dim=0)
+        # end if
+
+        # Training mode: accumulate covariance statistics
+        if self.tlen is None:
+            self.tlen = 0
+        # end if
+
+        # For each batch
+        for b in range(x.size(0)):
+            # Last sample
+            last_sample_index = None if self.include_last_sample else -1
+
+            # Sample and derivative
+            xs = x[b, :last_sample_index, :]
+            xd = self.time_derivative(x[b])
+
+            # Update covariance matrices (in place)
+            self.xTx.add_(xs.t().mm(xs))
+            self.dxTdx.add_(xd.t().mm(xd))
+
+            # Update averages
+            self.xTx_avg += torch.sum(xs, dim=0)
+            self.dxTdx_avg += torch.sum(xd, dim=0)
+
+            # Number of samples seen so far
+            self.tlen += xs.size(0)
+        # end for
+        return x
+    # end forward
+
+    # Finish training
+    def finalize(self):
+        """
+        Finalize training by solving the generalized eigenvalue problem
+        """
+        # Covariance
+        self.xTx, self.xTx_avg, self.tlen = self._fix(self.xTx, self.xTx_avg, self.tlen, center=True)
+        self.dxTdx, self.dxTdx_avg, self.tlen = self._fix(self.dxTdx, self.dxTdx_avg, self.tlen, center=False)
+
+        # Range
+        rng = (1, self.output_dim)
+
+        # Resolve system
+        self.d, self.sf = self._symeig(
+            self.dxTdx, self.xTx, rng
+        )
+        d = self.d
+
+        # We want only positive values
+        if torch.min(d) < 0:
+            raise Exception(u"Got negative values in {}".format(d))
+        # end if
+
+        # Delete covariance matrices
+        del self.xTx
+        del self.dxTdx
+
+        # Store bias (projection of the input mean on the slow features)
+        self.bias = self.xTx_avg.matmul(self.sf)
+
+        # Not in training mode anymore
+        self.train(False)
+    # end finalize
+
+    ###############################################
+    # PRIVATE
+    ###############################################
+
+    # Solve standard and generalized eigenvalue problem for symmetric (hermitian) definite positive matrices
+    def _symeig(self, A, B, rng, eigenvectors=True):
+        """
+        Solve standard and generalized eigenvalue problem for symmetric (hermitian) definite positive matrices.
+        :param A: An N x N matrix
+        :param B: An N x N matrix
+        :param rng: (lo, hi), the indexes of smallest and largest eigenvalues to be returned.
+        :param eigenvectors: Return eigenvalues and eigenvectors, or only eigenvalues
+        :return: w, the eigenvalues and Z the eigenvectors
+        """
+        # To numpy
+        A = A.numpy()
+        B = B.numpy()
+
+        # Common floating point type of the two matrices
+        dtype = np.dtype(self._greatest_common_dtype([A, B]))
+
+        # Make B the identity matrix
+        wB, ZB = np.linalg.eigh(B)
+
+        # Check eigenvalues
+        self._assert_eigenvalues_real(wB, dtype)
+
+        # No negative values
+        if wB.real.min() < 0:
+            raise Exception(u"Got negative eigenvalues: {}".format(wB))
+        # end if
+
+        # Old division
+        ZB = old_div(ZB.real, np.sqrt(wB.real))
+
+        # A = ZB^T * A * ZB
+        A = np.matmul(np.matmul(ZB.T, A), ZB)
+
+        # Diagonalize A
+        w, ZA = np.linalg.eigh(A)
+        Z = np.matmul(ZB, ZA)
+
+        # Check eigenvalues
+        self._assert_eigenvalues_real(w, dtype)
+
+        # Real parts
+        w = w.real
+        Z = Z.real
+
+        # Sort
+        idx = w.argsort()
+        w = w.take(idx)
+        Z = Z.take(idx, axis=1)
+
+        # Sanitize range
+        n = A.shape[0]
+        lo, hi = rng
+        if lo < 1:
+            lo = 1
+        # end if
+        if lo > n:
+            lo = n
+        # end if
+        if hi > n:
+            hi = n
+        # end if
+        if lo > hi:
+            lo, hi = hi, lo
+        # end if
+
+        # Get values
+        Z = Z[:, lo - 1:hi]
+        w = w[lo - 1:hi]
+
+        # Cast
+        w = self.refcast(w, dtype)
+        Z = self.refcast(Z, dtype)
+
+        # Eigenvectors
+        if eigenvectors:
+            return torch.FloatTensor(w), torch.FloatTensor(Z)
+        else:
+            return torch.FloatTensor(w)
+        # end if
+    # end _symeig
+
+    # Ref cast
+    def refcast(self, array, dtype):
+        """
+        Cast the array to dtype only if necessary, otherwise return a reference. 
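+        :param array: numpy array to cast
+        :param dtype: target numpy dtype
+        :return: the array itself if it already has dtype, otherwise a cast copy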
+ """ + dtype = np.dtype(dtype) + if array.dtype == dtype: + return array + return array.astype(dtype) + # end refcast + + # Check eigenvalues + def _assert_eigenvalues_real(self, w, dtype): + """ + Check eigenvalues + :param w: + :param dtype: + :return: + """ + tol = np.finfo(dtype.type).eps * 100 + if abs(w.imag).max() > tol: + err = "Some eigenvalues have significant imaginary part: %s " % str(w) + raise Exception(err) + # end if + # end _assert_eigenvalues_real + + # Greatest common type + def _greatest_common_dtype(self, alist): + """ + Apply conversion rules to find the common conversion type + dtype 'd' is default for 'i' or unknown types + (known types: 'f','d','F','D'). + """ + dtype = 'f' + for array in alist: + if array is None: + continue + tc = array.dtype.char + if tc not in self._type_keys: + tc = 'd' + transition = (dtype, tc) + if transition in self._type_conv: + dtype = self._type_conv[transition] + return dtype + # end _greatest_common_dtype + + # Fix covariance matrix + def _fix(self, mtx, avg, tlen, center=True): + """ + Returns a triple containing the covariance matrix, the average and + the number of observations. + :param mtx: + :param center: + :return: + """ + if self.use_bias: + mtx /= tlen + else: + mtx /= tlen - 1 + # end if + + # Substract the mean + if center: + avg_mtx = np.outer(avg, avg) + if self.use_bias: + avg_mtx /= tlen * tlen + else: + avg_mtx /= tlen * (tlen - 1) + # end if + mtx -= avg_mtx + # end if + + # Fix the average + avg /= tlen + + return mtx, avg, tlen + # end fix + +# end SFACell diff --git a/ESN/EchoTorch-master/echotorch/nn/StackedESN.py b/ESN/EchoTorch-master/echotorch/nn/StackedESN.py new file mode 100644 index 0000000..f268fbc --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/nn/StackedESN.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# +# File : echotorch/nn/ESN.py +# Description : An Echo State Network module. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti, University of Neuchâtel + +""" +Created on 26 January 2018 +@author: Nils Schaetti +""" + +# Imports +import torch.sparse +import torch +import torch.nn as nn +import echotorch.utils +from torch.autograd import Variable +from . import LiESNCell +from .RRCell import RRCell +from .ESNCell import ESNCell +import numpy as np + + +# Stacked Echo State Network module +class StackedESN(nn.Module): + """ + Stacked Echo State Network module + """ + + # Constructor + def __init__(self, input_dim, hidden_dim, output_dim, leaky_rate=1.0, spectral_radius=0.9, bias_scaling=0, + input_scaling=1.0, w=None, w_in=None, w_bias=None, sparsity=None, input_set=(1.0, -1.0), + w_sparsity=None, nonlin_func=torch.tanh, learning_algo='inv', ridge_param=0.0, with_bias=True): + """ + Constructor + + Arguments: + :param input_dim: Inputs dimension. 
+ :param hidden_dim: Hidden layer dimension + :param output_dim: Reservoir size + :param spectral_radius: Reservoir's spectral radius + :param bias_scaling: Scaling of the bias, a constant input to each neuron (default: 0, no bias) + :param input_scaling: Scaling of the input weight matrix, default 1. + :param w: Internation weights matrix + :param w_in: Input-reservoir weights matrix + :param w_bias: Bias weights matrix + :param w_fdb: Feedback weights matrix + :param sparsity: + :param input_set: + :param w_sparsity: + :param nonlin_func: Reservoir's activation function (tanh, sig, relu) + :param learning_algo: Which learning algorithm to use (inv, LU, grad) + """ + super(StackedESN, self).__init__() + + # Properties + self.n_layers = len(hidden_dim) + self.esn_layers = list() + + # Number of features + self.n_features = 0 + + # Recurrent layer + for n in range(self.n_layers): + # Input dim + layer_input_dim = input_dim if n == 0 else hidden_dim[n-1] + + # Final state size + self.n_features += hidden_dim[n] + + # Parameters + layer_leaky_rate = leaky_rate[n] if type(leaky_rate) is list or type(leaky_rate) is np.ndarray else leaky_rate + layer_spectral_radius = spectral_radius[n] if type(spectral_radius) is list or type(spectral_radius) is np.ndarray else spectral_radius + layer_bias_scaling = bias_scaling[n] if type(bias_scaling) is list or type(bias_scaling) is np.ndarray else bias_scaling + layer_input_scaling = input_scaling[n] if type(input_scaling) is list or type(input_scaling) is np.ndarray else input_scaling + + # W + if type(w) is torch.Tensor and w.dim() == 3: + layer_w = w[n] + elif type(w) is torch.Tensor: + layer_w = w + else: + layer_w = None + # end if + + # W in + if type(w_in) is torch.Tensor and w_in.dim() == 3: + layer_w_in = w_in[n] + elif type(w_in) is torch.Tensor: + layer_w_in = w_in + else: + layer_w_in = None + # end if + + # W bias + if type(w_bias) is torch.Tensor and w_bias.dim() == 2: + layer_w_bias = w_bias[n] + elif type(w_bias) is torch.Tensor: + layer_w_bias = w_bias + else: + layer_w_bias = None + # end if + + # Parameters + layer_sparsity = sparsity[n] if type(sparsity) is list or type(sparsity) is np.ndarray else sparsity + layer_input_set = input_set[n] if type(input_set) is list or type(input_set) is np.ndarray else input_set + layer_w_sparsity = w_sparsity[n] if type(w_sparsity) is list or type(w_sparsity) is np.ndarray else w_sparsity + layer_nonlin_func = nonlin_func[n] if type(nonlin_func) is list or type(nonlin_func) is np.ndarray else nonlin_func + + # Create LiESN cell + self.esn_layers.append(LiESNCell( + layer_leaky_rate, False, layer_input_dim, hidden_dim[n], layer_spectral_radius, layer_bias_scaling, + layer_input_scaling, layer_w, layer_w_in, layer_w_bias, None, layer_sparsity, layer_input_set, + layer_w_sparsity, layer_nonlin_func + )) + # end for + + # Output layer + self.output = RRCell(self.n_features, output_dim, ridge_param, False, with_bias, learning_algo) + # end __init__ + + ############################################### + # PROPERTIES + ############################################### + + # Hidden layer + @property + def hidden(self): + """ + Hidden layer + :return: + """ + # Hidden states + hidden_states = list() + + # For each ESN + for esn_cell in self.esn_layers: + hidden_states.append(esn_cell.hidden) + # end for + + return hidden_states + # end hidden + + # Hidden weight matrix + @property + def w(self): + """ + Hidden weight matrix + :return: + """ + # W + w_mtx = list() + + # For each ESN + for esn_cell in 
self.esn_layers:
+            w_mtx.append(esn_cell.w)
+        # end for
+
+        return w_mtx
+    # end w
+
+    # Input matrix
+    @property
+    def w_in(self):
+        """
+        Input matrix
+        :return:
+        """
+        # W in
+        win_mtx = list()
+
+        # For each ESN
+        for esn_cell in self.esn_layers:
+            win_mtx.append(esn_cell.w_in)
+        # end for
+
+        return win_mtx
+    # end w_in
+
+    ###############################################
+    # PUBLIC
+    ###############################################
+
+    # Reset learning
+    def reset(self):
+        """
+        Reset learning
+        :return:
+        """
+        self.output.reset()
+
+        # Training mode again
+        self.train(True)
+    # end reset
+
+    # Output matrix
+    def get_w_out(self):
+        """
+        Output matrix
+        :return:
+        """
+        return self.output.w_out
+    # end get_w_out
+
+    # Forward
+    def forward(self, u, y=None):
+        """
+        Forward
+        :param u: Input signal.
+        :param y: Target outputs
+        :return: Output or hidden states
+        """
+        # Hidden states
+        hidden_states = Variable(torch.zeros(u.size(0), u.size(1), self.n_features))
+
+        # Compute hidden states
+        pos = 0
+        for index, esn_cell in enumerate(self.esn_layers):
+            layer_dim = esn_cell.output_dim
+            if index == 0:
+                last_hidden_states = esn_cell(u)
+            else:
+                last_hidden_states = esn_cell(last_hidden_states)
+            # end if
+
+            # Update
+            hidden_states[:, :, pos:pos + layer_dim] = last_hidden_states
+
+            # Next position
+            pos += layer_dim
+        # end for
+
+        # Learning algo
+        return self.output(hidden_states, y)
+    # end forward
+
+    # Finish training
+    def finalize(self):
+        """
+        Finalize training with LU factorization
+        """
+        # Finalize output training
+        self.output.finalize()
+
+        # Not in training mode anymore
+        self.train(False)
+    # end finalize
+
+    # Reset hidden layer
+    def reset_hidden(self):
+        """
+        Reset the hidden layer of every reservoir in the stack
+        :return:
+        """
+        for esn_cell in self.esn_layers:
+            esn_cell.reset_hidden()
+        # end for
+    # end reset_hidden
+
+    # Get W's spectral radius
+    def get_spectral_radius(self):
+        """
+        Get the spectral radius of each reservoir in the stack
+        :return: List of spectral radii, one per layer
+        """
+        return [esn_cell.get_spectral_radius() for esn_cell in self.esn_layers]
+    # end get_spectral_radius
+
+    ############################################
+    # STATIC
+    ############################################
+
+    # Generate W matrices for a stacked ESN
+    @staticmethod
+    def generate_ws(n_layers, reservoir_size, w_sparsity):
+        """
+        Generate W matrices for a stacked ESN
+        :param n_layers:
+        :param reservoir_size:
+        :param w_sparsity:
+        :return:
+        """
+        ws = torch.FloatTensor(n_layers, reservoir_size, reservoir_size)
+        for i in range(n_layers):
+            ws[i] = ESNCell.generate_w(reservoir_size, w_sparsity)
+        # end for
+        return ws
+    # end generate_ws
+
+# end StackedESN
diff --git a/ESN/EchoTorch-master/echotorch/nn/__init__.py b/ESN/EchoTorch-master/echotorch/nn/__init__.py
new file mode 100644
index 0000000..b5b2f8c
--- /dev/null
+++ b/ESN/EchoTorch-master/echotorch/nn/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+from .BDESN import BDESN
+from .BDESNPCA import BDESNPCA
+from .BDESNCell import BDESNCell
+from .ESNCell import ESNCell
+from .ESN import ESN
+from .LiESNCell import LiESNCell
+from .LiESN import LiESN
+from .GatedESN import GatedESN
+from .ICACell import ICACell
+from .Identity import Identity
+from .PCACell import PCACell
+from .RRCell import RRCell
+from .SFACell import SFACell
+from .StackedESN import StackedESN
+
+__all__ = [
+    'BDESN', 'BDESNPCA', 'BDESNCell', 'ESNCell', 'ESN', 'LiESNCell', 'LiESN', 'GatedESN', 'ICACell', 'Identity',
+    'PCACell', 'RRCell', 'SFACell', 'StackedESN'
+]
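+
+# A minimal end-to-end sketch of the intended training flow (hypothetical
+# dimensions, kept as a comment):
+#
+#   import torch
+#   from echotorch.nn import ESN
+#
+#   esn = ESN(input_dim=1, hidden_dim=100, output_dim=1, learning_algo='inv')
+#   u = torch.randn(1, 200, 1)          # (batch, time, input)
+#   y = torch.randn(1, 200, 1)          # targets
+#   esn(u, y)                           # accumulate the xTx / xTy statistics
+#   esn.finalize()                      # solve the ridge regression for w_out
+#   y_hat = esn(u)                      # inference with the trained read-out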
diff --git a/ESN/EchoTorch-master/echotorch/transforms/__init__.py b/ESN/EchoTorch-master/echotorch/transforms/__init__.py new file mode 100644 index 0000000..010c46b --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/__init__.py @@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+from . import text
+
+__all__ = [
+ 'text'
+]
diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Character.py b/ESN/EchoTorch-master/echotorch/transforms/text/Character.py new file mode 100644 index 0000000..77ac90a --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Character.py @@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from .Transformer import Transformer
+
+
+# Transform text to character vectors
+class Character(Transformer):
+ """
+ Transform text to character vectors
+ """
+
+ # Constructor
+ def __init__(self, uppercase=False, gram_to_ix=None, start_ix=0, fixed_length=-1):
+ """
+ Constructor
+ """
+ # Gram to ix
+ if gram_to_ix is not None:
+ self.gram_count = len(gram_to_ix.keys())
+ self.gram_to_ix = gram_to_ix
+ else:
+ self.gram_count = start_ix
+ self.gram_to_ix = dict()
+ # end if
+
+ # Ix to gram
+ self.ix_to_gram = dict()
+ if gram_to_ix is not None:
+ for gram in gram_to_ix.keys():
+ self.ix_to_gram[gram_to_ix[gram]] = gram
+ # end for
+ # end if
+
+ # Properties
+ self.uppercase = uppercase
+ self.fixed_length = fixed_length
+
+ # Super constructor
+ super(Character, self).__init__()
+ # end __init__
+
+ ##############################################
+ # Public
+ ##############################################
+
+ ##############################################
+ # Properties
+ ##############################################
+
+ # Get the number of inputs
+ @property
+ def input_dim(self):
+ """
+ Get the number of inputs.
+ :return: The input size.
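+
+ Editor's example (hypothetical, not in the original docstring):
+ >>> c = Character()
+ >>> idxs, length = c(u"ab") # idxs: torch.LongTensor([0, 1]), length: 2
+ >>> c.input_dim # 1 (one index per character)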
+ """ + return 1 + # end input_dim + + # Vocabulary size + @property + def voc_size(self): + """ + Vocabulary size + :return: + """ + return self.gram_count + # end voc_size + + ############################################## + # Private + ############################################## + + # To upper + def to_upper(self, gram): + """ + To upper + :param gram: + :return: + """ + if not self.uppercase: + return gram.lower() + # end if + return gram + # end to_upper + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Add to voc + for i in range(len(text)): + gram = self.to_upper(text[i]) + if gram not in self.gram_to_ix.keys(): + self.gram_to_ix[gram] = self.gram_count + self.ix_to_gram[self.gram_count] = gram + self.gram_count += 1 + # end if + # end for + + # List of character to 2grams + text_idxs = [self.gram_to_ix[self.to_upper(text[i])] for i in range(len(text))] + + # To long tensor + text_idxs = torch.LongTensor(text_idxs) + + # Check length + if self.fixed_length != -1: + if text_idxs.size(0) > self.fixed_length: + text_idxs = text_idxs[:self.fixed_length] + elif text_idxs.size(0) < self.fixed_length: + zero_idxs = torch.LongTensor(self.fixed_length).fill_(0) + zero_idxs[:text_idxs.size(0)] = text_idxs + text_idxs = zero_idxs + # end if + # end if + + return text_idxs, text_idxs.size(0) + # end convert + +# end FunctionWord diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Character2Gram.py b/ESN/EchoTorch-master/echotorch/transforms/text/Character2Gram.py new file mode 100644 index 0000000..350d13b --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Character2Gram.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +from .Transformer import Transformer +import numpy as np + + +# Transform text to character 2-gram +class Character2Gram(Transformer): + """ + Transform text to character 2-grams + """ + + # Constructor + def __init__(self, uppercase=False, gram_to_ix=None, start_ix=0, fixed_length=-1, overlapse=True): + """ + Constructor + """ + # Gram to ix + if gram_to_ix is not None: + self.gram_count = len(gram_to_ix.keys()) + self.gram_to_ix = gram_to_ix + else: + self.gram_count = start_ix + self.gram_to_ix = dict() + # end if + + # Ix to gram + self.ix_to_gram = dict() + if gram_to_ix is not None: + for gram in gram_to_ix.keys(): + self.ix_to_gram[gram_to_ix[gram]] = gram + # end for + # end if + + # Properties + self.uppercase = uppercase + self.fixed_length = fixed_length + self.overlapse = overlapse + + # Super constructor + super(Character2Gram, self).__init__() + # end __init__ + + ############################################## + # Public + ############################################## + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. 
+ """ + return 1 + # end input_dim + + # Vocabulary size + @property + def voc_size(self): + """ + Vocabulary size + :return: + """ + return self.gram_count + # end voc_size + + ############################################## + # Private + ############################################## + + # To upper + def to_upper(self, gram): + """ + To upper + :param gram: + :return: + """ + if not self.uppercase: + return gram.lower() + # end if + return gram + # end to_upper + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Step + if self.overlapse: + step = 1 + else: + step = 2 + # end if + + # Add to voc + for i in np.arange(0, len(text) - 1, step): + gram = self.to_upper(text[i] + text[i+1]) + if gram not in self.gram_to_ix.keys(): + self.gram_to_ix[gram] = self.gram_count + self.ix_to_gram[self.gram_count] = gram + self.gram_count += 1 + # end if + # end for + + # List of character to 2grams + text_idxs = [self.gram_to_ix[self.to_upper(text[i] + text[i+1])] for i in range(len(text)-1)] + + # To long tensor + text_idxs = torch.LongTensor(text_idxs) + + # Check length + if self.fixed_length != -1: + if text_idxs.size(0) > self.fixed_length: + text_idxs = text_idxs[:self.fixed_length] + elif text_idxs.size(0) < self.fixed_length: + zero_idxs = torch.LongTensor(self.fixed_length).fill_(0) + zero_idxs[:text_idxs.size(0)] = text_idxs + text_idxs = zero_idxs + # end if + # end if + + return text_idxs, text_idxs.size(0) + # end convert + +# end Character2Gram diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Character3Gram.py b/ESN/EchoTorch-master/echotorch/transforms/text/Character3Gram.py new file mode 100644 index 0000000..b78e66e --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Character3Gram.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +from .Transformer import Transformer +import numpy as np + + +# Transform text to character 3-gram +class Character3Gram(Transformer): + """ + Transform text to character 3-grams + """ + + # Constructor + def __init__(self, uppercase=False, gram_to_ix=None, start_ix=0, fixed_length=-1, overlapse=True): + """ + Constructor + """ + # Gram to ix + if gram_to_ix is not None: + self.gram_count = len(gram_to_ix.keys()) + self.gram_to_ix = gram_to_ix + else: + self.gram_count = start_ix + self.gram_to_ix = dict() + # end if + + # Ix to gram + self.ix_to_gram = dict() + if gram_to_ix is not None: + for gram in gram_to_ix.keys(): + self.ix_to_gram[gram_to_ix[gram]] = gram + # end for + # end if + + # Properties + self.uppercase = uppercase + self.fixed_length = fixed_length + self.overlapse = overlapse + + # Super constructor + super(Character3Gram, self).__init__() + # end __init__ + + ############################################## + # Public + ############################################## + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. 
+ """ + return 1 + # end input_dim + + # Vocabulary size + @property + def voc_size(self): + """ + Vocabulary size + :return: + """ + return self.gram_count + # end voc_size + + ############################################## + # Private + ############################################## + + # To upper + def to_upper(self, gram): + """ + To upper + :param gram: + :return: + """ + if not self.uppercase: + return gram.lower() + # end if + return gram + # end to_upper + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Step + if self.overlapse: + step = 1 + else: + step = 3 + # end if + + # Add to voc + for i in np.arange(0, len(text) - 2, step): + gram = self.to_upper(text[i] + text[i+1] + text[i+2]) + if gram not in self.gram_to_ix.keys(): + self.gram_to_ix[gram] = self.gram_count + self.ix_to_gram[self.gram_count] = gram + self.gram_count += 1 + # end if + # end for + + # List of character to 3 grams + text_idxs = [self.gram_to_ix[self.to_upper(text[i] + text[i+1] + text[i+2])] for i in range(len(text)-2)] + + # To long tensor + text_idxs = torch.LongTensor(text_idxs) + + # Check length + if self.fixed_length != -1: + if text_idxs.size(0) > self.fixed_length: + text_idxs = text_idxs[:self.fixed_length] + elif text_idxs.size(0) < self.fixed_length: + zero_idxs = torch.LongTensor(self.fixed_length).fill_(0) + zero_idxs[:text_idxs.size(0)] = text_idxs + text_idxs = zero_idxs + # end if + # end if + + return text_idxs, text_idxs.size(0) + # end convert + +# end Character3Gram diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Compose.py b/ESN/EchoTorch-master/echotorch/transforms/text/Compose.py new file mode 100644 index 0000000..ab2e5e2 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Compose.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# + +# Imports +from .Transformer import Transformer + + +# Compose multiple transformations +class Compose(Transformer): + """ + Compose multiple transformations + """ + + # Constructor + def __init__(self, transforms): + """ + Constructor + """ + # Properties + self.transforms = transforms + + # Super constructor + super(Compose, self).__init__() + # end __init__ + + ############################################## + # Public + ############################################## + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. 
+ """ + return self.transforms[-1].input_dim + # end input_dim + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # For each transform + for index, transform in enumerate(self.transforms): + # Transform + if index == 0: + outputs, size = transform(text) + else: + outputs, size = transform(outputs) + # end if + # end for + + return outputs, size + # end convert + +# end Compose diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Embedding.py b/ESN/EchoTorch-master/echotorch/transforms/text/Embedding.py new file mode 100644 index 0000000..9b03152 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Embedding.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import gensim +from gensim.utils import tokenize +import torch +import numpy as np + + +# Transform text to vectors with embedding +class Embedding(object): + """ + Transform text to vectors with embedding + """ + + # Constructor + def __init__(self, weights): + """ + Constructor + :param weights: Embedding weight matrix + """ + # Properties + self.weights = weights + self.voc_size = weights.size(0) + self.embedding_dim = weights.size(1) + # end __init__ + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs + :return: + """ + return self.embedding_dim + # end input_dim + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, idxs): + """ + Convert a strng + :param text: + :return: + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.embedding_dim) + + # Start + start = True + count = 0.0 + + # OOV + zero = 0.0 + self.oov = 0.0 + + # For each inputs + for i in range(idxs.size(0)): + # Get token ix + ix = idxs[i] + + # Get vector + if ix < self.voc_size: + embedding_vector = self.weights[ix] + else: + embedding_vector = torch.zeros(self.embedding_dim) + # end if + + # Test zero + if torch.sum(embedding_vector) == 0.0: + zero += 1.0 + embedding_vector = np.zeros(self.input_dim) + # end if + + # Start/continue + if not start: + inputs = torch.cat((inputs, torch.FloatTensor(embedding_vector).unsqueeze_(0)), dim=0) + else: + inputs = torch.FloatTensor(embedding_vector).unsqueeze_(0) + start = False + # end if + count += 1 + # end for + + # OOV + self.oov = zero / count * 100.0 + + return inputs, inputs.size()[0] + # end convert + + ############################################## + # Static + ############################################## + + +# end Embedding diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/FunctionWord.py b/ESN/EchoTorch-master/echotorch/transforms/text/FunctionWord.py new file mode 100644 index 0000000..73383da --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/FunctionWord.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +import spacy +from .Transformer import Transformer + + +# Transform text to a function word vectors +class FunctionWord(Transformer): + """ + Transform text to character vectors + """ + + # Constructor + def __init__(self, model="en_core_web_lg"): + """ + Constructor + :param model: Spacy's model to load. 
+ """ + # Super constructor + super(FunctionWord, self).__init__() + + # Properties + self.model = model + self.nlp = spacy.load(model) + # end __init__ + + ############################################## + # Public + ############################################## + + # Get tags + def get_tags(self): + """ + Get tags. + :return: A tag list. + """ + return [u"a", u"about", u"above", u"after", u"after", u"again", u"against", u"ago", u"ahead", + u"all", + u"almost", u"along", u"already", u"also", u"although", u"always", u"am", u"among", u"an", + u"and", u"any", u"are", u"aren't", u"around", u"as", u"at", u"away", u"backward", + u"backwards", u"be", u"because", u"before", u"behind", u"below", u"beneath", u"beside", + u"between", u"both", u"but", u"by", u"can", u"cannot", u"can't", u"cause", u"'cos", + u"could", + u"couldn't", u"'d", u"despite", u"did", u"didn't", u"do", u"does", u"doesn't", u"don't", + u"down", u"during", u"each", u"either", u"even", u"ever", u"every", u"except", u"for", + u"forward", u"from", u"had", u"hadn't", u"has", u"hasn't", u"have", u"haven't", u"he", + u"her", u"here", u"hers", u"herself", u"him", u"himself", u"his", u"how", u"however", + u"I", + u"if", u"in", u"inside", u"inspite", u"instead", u"into", u"is", u"isn't", u"it", u"its", + u"itself", u"just", u"'ll", u"least", u"less", u"like", u"'m", u"many", u"may", + u"mayn't", + u"me", u"might", u"mightn't", u"mine", u"more", u"most", u"much", u"must", u"mustn't", + u"my", u"myself", u"near", u"need", u"needn't", u"needs", u"neither", u"never", u"no", + u"none", u"nor", u"not", u"now", u"of", u"off", u"often", u"on", u"once", u"only", + u"onto", + u"or", u"ought", u"oughtn't", u"our", u"ours", u"ourselves", u"out", u"outside", u"over", + u"past", u"perhaps", u"quite", u"'re", u"rather", u"'s", u"seldom", u"several", u"shall", + u"shan't", u"she", u"should", u"shouldn't", u"since", u"so", u"some", u"sometimes", + u"soon", + u"than", u"that", u"the", u"their", u"theirs", u"them", u"themselves", u"then", u"there", + u"therefore", u"these", u"they", u"this", u"those", u"though", u"through", u"thus", + u"till", + u"to", u"together", u"too", u"towards", u"under", u"unless", u"until", u"up", u"upon", + u"us", u"used", u"usedn't", u"usen't", u"usually", u"'ve", u"very", u"was", u"wasn't", + u"we", u"well", u"were", u"weren't", u"what", u"when", u"where", u"whether", u"which", + u"while", u"who", u"whom", u"whose", u"why", u"will", u"with", u"without", u"won't", + u"would", u"wouldn't", u"yet", u"you", u"your", u"yours", u"yourself", u"yourselves", u"X"] + # end get_tags + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.input_dim) + + # Null symbol + null_symbol = torch.zeros(1, self.input_dim) + null_symbol[0, -1] = 1.0 + + # Start + start = True + + # For each tokens + for token in self.nlp(text): + # Replace if not function word + if token.text not in self.symbols: + token_fw = u"X" + else: + token_fw = token.text + # end if + + # Get tag + fw = self.tag_to_symbol(token_fw) + + # Add + if not start: + inputs = torch.cat((inputs, fw), dim=0) + else: + inputs = fw + start = False + # end if + # end for + + return inputs, inputs.size()[0] + # end convert + +# end FunctionWord diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/GensimModel.py 
b/ESN/EchoTorch-master/echotorch/transforms/text/GensimModel.py new file mode 100644 index 0000000..d2f48ab --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/GensimModel.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import gensim +from gensim.utils import tokenize +import torch +import numpy as np + + +# Transform text to vectors with a Gensim model +class GensimModel(object): + """ + Transform text to vectors with a Gensim model + """ + + # Constructor + def __init__(self, model_path): + """ + Constructor + :param model_path: Model's path. + """ + # Properties + self.model_path = model_path + + # Format + binary = False if model_path[-4:] == ".vec" else True + + # Load + self.model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=binary, unicode_errors='ignore') + + # OOV + self.oov = 0.0 + # end __init__ + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. + """ + return 300 + # end input_dim + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.input_dim) + + # Start + start = True + count = 0.0 + + # OOV + zero = 0.0 + self.oov = 0.0 + + # For each tokens + for token in tokenize(text): + found = False + # Try normal + try: + word_vector = self.model[token] + found = True + except KeyError: + pass + # end try + + # Try lower + if not found: + try: + word_vector = self.model[token.lower()] + except KeyError: + zero += 1.0 + word_vector = np.zeros(self.input_dim) + # end try + # end if + + # Start/continue + if not start: + inputs = torch.cat((inputs, torch.FloatTensor(word_vector).unsqueeze_(0)), dim=0) + else: + inputs = torch.FloatTensor(word_vector).unsqueeze_(0) + start = False + # end if + count += 1 + # end for + + # OOV + self.oov = zero / count * 100.0 + + return inputs, inputs.size()[0] + # end convert + + ############################################## + # Static + ######################################### \ No newline at end of file diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/GloveVector.py b/ESN/EchoTorch-master/echotorch/transforms/text/GloveVector.py new file mode 100644 index 0000000..c94b6fd --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/GloveVector.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +import spacy +import numpy as np +from datetime import datetime + + +# Transform text to word vectors +class GloveVector(object): + """ + Transform text to word vectors + """ + + # Constructor + def __init__(self, model="en_vectors_web_lg"): + """ + Constructor + :param model: Spacy's model to load. + """ + # Properties + self.model = model + self.nlp = spacy.load(model) + self.oov = 0.0 + # end __init__ + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. 
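+
+ Editor's note (hypothetical usage, requires the en_vectors_web_lg model):
+ >>> gv = GloveVector()
+ >>> x, n = gv(u"hello world") # x: n x 300 FloatTensor of word vectors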
+ """ + return 300 + # end input_dim + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.input_dim) + + # Start + start = True + count = 0.0 + + # Zero count + zero = 0.0 + self.oov = 0.0 + + # For each tokens + for token in self.nlp(text): + if np.sum(token.vector) == 0: + zero += 1.0 + # end if + if not start: + inputs = torch.cat((inputs, torch.FloatTensor(token.vector).unsqueeze_(0)), dim=0) + else: + inputs = torch.FloatTensor(token.vector).unsqueeze_(0) + start = False + # end if + count += 1.0 + # end for + + # OOV + self.oov = zero / count * 100.0 + + return inputs, inputs.size()[0] + # end convert + + ############################################## + # Static + ############################################## + +# end GloveVector diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/PartOfSpeech.py b/ESN/EchoTorch-master/echotorch/transforms/text/PartOfSpeech.py new file mode 100644 index 0000000..35109c6 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/PartOfSpeech.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +import spacy +from .Transformer import Transformer + + +# Transform text to part-of-speech vectors +class PartOfSpeech(Transformer): + """ + Transform text to part-of-speech vectors + """ + + # Constructor + def __init__(self, model="en_core_web_lg"): + """ + Constructor + :param model: Spacy's model to load. + """ + # Super constructor + super(PartOfSpeech, self).__init__() + + # Properties + self.model = model + self.nlp = spacy.load(model) + # end __init__ + + ############################################## + # Public + ############################################## + + # Get tags + def get_tags(self): + """ + Get tags. + :return: A list of tags. + """ + return [u"ADJ", u"ADP", u"ADV", u"CCONJ", u"DET", u"INTJ", u"NOUN", u"NUM", u"PART", u"PRON", u"PROPN", + u"PUNCT", u"SYM", u"VERB", u"SPACE", u"X"] + # end get_tags + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.input_dim) + + # Start + start = True + + # For each tokens + for token in self.nlp(text): + pos = self.tag_to_symbol(token.pos_) + + if not start: + inputs = torch.cat((inputs, pos), dim=0) + else: + inputs = pos + start = False + # end if + # end for + + return inputs, inputs.size()[0] + # end convert + +# end PartOfSpeech diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Tag.py b/ESN/EchoTorch-master/echotorch/transforms/text/Tag.py new file mode 100644 index 0000000..86bb13c --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Tag.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch +import spacy +from .Transformer import Transformer + + +# Transform text to tag vectors +class Tag(Transformer): + """ + Transform text to tag vectors + """ + + # Constructor + def __init__(self, model="en_core_web_lg"): + """ + Constructor + :param model: Spacy's model to load. 
+ """ + # Super constructor + super(Tag, self).__init__() + + # Properties + self.model = model + self.nlp = spacy.load(model) + # end __init__ + + ############################################## + # Public + ############################################## + + # Get tags + def get_tags(self): + """ + Get all tags. + :return: A list of tags. + """ + return [u"''", u",", u":", u".", u"``", u"-LRB-", u"-RRB-", u"AFX", u"CC", u"CD", u"DT", u"EX", u"FW", + u"IN", u"JJ", u"JJR", u"JJS", u"LS", u"MD", u"NN", u"NNS", u"NNP", u"NNPS", u"PDT", u"POS", u"PRP", + u"PRP$", u"RB", u"RBR", u"RBS", u"RP", u"SYM", u"TO", u"UH", u"VB", u"VBZ", u"VBP", u"VBD", u"VBN", + u"VBG", u"WDT", u"WP", u"WP$", u"WRB", u"X"] + # end get_tags + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as tensor + inputs = torch.FloatTensor(1, self.input_dim) + + # Null symbol + null_symbol = torch.zeros(1, self.input_dim) + null_symbol[0, -1] = 1.0 + + # Start + start = True + + # For each tokens + for token in self.nlp(text): + # Replace if not function word + if token.tag_ not in self.symbols: + token_tag = u"X" + else: + token_tag = token.tag_ + # end if + + # Get tag + tag = self.tag_to_symbol(token_tag) + + # Add + if not start: + inputs = torch.cat((inputs, tag), dim=0) + else: + inputs = tag + start = False + # end if + # end for + + return inputs, inputs.size()[0] + # end convert + +# end FunctionWord diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Token.py b/ESN/EchoTorch-master/echotorch/transforms/text/Token.py new file mode 100644 index 0000000..9c2314e --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Token.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import spacy + + +# Transform text to a list of tokens +class Token(object): + """ + Transform text to a list of tokens + """ + + # Constructor + def __init__(self, model="en_core_web_lg"): + """ + Constructor + :param model: Spacy's model to load. + """ + # Properties + self.model = model + self.nlp = spacy.load(model) + # end __init__ + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. + """ + return 1 + # end input_dim + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, text): + """ + Convert a string to a ESN input + :param text: Text to convert + :return: Tensor of word vectors + """ + # Inputs as a list + tokens = list() + + # For each tokens + for token in self.nlp(text): + tokens.append(unicode(token.text)) + # end for + + return tokens, len(tokens) + # end convert + + ############################################## + # Private + ############################################## + + # Get inputs size + def _get_inputs_size(self): + """ + Get inputs size. 
+ :return: + """ + return 1 + # end if + + ############################################## + # Static + ############################################## + +# end Token diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/Transformer.py b/ESN/EchoTorch-master/echotorch/transforms/text/Transformer.py new file mode 100644 index 0000000..1e167b0 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/Transformer.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch + + +# Base class for text transformers +class Transformer(object): + """ + Base class for text transformers + """ + + # Constructor + def __init__(self): + """ + Constructor + """ + # Properties + self.symbols = self.generate_symbols() + # end __init__ + + ############################################## + # Properties + ############################################## + + # Get the number of inputs + @property + def input_dim(self): + """ + Get the number of inputs. + :return: The input size. + """ + return len(self.get_tags()) + # end input_dim + + ############################################## + # Public + ############################################## + + # Get tags + def get_tags(self): + """ + Get tags. + :return: A list of tags. + """ + return [] + # end get_tags + + # Get symbol from tag + def tag_to_symbol(self, tag): + """ + Get symbol from tag. + :param tag: Tag. + :return: The corresponding symbols. + """ + if tag in self.symbols.keys(): + return self.symbols[tag] + return None + # end word_to_symbol + + # Generate symbols + def generate_symbols(self): + """ + Generate word symbols. + :return: Dictionary of tag to symbols. + """ + result = dict() + for index, p in enumerate(self.get_tags()): + result[p] = torch.zeros(1, self.input_dim) + result[p][0, index] = 1.0 + # end for + return result + # end generate_symbols + + ############################################## + # Override + ############################################## + + # Convert a string + def __call__(self, tokens): + """ + Convert a string to a ESN input + :param tokens: Text to convert + :return: A list of symbols + """ + pass + # end convert + + ############################################## + # Static + ############################################## + +# end TextTransformer diff --git a/ESN/EchoTorch-master/echotorch/transforms/text/__init__.py b/ESN/EchoTorch-master/echotorch/transforms/text/__init__.py new file mode 100644 index 0000000..08fed1c --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/transforms/text/__init__.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# + +# Imports +from .Character import Character +from .Character2Gram import Character2Gram +from .Character3Gram import Character3Gram +from .Compose import Compose +from .Embedding import Embedding +from .FunctionWord import FunctionWord +from .GensimModel import GensimModel +from .GloveVector import GloveVector +from .PartOfSpeech import PartOfSpeech +from .Tag import Tag +from .Token import Token +from .Transformer import Transformer + +__all__ = [ + 'Character', 'Character2Gram', 'Character3Gram', 'Compose', 'Embedding', 'FunctionWord', 'GensimModel', 'Transformer', 'GloveVector', + 'PartOfSpeech', 'Tag', 'Token' +] diff --git a/ESN/EchoTorch-master/echotorch/utils/__init__.py b/ESN/EchoTorch-master/echotorch/utils/__init__.py new file mode 100644 index 0000000..1699038 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/utils/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# + +# Imports +from .error_measures import nrmse, nmse, rmse, mse, 
perplexity, cumperplexity
+from .utility_functions import spectral_radius, deep_spectral_radius, normalize, average_prob, max_average_through_time
+
+__all__ = [
+ 'nrmse', 'nmse', 'rmse', 'mse', 'perplexity', 'cumperplexity', 'spectral_radius', 'deep_spectral_radius',
+ 'normalize', 'average_prob', 'max_average_through_time'
+]
diff --git a/ESN/EchoTorch-master/echotorch/utils/error_measures.py b/ESN/EchoTorch-master/echotorch/utils/error_measures.py new file mode 100644 index 0000000..d129dca --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/utils/error_measures.py @@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+import math
+from decimal import Decimal
+import numpy as np
+
+
+# Normalized root-mean-square error
+def nrmse(outputs, targets):
+ """
+ Normalized root-mean square error
+ :param outputs: Module's outputs
+ :param targets: Target signal to be learned
+ :return: Normalized root-mean square deviation
+ """
+ # Flatten tensors
+ outputs = outputs.view(outputs.nelement())
+ targets = targets.view(targets.nelement())
+
+ # Check dim
+ if outputs.size() != targets.size():
+ raise ValueError(u"Outputs and targets tensors do not have the same number of elements")
+ # end if
+
+ # Normalization with N-1
+ var = torch.std(targets) ** 2
+
+ # Error
+ error = (targets - outputs) ** 2
+
+ # Return
+ return float(math.sqrt(torch.mean(error) / var))
+# end nrmse
+
+
+# Root-mean square error
+def rmse(outputs, targets):
+ """
+ Root-mean square error
+ :param outputs: Module's outputs
+ :param targets: Target signal to be learned
+ :return: Root-mean square deviation
+ """
+ # Flatten tensors
+ outputs = outputs.view(outputs.nelement())
+ targets = targets.view(targets.nelement())
+
+ # Check dim
+ if outputs.size() != targets.size():
+ raise ValueError(u"Outputs and targets tensors do not have the same number of elements")
+ # end if
+
+ # Error
+ error = (targets - outputs) ** 2
+
+ # Return
+ return float(math.sqrt(torch.mean(error)))
+# end rmse
+
+
+# Mean square error
+def mse(outputs, targets):
+ """
+ Mean square error
+ :param outputs: Module's outputs
+ :param targets: Target signal to be learned
+ :return: Mean square deviation
+ """
+ # Flatten tensors
+ outputs = outputs.view(outputs.nelement())
+ targets = targets.view(targets.nelement())
+
+ # Check dim
+ if outputs.size() != targets.size():
+ raise ValueError(u"Outputs and targets tensors do not have the same number of elements")
+ # end if
+
+ # Error
+ error = (targets - outputs) ** 2
+
+ # Return
+ return float(torch.mean(error))
+# end mse
+
+
+# Normalized mean square error
+def nmse(outputs, targets):
+ """
+ Normalized mean square error
+ :param outputs: Module's output
+ :param targets: Target signal to be learned
+ :return: Normalized mean square deviation
+ """
+ # Flatten tensors
+ outputs = outputs.view(outputs.nelement())
+ targets = targets.view(targets.nelement())
+
+ # Check dim
+ if outputs.size() != targets.size():
+ raise ValueError(u"Outputs and targets tensors do not have the same number of elements")
+ # end if
+
+ # Normalization with N-1
+ var = torch.std(targets) ** 2
+
+ # Error
+ error = (targets - outputs) ** 2
+
+ # Return
+ return float(torch.mean(error) / var)
+# end nmse
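[Editor's note, not part of the original patch: a quick sketch of the error measures above on toy tensors; mse and nrmse are re-exported by the utils __init__ shown earlier.]

    import torch
    from echotorch.utils import mse, nrmse

    targets = torch.FloatTensor([1.0, 2.0, 3.0, 4.0])
    outputs = torch.FloatTensor([1.1, 1.9, 3.2, 3.8])
    print(mse(outputs, targets))    # mean of the squared errors
    print(nrmse(outputs, targets))  # RMSE divided by the targets' standard deviation

+
+
+# Perplexity
+def perplexity(output_probs, targets, log=False):
+ """
+ Perplexity
+ :param output_probs: Output probabilities for each word/tokens (length x n_tokens)
+ :param targets: Real word index
+ :return: Perplexity
+ """
+ pp = Decimal(1.0)
+ e_vec = torch.FloatTensor(output_probs.size(0),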
output_probs.size(1)).fill_(np.e) + if log: + set_p = 1.0 / torch.gather(torch.pow(e_vec, exponent=output_probs.data.cpu()), 1, + targets.data.cpu().unsqueeze(1)) + else: + set_p = 1.0 / torch.gather(output_probs.data.cpu(), 1, targets.data.cpu().unsqueeze(1)) + # end if + for j in range(set_p.size(0)): + pp *= Decimal(set_p[j][0]) + # end for + return pp +# end perplexity + + +# Cumulative perplexity +def cumperplexity(output_probs, targets, log=False): + """ + Cumulative perplexity + :param output_probs: + :param targets: + :param log: + :return: + """ + # Get prob of test events + set_p = torch.gather(output_probs, 1, targets.unsqueeze(1)) + + # Make sure it's log + if not log: + set_p = torch.log(set_p) + # end if + + # Log2 + set_log = set_p / np.log(2) + + # sum log + sum_log = torch.sum(set_log) + + # Return + return sum_log +# end cumperplexity diff --git a/ESN/EchoTorch-master/echotorch/utils/utility_functions.py b/ESN/EchoTorch-master/echotorch/utils/utility_functions.py new file mode 100644 index 0000000..6d0bb79 --- /dev/null +++ b/ESN/EchoTorch-master/echotorch/utils/utility_functions.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import torch + + +# Compute spectral radius of a square 2-D tensor +def spectral_radius(m): + """ + Compute spectral radius of a square 2-D tensor + :param m: squared 2D tensor + :return: + """ + return torch.max(torch.abs(torch.eig(m)[0])) +# end spectral_radius + + +# Compute spectral radius of a square 2-D tensor for stacked-ESN +def deep_spectral_radius(m, leaky_rate): + """ + Compute spectral radius of a square 2-D tensor for stacked-ESN + :param m: squared 2D tensor + :param leaky_rate: Layer's leaky rate + :return: + """ + return spectral_radius((1.0 - leaky_rate) * torch.eye(m.size(0), m.size(0)) + leaky_rate * m) +# end spectral_radius + + +# Normalize a tensor on a single dimension +def normalize(tensor, dim=1): + """ + Normalize a tensor on a single dimension + :param t: + :return: + """ + pass +# end normalize + + +# Average probabilties through time +def average_prob(tensor, dim=0): + """ + Average probabilities through time + :param tensor: + :param dim: + :return: + """ + return torch.mean(tensor, dim=dim) +# end average_prob + + +# Max average through time +def max_average_through_time(tensor, dim=0): + """ + Max average through time + :param tensor: + :param dim: Time dimension + :return: + """ + average = torch.mean(tensor, dim=dim) + return torch.max(average, dim=dim)[1] +# end max_average_through_time diff --git a/ESN/EchoTorch-master/examples/MNIST/convert_images.py b/ESN/EchoTorch-master/examples/MNIST/convert_images.py new file mode 100644 index 0000000..2eeca66 --- /dev/null +++ b/ESN/EchoTorch-master/examples/MNIST/convert_images.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# +# File : examples/MNIST/convert_images.py +# Description : Convert images to time series. +# Date : 6th of April, 2017 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. 
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+"""
+Created on 6 April 2017
+@author: Nils Schaetti
+"""
+
+import sys
+import os
+sys.path.insert(0, os.path.abspath('./../..'))
+import echotorch
+
+
+if __name__ == "__main__":
+
+ converter = echotorch.datasets.ImageConverter()
+
+# end if
diff --git a/ESN/EchoTorch-master/examples/datasets/logistic_map.py b/ESN/EchoTorch-master/examples/datasets/logistic_map.py new file mode 100644 index 0000000..b31818a --- /dev/null +++ b/ESN/EchoTorch-master/examples/datasets/logistic_map.py @@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import echotorch.datasets
+from torch.utils.data.dataloader import DataLoader
+
+
+# Logistic map dataset
+log_map = echotorch.datasets.LogisticMapDataset(10000, 10)
+
+# Data loader
+log_map_dataset = DataLoader(log_map, batch_size=10, shuffle=True)
+
+# For each sample
+for data in log_map_dataset:
+ print(data[0])
+# end for
diff --git a/ESN/EchoTorch-master/examples/generation/narma10_esn_feedbacks.py b/ESN/EchoTorch-master/examples/generation/narma10_esn_feedbacks.py new file mode 100644 index 0000000..9fca247 --- /dev/null +++ b/ESN/EchoTorch-master/examples/generation/narma10_esn_feedbacks.py @@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/generation/narma10_esn_feedbacks.py
+# Description : NARMA-10 prediction and generation with a feedback ESN.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch
+from echotorch.datasets.NARMADataset import NARMADataset
+import echotorch.nn as etnn
+import echotorch.utils
+from torch.autograd import Variable
+from torch.utils.data.dataloader import DataLoader
+import numpy as np
+import mdp
+
+# Dataset params
+train_sample_length = 5000
+test_sample_length = 1000
+n_train_samples = 1
+n_test_samples = 1
+batch_size = 1
+spectral_radius = 0.9
+leaky_rate = 1.0
+input_dim = 1
+n_hidden = 100
+
+# Use CUDA?
+use_cuda = False +use_cuda = torch.cuda.is_available() if use_cuda else False + +# Manual seed +mdp.numx.random.seed(1) +np.random.seed(2) +torch.manual_seed(1) + +# NARMA30 dataset +narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1) +narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10) + +# Data loader +trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2) +testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2) + +# ESN cell +esn = etnn.ESN( + input_dim=input_dim, + hidden_dim=n_hidden, + output_dim=1, + spectral_radius=spectral_radius, + learning_algo='inv', + # leaky_rate=leaky_rate, + feedbacks=True +) +if use_cuda: + esn.cuda() +# end if + +# For each batch +for data in trainloader: + # Inputs and outputs + inputs, targets = data + + # To variable + inputs, targets = Variable(inputs), Variable(targets) + if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() + + # Accumulate xTx and xTy + esn(inputs, targets) +# end for + +# Finalize training +esn.finalize() + +# Test MSE +dataiter = iter(testloader) +test_u, test_y = dataiter.next() +test_u, test_y = Variable(test_u), Variable(test_y) +gen_u = Variable(torch.zeros(batch_size, test_sample_length, input_dim)) +if use_cuda: test_u, test_y, gen_u = test_u.cuda(), test_y.cuda(), gen_u.cuda() +y_predicted = esn(test_u) +print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))) +print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))) +print(u"") + +y_generated = esn(gen_u) +print(y_generated) diff --git a/ESN/EchoTorch-master/examples/memory/memtest.py b/ESN/EchoTorch-master/examples/memory/memtest.py new file mode 100644 index 0000000..33adaee --- /dev/null +++ b/ESN/EchoTorch-master/examples/memory/memtest.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# +# File : examples/SwitchAttractor/switch_attractor_esn +# Description : Attractor switching task with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch
+from echotorch.datasets.MemTestDataset import MemTestDataset
+import echotorch.nn as etnn
+import torch.nn as nn
+from torch.autograd import Variable
+from torch.utils.data.dataloader import DataLoader
+import matplotlib.pyplot as plt
+
+# Dataset params
+sample_length = 20
+n_samples = 2
+batch_size = 5
+
+# MemTest dataset
+memtest_dataset = MemTestDataset(sample_length, n_samples, seed=1)
+
+# Data loader
+dataloader = DataLoader(memtest_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+
+# ESN properties
+input_dim = 1
+n_hidden = 20
+
+# ESN cell
+esn = etnn.ESNCell(input_dim, n_hidden)
+
+# Linear layer
+linear = nn.Linear(n_hidden, 1)
+
+# Objective function
+criterion = nn.MSELoss()
+
+# Learning rate
+learning_rate = 0.0001
+
+# Number of iterations
+n_iterations = 10
+
+# For each batch
+for data in dataloader:
+ # For each sample
+ for i_sample in range(data[0].size()[0]):
+ # Inputs and outputs
+ inputs, outputs = data[0][i_sample], data[1][i_sample]
+ inputs, outputs = Variable(inputs), Variable(outputs)
+
+ # Show the graph
+ plt.plot(inputs.data.numpy(), c='b')
+ plt.plot(outputs.data[:, 9].numpy(), c='r')
+ plt.show()
+ # end for
+# end for
\ No newline at end of file
diff --git a/ESN/EchoTorch-master/examples/models/NilsNet_example.py b/ESN/EchoTorch-master/examples/models/NilsNet_example.py new file mode 100644 index 0000000..7944647 --- /dev/null +++ b/ESN/EchoTorch-master/examples/models/NilsNet_example.py @@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/models/NilsNet_example.py
+# Description : NilsNet example on image data.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# +# Copyright Nils Schaetti + + +# Imports +import torch +import echotorch.utils +from torchvision import datasets, transforms +import matplotlib.pyplot as plt +import numpy as np +import os +from torch.autograd import Variable + + +def imshow(inp, title=None): + """Imshow for Tensor.""" + inp = inp.numpy().transpose((1, 2, 0)) + mean = np.array([0.485, 0.456, 0.406]) + std = np.array([0.229, 0.224, 0.225]) + inp = std * inp + mean + inp = np.clip(inp, 0, 1) + plt.imshow(inp) + if title is not None: + plt.title(title) + plt.show() +# end imshow + +# Data augmentation and normalization for training +# Just normalization for validation +data_transforms = { + 'train': transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), + 'val': transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]), +} + +data_dir = 'hymenoptera_data' +image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), + data_transforms[x]) + for x in ['train', 'val']} +dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, + shuffle=True, num_workers=4) + for x in ['train', 'val']} +dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} +class_names = image_datasets['train'].classes + +# Create a NilsNet +nilsnet = echotorch.models.NilsNet(reservoir_dim=1000, sfa_dim=100, ica_dim=100) + +# Get a batch of training data +inputs, classes = next(iter(dataloaders['train'])) +print(inputs.size()) +print(classes.size()) + +inputs = Variable(inputs) +classes = Variable(classes) + +# Make a grid from batch +# out = torchvision.utils.make_grid(inputs) + +# imshow(out, title=[class_names[x] for x in classes]) + +outputs = nilsnet(inputs) + +print(outputs) +print(outputs.size()) \ No newline at end of file diff --git a/ESN/EchoTorch-master/examples/nodes/pca_tests.py b/ESN/EchoTorch-master/examples/nodes/pca_tests.py new file mode 100644 index 0000000..1fd4ed6 --- /dev/null +++ b/ESN/EchoTorch-master/examples/nodes/pca_tests.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# +# File : examples/timeserie_prediction/switch_attractor_esn +# Description : NARMA 30 prediction with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Nils Schaetti + + +# Imports +import torch +import echotorch.nn as etnn +from torch.autograd import Variable +import mdp + + +# Settings +input_dim = 10 +output_dim = 3 +tlen = 500 + +# Generate +training_samples = torch.randn(1, tlen, input_dim) +test_samples = torch.randn(1, tlen, input_dim) + +# Generate +training_samples_np = training_samples[0].numpy() +test_samples_np = test_samples[0].numpy() + +# Show +print(u"Training samples : {}".format(training_samples_np)) +print(u"Test samples : {}".format(test_samples_np)) + +# PCA node +mdp_pca_node = mdp.Flow([mdp.nodes.PCANode(input_dim=input_dim, output_dim=output_dim)]) +mdp_pca_node.train(training_samples_np) +pca_reduced = mdp_pca_node(test_samples_np) + +# Show +print(u"PCA reduced : {}".format(pca_reduced)) + +# EchoTorch PCA node +et_pca_node = etnn.PCACell(input_dim=input_dim, output_dim=output_dim) +et_pca_node(Variable(training_samples)) +et_pca_node.finalize() +et_reduced = et_pca_node(Variable(test_samples)) + +# Show +print(u"Reduced with EchoTorch/PCA :") +print(et_reduced) diff --git a/ESN/EchoTorch-master/examples/switch_attractor/switch_attractor_esn.py b/ESN/EchoTorch-master/examples/switch_attractor/switch_attractor_esn.py new file mode 100644 index 0000000..6127954 --- /dev/null +++ b/ESN/EchoTorch-master/examples/switch_attractor/switch_attractor_esn.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# +# File : examples/timeserie_prediction/switch_attractor_esn +# Description : NARMA 30 prediction with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + + +# Imports +import torch +from echotorch.datasets.SwitchAttractorDataset import SwitchAttractorDataset +import echotorch.nn as etnn +import echotorch.utils +from torch.autograd import Variable +from torch.utils.data.dataloader import DataLoader +import numpy as np +import mdp +import matplotlib.pyplot as plt + +# Dataset params +train_sample_length = 1000 +test_sample_length = 1000 +n_train_samples = 40 +n_test_samples = 10 +batch_size = 1 +spectral_radius = 0.9 +leaky_rate = 1.0 +input_dim = 1 +n_hidden = 100 + +# Use CUDA? 
+use_cuda = False +use_cuda = torch.cuda.is_available() if use_cuda else False + +# Manual seed +mdp.numx.random.seed(1) +np.random.seed(2) +torch.manual_seed(1) + +# Switch attractor dataset +switch_train_dataset = SwitchAttractorDataset(train_sample_length, n_train_samples, seed=1) +switch_test_dataset = SwitchAttractorDataset(test_sample_length, n_test_samples, seed=10) + +# Data loader +trainloader = DataLoader(switch_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2) +testloader = DataLoader(switch_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2) + +# ESN cell +esn = etnn.LiESN(input_dim=input_dim, hidden_dim=n_hidden, output_dim=1, spectral_radius=spectral_radius, + learning_algo='inv', leaky_rate=leaky_rate, feedbacks=True) +if use_cuda: + esn.cuda() +# end if + +# For each batch +for data in trainloader: + # Inputs and outputs + inputs, targets = data + + # To variable + inputs, targets = Variable(inputs), Variable(targets) + if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() + # plt.plot(targets.data[0].numpy(), c='b') + # plt.plot(y_predicted.data[0, :, 0].numpy(), c='r') + # plt.show() + # Accumulate xTx and xTy + esn(inputs, targets) +# end for + +# Finalize training +esn.finalize() + +# For each batch +for data in testloader: + # Test MSE + test_u, test_y = data + test_u, test_y = Variable(test_u), Variable(test_y) + if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda() + y_predicted = esn(test_u) + plt.ylim(ymax=10) + plt.plot(test_y.data[0].numpy(), c='b') + plt.plot(y_predicted.data[0, :, 0].numpy(), c='r') + plt.show() +# end for diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/mackey_glass_esn.py b/ESN/EchoTorch-master/examples/timeserie_prediction/mackey_glass_esn.py new file mode 100644 index 0000000..48375fb --- /dev/null +++ b/ESN/EchoTorch-master/examples/timeserie_prediction/mackey_glass_esn.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# +# File : examples/SwitchAttractor/switch_attractor_esn +# Description : Attractor switching task with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Copyright Nils Schaetti + + +# Imports +import torch +from echotorch.datasets.MackeyGlassDataset import MackeyGlassDataset +import echotorch.nn as etnn +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data.dataloader import DataLoader +import matplotlib.pyplot as plt + +# Dataset params +sample_length = 1000 +n_samples = 40 +batch_size = 5 + +# Mackey glass dataset +mackey_glass_dataset = MackeyGlassDataset(sample_length, n_samples, tau=30) + +# Data loader +dataloader = DataLoader(mackey_glass_dataset, batch_size=5, shuffle=False, num_workers=2) + +# ESN properties +input_dim = 1 +n_hidden = 20 + +# ESN cell +esn = etnn.ESNCell(input_dim, n_hidden) + +# Linear layer +linear = nn.Linear(n_hidden, 1) + +# Objective function +criterion = nn.MSELoss() + +# Learning rate +learning_rate = 0.0001 + +# Number of iterations +n_iterations = 10 + +# For each iterations +for i_iter in range(n_iterations): + # Iterate through batches + for i_batch, sample_batched in enumerate(dataloader): + # For each sample + for i_sample in range(sample_batched.size()[0]): + # Inputs and outputs + inputs = Variable(sample_batched[i_sample][:-1], requires_grad=False) + outputs = Variable(sample_batched[i_sample][1:], requires_grad=False) + esn_outputs = torch.zeros(sample_length-1, 1) + gradients = torch.zeros(sample_length-1, 1) + + # Init hidden + hidden = esn.init_hidden() + + # Zero grad + esn.zero_grad() + + # Null loss + loss = 0 + + # For each input + for pos in range(sample_length-1): + # Compute next state + next_hidden = esn(inputs[pos], hidden) + + # Linear output + out = linear(next_hidden) + esn_outputs[pos, :] = out.data + + # Add loss + loss += criterion(out, outputs[pos]) + # end for + + # Loss + loss.div_(sample_length-1) + + loss.backward() + + # Update parameters + for p in linear.parameters(): + p.data.add_(-learning_rate, p.grad.data) + # end for + + # Show the graph only for last sample of iteration + #if i_batch == len(dataloader) - 1 and i_sample == len(sample_batched) -1 : + """plt.plot(inputs.data.numpy(), c='b') + plt.plot(outputs.data.numpy(), c='lightblue') + plt.plot(esn_outputs.numpy(), c='r') + plt.show()""" + # end if + # end for + # end for + + # Print + print(u"Iteration {}, loss {}".format(i_iter, loss.data[0])) +# end for diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py new file mode 100644 index 0000000..5ab3c89 --- /dev/null +++ b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# +# File : examples/timeserie_prediction/switch_attractor_esn +# Description : NARMA 30 prediction with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py
new file mode 100644
index 0000000..5ab3c89
--- /dev/null
+++ b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/timeserie_prediction/narma10_esn
+# Description : NARMA-10 prediction with ESN.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch
+from echotorch.datasets.NARMADataset import NARMADataset
+import echotorch.nn as etnn
+import echotorch.utils
+from torch.autograd import Variable
+from torch.utils.data.dataloader import DataLoader
+import numpy as np
+import mdp
+
+# Dataset params
+train_sample_length = 5000
+test_sample_length = 1000
+n_train_samples = 1
+n_test_samples = 1
+batch_size = 1
+spectral_radius = 0.9
+leaky_rate = 1.0
+input_dim = 1
+n_hidden = 100
+
+# Use CUDA?
+use_cuda = False
+use_cuda = torch.cuda.is_available() if use_cuda else False
+
+# Manual seed
+mdp.numx.random.seed(1)
+np.random.seed(2)
+torch.manual_seed(1)
+
+# NARMA-10 dataset
+narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1)
+narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10)
+
+# Data loader
+trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+
+# ESN cell
+esn = etnn.LiESN(input_dim=input_dim, hidden_dim=n_hidden, output_dim=1, spectral_radius=spectral_radius, learning_algo='inv', leaky_rate=leaky_rate)
+if use_cuda:
+    esn.cuda()
+# end if
+
+# For each batch
+for data in trainloader:
+    # Inputs and outputs
+    inputs, targets = data
+
+    # To variable
+    inputs, targets = Variable(inputs), Variable(targets)
+    if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
+
+    # Accumulate xTx and xTy
+    esn(inputs, targets)
+# end for
+
+# Finalize training
+esn.finalize()
+
+# Train MSE
+dataiter = iter(trainloader)
+train_u, train_y = next(dataiter)
+train_u, train_y = Variable(train_u), Variable(train_y)
+if use_cuda: train_u, train_y = train_u.cuda(), train_y.cuda()
+y_predicted = esn(train_u)
+print(u"Train MSE: {}".format(echotorch.utils.mse(y_predicted.data, train_y.data)))
+print(u"Train NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, train_y.data)))
+print(u"")
+
+# Test MSE
+dataiter = iter(testloader)
+test_u, test_y = next(dataiter)
+test_u, test_y = Variable(test_u), Variable(test_y)
+if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
+y_predicted = esn(test_u)
+print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
+print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
+print(u"")
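+
+# For reference, the 'inv' learning algorithm solves the readout in closed
+# form: the training loop accumulates X^T X and X^T Y, and finalize() then
+# computes (up to the library's internal regularisation) something of the form
+#
+#   Wout = (X^T X)^{-1} X^T Y
+#
+# i.e., in PyTorch terms, a sketch like: Wout = torch.mm(torch.inverse(xTx), xTy)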
diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn_sgd.py b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn_sgd.py
new file mode 100644
index 0000000..93bc453
--- /dev/null
+++ b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_esn_sgd.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/timeserie_prediction/narma10_esn_sgd
+# Description : NARMA-10 prediction with an ESN trained by stochastic gradient descent.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch
+import torch.optim as optim
+from echotorch.datasets.NARMADataset import NARMADataset
+import echotorch.nn as etnn
+import echotorch.utils
+import torch.nn as nn
+from torch.autograd import Variable
+from torch.utils.data.dataloader import DataLoader
+import numpy as np
+import mdp
+
+# Parameters
+spectral_radius = 0.9
+leaky_rate = 1.0
+learning_rate = 0.04
+input_dim = 1
+n_hidden = 100
+n_iterations = 2000
+train_sample_length = 5000
+test_sample_length = 1000
+n_train_samples = 1
+n_test_samples = 1
+batch_size = 1
+momentum = 0.95
+weight_decay = 0
+
+# Use CUDA?
+use_cuda = True
+use_cuda = torch.cuda.is_available() if use_cuda else False
+
+# Manual seed
+mdp.numx.random.seed(1)
+np.random.seed(2)
+torch.manual_seed(1)
+
+# NARMA-10 dataset
+narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1)
+narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10)
+
+# Data loader
+trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+
+# ESN cell
+esn = etnn.ESN(input_dim=input_dim, hidden_dim=n_hidden, output_dim=1, spectral_radius=spectral_radius, learning_algo='grad')
+if use_cuda:
+    esn.cuda()
+# end if
+
+# Objective function
+criterion = nn.MSELoss()
+
+# Stochastic Gradient Descent
+optimizer = optim.SGD(esn.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
+
+# For each iteration
+for epoch in range(n_iterations):
+    # Iterate over batches
+    for data in trainloader:
+        # Inputs and outputs
+        inputs, targets = data
+        inputs, targets = Variable(inputs), Variable(targets)
+        if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
+
+        # Gradients to zero
+        optimizer.zero_grad()
+
+        # Forward
+        out = esn(inputs)
+        loss = criterion(out, targets)
+
+        # Backward pass
+        loss.backward()
+
+        # Optimize
+        optimizer.step()
+
+        # Print error measures
+        print(u"Train MSE: {}".format(float(loss.data)))
+        print(u"Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data)))
+    # end for
+
+    # Test reservoir
+    dataiter = iter(testloader)
+    test_u, test_y = next(dataiter)
+    test_u, test_y = Variable(test_u), Variable(test_y)
+    if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
+    y_predicted = esn(test_u)
+
+    # Print error measures
+    print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
+    print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
+    print(u"")
+# end for
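+
+# For reference, the NRMSE reported above is the RMSE normalised by the spread
+# of the targets. One common definition (echotorch.utils.nrmse is the
+# authoritative implementation here) is, as a NumPy sketch:
+#
+#   def nrmse_sketch(pred, target):
+#       return np.sqrt(np.mean((pred - target) ** 2)) / np.std(target)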
diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_gated_esn.py b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_gated_esn.py
new file mode 100644
index 0000000..343fc91
--- /dev/null
+++ b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_gated_esn.py
@@ -0,0 +1,134 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/timeserie_prediction/narma10_gated_esn
+# Description : NARMA-10 prediction with a gated ESN.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch
+import torch.optim as optim
+from echotorch.datasets.NARMADataset import NARMADataset
+import echotorch.nn as etnn
+import echotorch.utils
+import torch.nn as nn
+from torch.autograd import Variable
+from torch.utils.data.dataloader import DataLoader
+import numpy as np
+import mdp
+
+# Parameters
+spectral_radius = 0.9
+leaky_rate = 1.0
+learning_rate = 0.04
+reservoir_dim = 100
+hidden_dim = 20
+input_dim = 1
+n_iterations = 2000
+train_sample_length = 5000
+test_sample_length = 1000
+n_train_samples = 1
+n_test_samples = 1
+batch_size = 1
+momentum = 0.95
+weight_decay = 0
+
+# Use CUDA?
+use_cuda = True
+use_cuda = torch.cuda.is_available() if use_cuda else False
+
+# Manual seed
+mdp.numx.random.seed(1)
+np.random.seed(2)
+torch.manual_seed(1)
+
+# NARMA-10 dataset
+narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1)
+narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10)
+
+# Data loader
+trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
+
+# Linear output
+linear = nn.Linear(in_features=hidden_dim, out_features=1)
+
+# ESN cell
+gated_esn = etnn.GatedESN(
+    input_dim=input_dim,
+    reservoir_dim=reservoir_dim,
+    pca_dim=hidden_dim,
+    hidden_dim=hidden_dim,
+    leaky_rate=leaky_rate,
+    spectral_radius=spectral_radius
+)
+if use_cuda:
+    gated_esn.cuda()
+    linear.cuda()
+# end if
+
+# Objective function
+criterion = nn.MSELoss()
+
+# Stochastic Gradient Descent (the linear readout is trained as well)
+optimizer = optim.SGD(list(gated_esn.parameters()) + list(linear.parameters()), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
+
+# For each iteration
+for epoch in range(n_iterations):
+    # Iterate over batches
+    for data in trainloader:
+        # Inputs and outputs
+        inputs, targets = data
+        inputs, targets = Variable(inputs), Variable(targets)
+        if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
+
+        # Gradients to zero
+        optimizer.zero_grad()
+
+        # Forward
+        out = linear(gated_esn(inputs))
+        loss = criterion(out, targets)
+
+        # Backward pass
+        loss.backward()
+
+        # Optimize
+        optimizer.step()
+
+        # Print error measures
+        print(u"Train MSE: {}".format(float(loss.data)))
+        print(u"Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data)))
+    # end for
+
+    # Test reservoir
+    dataiter = iter(testloader)
+    test_u, test_y = next(dataiter)
+    test_u, test_y = Variable(test_u), Variable(test_y)
+    if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
+    y_predicted = linear(gated_esn(test_u))
+
+    # Print error measures
+    print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
+    print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
+    print(u"")
+# end for
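+
+# Once training looks converged, both trained modules can be checkpointed with
+# the standard PyTorch mechanism (a sketch; the file names are illustrative):
+# torch.save(gated_esn.state_dict(), 'gated_esn.pth')
+# torch.save(linear.state_dict(), 'readout.pth')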
{}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))) + print(u"") +# end for diff --git a/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_stacked_esn.py b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_stacked_esn.py new file mode 100644 index 0000000..842c067 --- /dev/null +++ b/ESN/EchoTorch-master/examples/timeserie_prediction/narma10_stacked_esn.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# +# File : examples/timeserie_prediction/switch_attractor_esn +# Description : NARMA 30 prediction with ESN. +# Date : 26th of January, 2018 +# +# This file is part of EchoTorch. EchoTorch is free software: you can +# redistribute it and/or modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation, version 2. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 51 +# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Copyright Nils Schaetti + + +# Imports +import torch +from echotorch.datasets.NARMADataset import NARMADataset +import echotorch.nn as etnn +import echotorch.utils +from torch.autograd import Variable +from torch.utils.data.dataloader import DataLoader +import numpy as np +import mdp + +# Dataset params +train_sample_length = 5000 +test_sample_length = 1000 +n_train_samples = 1 +n_test_samples = 1 +batch_size = 1 +spectral_radius = 0.9 +leaky_rates = [1.0, 0.5, 0.1] +input_dim = 1 +n_hidden = [100, 100, 100] + +# Use CUDA? +use_cuda = False +use_cuda = torch.cuda.is_available() if use_cuda else False + +# Manual seed +mdp.numx.random.seed(1) +np.random.seed(2) +torch.manual_seed(1) + +# NARMA30 dataset +narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1) +narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10) + +# Data loader +trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2) +testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2) + +# ESN cell +esn = etnn.StackedESN(input_dim=input_dim, hidden_dim=n_hidden, output_dim=1, spectral_radius=spectral_radius, learning_algo='inv', leaky_rate=leaky_rates) + +# For each batch +for data in trainloader: + # Inputs and outputs + inputs, targets = data + + # To variable + inputs, targets = Variable(inputs), Variable(targets) + if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() + + # Accumulate xTx and xTy + hidden_states = esn(inputs, targets) + for i in range(10): + print(hidden_states[0, i]) + # end if +# end for \ No newline at end of file diff --git a/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py b/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py new file mode 100644 index 0000000..4e8782e --- /dev/null +++ b/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# + +# Imports +import mdp +import numpy as np +import matplotlib.pyplot as plt + +# Init. 
diff --git a/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py b/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py
new file mode 100644
index 0000000..4e8782e
--- /dev/null
+++ b/ESN/EchoTorch-master/examples/unsupervised_learning/sfa_logmap.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import mdp
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Init. random seed
+np.random.seed(0)
+
+# Parameters
+n = 10000
+p2 = np.pi * 2
+t = np.linspace(0, 1, n, endpoint=0)
+dforce = np.sin(p2*5*t) + np.sin(p2*11*t) + np.sin(p2*13*t)
+
+
+def logistic_map(x, r):
+    return r*x*(1-x)
+# end logistic_map
+
+# Series
+series = np.zeros((n, 1), 'd')
+series[0] = 0.6
+
+# Create series (logistic map with a slowly varying driving force)
+for i in range(1, n):
+    series[i] = logistic_map(series[i-1], 3.6 + 0.13*dforce[i])
+# end for
+
+# MDP flow
+flow = (mdp.nodes.EtaComputerNode() +
+        mdp.nodes.TimeFramesNode(10) +
+        mdp.nodes.PolynomialExpansionNode(3) +
+        mdp.nodes.SFA2Node(output_dim=1) +
+        mdp.nodes.EtaComputerNode())
+
+# Train
+flow.train(series)
+
+# Slow
+slow = flow(series)
+
+# Rescale the driving force to zero mean, unit variance
+resc_dforce = (dforce - np.mean(dforce, 0)) / np.std(dforce, 0)
+
+print(u"Covariance: {}".format(mdp.utils.cov2(resc_dforce[:-9], slow)))
+print(u"Eta value (time series) : {}".format(flow[0].get_eta(t=10000)))
+print(u"Eta value (slow feature) : {}".format(flow[-1].get_eta(t=9996)))
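+
+# matplotlib is imported above but never used; a quick visual check of the
+# extracted slow feature against the rescaled driving force (a sketch; the
+# TimeFramesNode(10) trims the output to n - 9 samples, hence the slicing):
+plt.plot(resc_dforce[:-9], c='b')
+plt.plot(slow[:, 0], c='r')
+plt.show()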
diff --git a/ESN/EchoTorch-master/examples/validation/validation_10cv.py b/ESN/EchoTorch-master/examples/validation/validation_10cv.py
new file mode 100644
index 0000000..0082a5e
--- /dev/null
+++ b/ESN/EchoTorch-master/examples/validation/validation_10cv.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+#
+# File : examples/validation/validation_10cv
+# Description : 10-fold cross-validation on the Reuters C50 dataset.
+# Date : 26th of January, 2018
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import torch.utils.data
+from echotorch import datasets
+from echotorch.transforms import text
+
+
+# Reuters C50 dataset
+reutersloader = torch.utils.data.DataLoader(
+    datasets.ReutersC50Dataset(root="../../data/reutersc50/", download=True, n_authors=2,
+                               transform=text.Token(), dataset_size=2, dataset_start=20),
+    batch_size=1, shuffle=True)
+
+# For each fold
+for k in range(10):
+    # Set fold and training mode
+    reutersloader.dataset.set_fold(k)
+    reutersloader.dataset.set_train(True)
+
+    # Get training data for this fold
+    for i, data in enumerate(reutersloader):
+        # Inputs and labels
+        inputs, label, labels = data
+    # end for
+
+    # Set test mode
+    reutersloader.dataset.set_train(False)
+
+    # Get test data for this fold
+    for i, data in enumerate(reutersloader):
+        # Inputs and labels
+        inputs, label, labels = data
+    # end for
+# end for
diff --git a/ESN/EchoTorch-master/requirements.txt b/ESN/EchoTorch-master/requirements.txt
new file mode 100644
index 0000000..903b0de
--- /dev/null
+++ b/ESN/EchoTorch-master/requirements.txt
@@ -0,0 +1,6 @@
+# This is an implicit value, here for clarity
+--index-url https://pypi.python.org/simple/
+
+sphinx_bootstrap_theme
+http://download.pytorch.org/whl/cu75/torch-0.1.11.post5-cp27-none-linux_x86_64.whl
+torchvision
\ No newline at end of file
diff --git a/ESN/EchoTorch-master/setup.py b/ESN/EchoTorch-master/setup.py
new file mode 100644
index 0000000..d6b2498
--- /dev/null
+++ b/ESN/EchoTorch-master/setup.py
@@ -0,0 +1,18 @@
+from setuptools import setup, find_packages
+
+setup(name='EchoTorch',
+      version='0.1.2',
+      description="A Python toolkit for Reservoir Computing.",
+      long_description="A Python toolkit for Reservoir Computing and Echo State Network experimentation based on PyTorch.",
+      author='Nils Schaetti',
+      author_email='nils.schaetti@unine.ch',
+      license='GPLv3',
+      packages=find_packages(),
+      install_requires=[
+          'torch',
+          'numpy',
+          'torchvision'
+      ],
+      zip_safe=False
+      )
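+
+# Note: install_requires pins no versions, while requirements.txt above targets
+# a torch 0.1.11 (CUDA 7.5, Python 2.7) wheel. The example scripts in this
+# patch use the pre-0.4 Variable API, so a ceiling such as 'torch<0.4' in
+# install_requires (an illustrative suggestion, not part of the original
+# package) would keep them runnable unmodified.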